| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
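Each row below pairs a Python class body whose name is masked as `____` (the `source` column) with the masked name itself (the `target` column); `class_span` records the character offsets of the class definition inside the file at `path`. A minimal sketch of how such a row could be handled follows; the row layout is taken from the table itself, but the helper functions, and the assumption that `class_span` offsets index into the repository file, are illustrative rather than part of the dataset's documentation.

```python
# Illustrative sketch only: the field names follow the header above, the example
# values are copied from the django-polymorphic row further down, and the
# helpers are hypothetical (they are not shipped with the dataset).

def fill_mask(row: dict) -> str:
    """Substitute the masked class name (`____`) in `source` with `target`."""
    return row["source"].replace("____", row["target"], 1)

def span_length(row: dict) -> int:
    """Character length of the original class definition, per `class_span`."""
    return row["class_span"]["end"] - row["class_span"]["start"]

example_row = {
    "language": "python",
    "repo": "jazzband__django-polymorphic",
    "path": "src/polymorphic/admin/childadmin.py",
    "class_span": {"start": 325, "end": 427},
    "source": 'class ____(RuntimeError):\n    "The admin site for the model is not registered."',
    "target": "ParentAdminNotRegistered",
}

restored = fill_mask(example_row)
assert restored.startswith("class ParentAdminNotRegistered(RuntimeError):")
print(span_length(example_row))  # 427 - 325 = 102 characters in the original file
```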
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/sql/schema.py
|
{ "start": 139715, "end": 140482 }
|
class ____(ColumnDefault):
"""default generator for a SQL expression
.. versionadded:: 2.0
"""
is_clause_element = True
has_arg = True
arg: _SQLExprDefault
def __init__(
self,
arg: _SQLExprDefault,
for_update: bool = False,
) -> None:
self.for_update = for_update
self.arg = arg
def _copy(self) -> ColumnElementColumnDefault:
return ColumnElementColumnDefault(
arg=self.arg, for_update=self.for_update
)
@util.memoized_property
@util.preload_module("sqlalchemy.sql.sqltypes")
def _arg_is_typed(self) -> bool:
sqltypes = util.preloaded.sql_sqltypes
return not isinstance(self.arg.type, sqltypes.NullType)
|
ColumnElementColumnDefault
|
python
|
huggingface__transformers
|
tests/models/llama4/test_modeling_llama4.py
|
{ "start": 1094, "end": 5205 }
|
class ____(unittest.TestCase):
model_id = "meta-llama/Llama-4-Scout-17B-16E"
@classmethod
def setUpClass(cls):
cls.model = Llama4ForConditionalGeneration.from_pretrained(
"meta-llama/Llama-4-Scout-17B-16E",
device_map="auto",
dtype=torch.float32,
attn_implementation="eager",
)
def setUp(self):
self.processor = Llama4Processor.from_pretrained("meta-llama/Llama-4-Scout-17B-16E", padding_side="left")
url = "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png"
self.messages_1 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{"type": "image", "url": url},
{"type": "text", "text": "What is shown in this image?"},
],
},
]
self.messages_2 = [
{"role": "system", "content": [{"type": "text", "text": "You are a helpful assistant."}]},
{
"role": "user",
"content": [
{
"type": "image",
"url": "https://huggingface.co/datasets/hf-internal-testing/fixtures-captioning/resolve/main/cow_beach_1.png",
},
{
"type": "image",
"url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg",
},
{"type": "text", "text": "Are these images identical?"},
],
},
]
def tearDown(self):
cleanup(torch_device, gc_collect=True)
def test_model_17b_16e_fp32(self):
EXPECTED_TEXTS = Expectations(
{
("xpu", 3): ['system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach with a blue sky and a body of water in the background. The cow is brown with a white face'],
("cuda", None): ['system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white'],
}
) # fmt: skip
EXPECTED_TEXT = EXPECTED_TEXTS.get_expectation()
inputs = self.processor.apply_chat_template(
self.messages_1, tokenize=True, add_generation_prompt=True, return_tensors="pt", return_dict=True
).to(device=torch_device, dtype=self.model.dtype)
output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
print(output_text)
self.assertEqual(output_text, EXPECTED_TEXT)
def test_model_17b_16e_batch(self):
inputs = self.processor.apply_chat_template(
[self.messages_1, self.messages_2],
tokenize=True,
return_dict=True,
return_tensors="pt",
padding=True,
add_generation_prompt=True,
).to(device=torch_device, dtype=torch.float32)
output = self.model.generate(**inputs, max_new_tokens=30, do_sample=False)
output_text = self.processor.batch_decode(output, skip_special_tokens=True)
EXPECTED_TEXTS = [
'system\n\nYou are a helpful assistant.user\n\nWhat is shown in this image?assistant\n\nThe image shows a cow standing on a beach, with a blue sky and a body of water in the background. The cow is brown with a white',
'system\n\nYou are a helpful assistant.user\n\nAre these images identical?assistant\n\nNo, these images are not identical. The first image shows a cow standing on a beach with a blue sky and a white cloud in the background.'
] # fmt: skip
self.assertEqual(output_text, EXPECTED_TEXTS)
|
Llama4IntegrationTest
|
python
|
huggingface__transformers
|
src/transformers/models/mobilenet_v2/modeling_mobilenet_v2.py
|
{ "start": 8918, "end": 12939 }
|
class ____(MobileNetV2PreTrainedModel):
def __init__(self, config: MobileNetV2Config, add_pooling_layer: bool = True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
# Output channels for the projection layers
channels = [16, 24, 24, 32, 32, 32, 64, 64, 64, 64, 96, 96, 96, 160, 160, 160, 320]
channels = [apply_depth_multiplier(config, x) for x in channels]
# Strides for the depthwise layers
strides = [2, 1, 2, 1, 1, 2, 1, 1, 1, 1, 1, 1, 2, 1, 1, 1]
self.conv_stem = MobileNetV2Stem(
config,
in_channels=config.num_channels,
expanded_channels=apply_depth_multiplier(config, 32),
out_channels=channels[0],
)
current_stride = 2 # first conv layer has stride 2
dilation = 1
self.layer = nn.ModuleList()
for i in range(16):
# Keep making the feature maps smaller or use dilated convolution?
if current_stride == config.output_stride:
layer_stride = 1
layer_dilation = dilation
dilation *= strides[i] # larger dilation starts in next block
else:
layer_stride = strides[i]
layer_dilation = 1
current_stride *= layer_stride
self.layer.append(
MobileNetV2InvertedResidual(
config,
in_channels=channels[i],
out_channels=channels[i + 1],
stride=layer_stride,
dilation=layer_dilation,
)
)
if config.finegrained_output and config.depth_multiplier < 1.0:
output_channels = 1280
else:
output_channels = apply_depth_multiplier(config, 1280)
self.conv_1x1 = MobileNetV2ConvLayer(
config,
in_channels=channels[-1],
out_channels=output_channels,
kernel_size=1,
)
self.pooler = nn.AdaptiveAvgPool2d((1, 1)) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.conv_stem(pixel_values)
all_hidden_states = () if output_hidden_states else None
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
last_hidden_state = self.conv_1x1(hidden_states)
if self.pooler is not None:
pooled_output = torch.flatten(self.pooler(last_hidden_state), start_dim=1)
else:
pooled_output = None
if not return_dict:
return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None)
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=all_hidden_states,
)
@auto_docstring(
custom_intro="""
MobileNetV2 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
ImageNet.
"""
)
|
MobileNetV2Model
|
python
|
jazzband__django-polymorphic
|
src/polymorphic/admin/childadmin.py
|
{ "start": 325, "end": 427 }
|
class ____(RuntimeError):
"The admin site for the model is not registered."
|
ParentAdminNotRegistered
|
python
|
huggingface__transformers
|
src/transformers/models/deepseek_v2/modular_deepseek_v2.py
|
{ "start": 21821, "end": 22073 }
|
class ____(LlamaForSequenceClassification):
pass
__all__ = [
"DeepseekV2PreTrainedModel",
"DeepseekV2Model",
"DeepseekV2ForCausalLM",
"DeepseekV2ForSequenceClassification",
"DeepseekV2Config",
]
|
DeepseekV2ForSequenceClassification
|
python
|
openai__openai-python
|
tests/test_transform.py
|
{ "start": 3609, "end": 3925 }
|
class ____(TypedDict):
bar: Annotated[str, PropertyInfo(alias="Bar")]
@parametrize
@pytest.mark.asyncio
async def test_includes_unknown_keys(use_async: bool) -> None:
assert await transform({"bar": "bar", "baz_": {"FOO": 1}}, Foo6, use_async) == {
"Bar": "bar",
"baz_": {"FOO": 1},
}
|
Foo6
|
python
|
huggingface__transformers
|
src/transformers/models/edgetam_video/modeling_edgetam_video.py
|
{ "start": 52243, "end": 55035 }
|
class ____(nn.Module):
def __init__(self, config: EdgeTamVideoConfig):
super().__init__()
self.config = config
self.hidden_size = config.perceiver_resampler_hidden_size
self.num_attention_heads = config.perceiver_resampler_num_attention_heads
self.head_dim = config.perceiver_resampler_attention_head_dim
self.attention_dropout = config.perceiver_resampler_attention_dropout
self.inner_dim = self.head_dim * self.num_attention_heads
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.q_proj = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.k_proj = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.v_proj = nn.Linear(self.hidden_size, self.inner_dim, bias=False)
self.o_proj = nn.Linear(self.inner_dim, self.hidden_size, bias=False)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
positional_encoding: Optional[torch.Tensor] = None,
**kwargs,
) -> torch.Tensor:
# Project queries, keys, and values
query = self.q_proj(query)
key = self.k_proj(key)
value = self.v_proj(value)
# Reshape for multi-head attention
batch_size, seq_len_q = query.shape[:2]
query = query.view(batch_size, seq_len_q, self.num_attention_heads, self.head_dim).transpose(1, 2)
seq_len_kv = key.shape[1]
key = key.view(batch_size, seq_len_kv, self.num_attention_heads, self.head_dim).transpose(1, 2)
value = value.view(batch_size, seq_len_kv, self.num_attention_heads, self.head_dim).transpose(1, 2)
# Add positional encoding if provided
if positional_encoding is not None:
pos_encoding = positional_encoding.view(
batch_size, seq_len_kv, self.num_attention_heads, self.head_dim
).transpose(1, 2)
key = key + pos_encoding
value = value + pos_encoding
# Apply attention
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, _ = attention_interface(
self,
query,
key,
value,
attention_mask=None,
dropout=0.0 if not self.training else self.attention_dropout,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
# Reshape output
attn_output = attn_output.transpose(1, 2).contiguous().view(batch_size, seq_len_q, self.inner_dim)
return self.o_proj(attn_output)
|
EdgeTamVideoPerceiverAttention
|
python
|
pytorch__pytorch
|
torch/cuda/_sanitizer.py
|
{ "start": 4408, "end": 4689 }
|
class ____(Exception):
"""Wrapper class for errors reported by CUDA Sanitizer."""
def __init__(self, errors: list[SynchronizationError]):
self.errors = errors
def __str__(self):
return f"detected {len(self.errors)} errors"
@dataclass
|
CUDASanitizerErrors
|
python
|
ZoranPandovski__al-go-rithms
|
machine_learning/cluster_analysis/k-means/python/model/k_point.py
|
{ "start": 24, "end": 582 }
|
class ____:
def __init__(self, x: float, y: float):
self.x=x
self.y=y
self.centroid = None
def distance_to(self,point):
return sqrt(pow(self.x-point.x,2)+pow(self.y-point.y,2))
def set_centroid(self,centroid):
self.centroid=centroid
def __repr__(self):
return "KPoint2D:[x={}, y={}, centroid={}]".format(self.x, self.y, self.centroid)
def __eq__(self, other):
return isinstance(other, KPoint2D) and self.x==other.x and self.y==other.y and self.centroid == other.centroid
|
KPoint2D
|
python
|
PrefectHQ__prefect
|
tests/server/orchestration/api/test_workers.py
|
{ "start": 16440, "end": 34158 }
|
class ____:
async def test_update_work_pool(self, client, session, work_pool):
response = await client.patch(
f"/work_pools/{work_pool.name}",
json=dict(is_paused=True, concurrency_limit=5),
)
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
session.expunge_all()
result = await models.workers.read_work_pool(
session=session, work_pool_id=work_pool.id
)
assert result.is_paused is True
assert result.concurrency_limit == 5
assert_status_events(work_pool.name, ["prefect.work-pool.paused"])
async def test_update_work_pool_storage_configuration(self, client, work_pool):
bundle_upload_step = {
"prefect_aws.experimental.bundles.upload": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
bundle_execution_step = {
"prefect_aws.experimental.bundles.execute": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
default_result_storage_block_id = uuid.uuid4()
response = await client.get(f"/work_pools/{work_pool.name}")
assert response.status_code == 200
assert response.json()["storage_configuration"] == {
"bundle_upload_step": None,
"bundle_execution_step": None,
"default_result_storage_block_id": None,
}
new_data = schemas.actions.WorkPoolUpdate(
storage_configuration=schemas.core.WorkPoolStorageConfiguration(
bundle_upload_step=bundle_upload_step,
bundle_execution_step=bundle_execution_step,
default_result_storage_block_id=default_result_storage_block_id,
),
).model_dump(mode="json", exclude_unset=True)
response = await client.patch(
f"/work_pools/{work_pool.name}",
json=new_data,
)
assert response.status_code == 204
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.json()["storage_configuration"] == {
"bundle_upload_step": bundle_upload_step,
"bundle_execution_step": bundle_execution_step,
"default_result_storage_block_id": str(default_result_storage_block_id),
}
async def test_update_work_pool_storage_configuration_with_invalid_key(
self,
client,
work_pool,
):
response = await client.patch(
f"/work_pools/{work_pool.name}",
json={"storage_configuration": {"invalid_key": "invalid_value"}},
)
assert response.status_code == 422
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.json()["storage_configuration"] == {
"bundle_upload_step": None,
"bundle_execution_step": None,
"default_result_storage_block_id": None,
}
async def test_clear_work_pool_storage_configuration(
self,
client,
):
bundle_upload_step = {
"prefect_aws.experimental.bundles.upload": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
bundle_execution_step = {
"prefect_aws.experimental.bundles.execute": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
default_result_storage_block_id = uuid.uuid4()
create_response = await client.post(
"/work_pools/",
json={
"name": "olympic",
"type": "kubernetes",
"storage_configuration": {
"bundle_upload_step": bundle_upload_step,
"bundle_execution_step": bundle_execution_step,
"default_result_storage_block_id": str(
default_result_storage_block_id
),
},
},
)
assert create_response.status_code == 201
assert create_response.json()["storage_configuration"] == {
"bundle_upload_step": bundle_upload_step,
"bundle_execution_step": bundle_execution_step,
"default_result_storage_block_id": str(default_result_storage_block_id),
}
response = await client.patch(
"/work_pools/olympic",
json={"storage_configuration": {}},
)
assert response.status_code == 204
work_pool_response = await client.get("/work_pools/olympic")
assert work_pool_response.json()["storage_configuration"] == {
"bundle_upload_step": None,
"bundle_execution_step": None,
"default_result_storage_block_id": None,
}
async def test_work_pool_storage_configuration_not_cleared_on_unrelated_update(
self, client
):
bundle_upload_step = {
"prefect_aws.experimental.bundles.upload": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
bundle_execution_step = {
"prefect_aws.experimental.bundles.execute": {
"requires": "prefect-aws",
"bucket": "MY_BUCKET_NAME",
"aws_credentials_block_name": "MY_CREDS_BLOCK_NAME",
},
}
default_result_storage_block_id = uuid.uuid4()
await client.post(
"/work_pools/",
json={
"name": "olympic",
"type": "kubernetes",
"storage_configuration": {
"bundle_upload_step": bundle_upload_step,
"bundle_execution_step": bundle_execution_step,
"default_result_storage_block_id": str(
default_result_storage_block_id
),
},
},
)
response = await client.patch(
"/work_pools/olympic",
json={"description": "literally the newest"},
)
assert response.status_code == 204
work_pool_response = await client.get("/work_pools/olympic")
assert work_pool_response.json()["storage_configuration"] == {
"bundle_upload_step": bundle_upload_step,
"bundle_execution_step": bundle_execution_step,
"default_result_storage_block_id": str(default_result_storage_block_id),
}
async def test_update_work_pool_with_no_workers(self, client, work_pool):
assert work_pool.is_paused is False
assert work_pool.status == schemas.statuses.WorkPoolStatus.NOT_READY.value
response = await client.patch(
f"/work_pools/{work_pool.name}",
json=schemas.actions.WorkPoolUpdate(is_paused=True).model_dump(
mode="json", exclude_unset=True
),
)
assert response.status_code == 204, response.text
response = await client.get(f"/work_pools/{work_pool.name}")
assert response.json()["is_paused"] is True
assert response.json()["status"] == schemas.statuses.WorkPoolStatus.PAUSED.value
# Unpause the work pool
response = await client.patch(
f"/work_pools/{work_pool.name}",
json=schemas.actions.WorkPoolUpdate(is_paused=False).model_dump(
mode="json", exclude_unset=True
),
)
assert response.status_code == 204, response.text
response = await client.get(f"/work_pools/{work_pool.name}")
assert response.json()["is_paused"] is False
assert (
response.json()["status"] == schemas.statuses.WorkPoolStatus.NOT_READY.value
)
assert_status_events(
work_pool.name, ["prefect.work-pool.paused", "prefect.work-pool.not-ready"]
)
async def test_unpause_work_pool_with_online_workers(self, client, work_pool):
# Heartbeat a worker to make the work pool ready
heartbeat_response = await client.post(
f"/work_pools/{work_pool.name}/workers/heartbeat",
json=dict(name="test-worker"),
)
assert heartbeat_response.status_code == status.HTTP_204_NO_CONTENT
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.status_code == status.HTTP_200_OK
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.READY.value
)
# Pause the work pool
pause_response = await client.patch(
f"/work_pools/{work_pool.name}",
json=schemas.actions.WorkPoolUpdate(is_paused=True).model_dump(
mode="json", exclude_unset=True
),
)
assert pause_response.status_code == 204
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.json()["is_paused"] is True
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.PAUSED.value
)
# Unpause the work pool
unpause_response = await client.patch(
f"/work_pools/{work_pool.name}",
json=schemas.actions.WorkPoolUpdate(is_paused=False).model_dump(
mode="json", exclude_unset=True
),
)
assert unpause_response.status_code == 204
work_pool_response = await client.get(f"/work_pools/{work_pool.name}")
assert work_pool_response.json()["is_paused"] is False
assert (
work_pool_response.json()["status"]
== schemas.statuses.WorkPoolStatus.READY.value
)
assert_status_events(
work_pool.name,
[
"prefect.work-pool.ready",
"prefect.work-pool.paused",
"prefect.work-pool.ready",
],
)
async def test_update_work_pool_zero_concurrency(
self, client, session, work_pool, db
):
response = await client.patch(
f"/work_pools/{work_pool.name}",
json=dict(concurrency_limit=0),
)
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
async with db.session_context() as session:
result = await models.workers.read_work_pool(
session=session, work_pool_id=work_pool.id
)
assert result.concurrency_limit == 0
async def test_update_work_pool_invalid_concurrency(
self, client, session, work_pool
):
response = await client.patch(
f"/work_pools/{work_pool.name}",
json=dict(concurrency_limit=-5),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
session.expunge_all()
result = await models.workers.read_work_pool(
session=session, work_pool_id=work_pool.id
)
assert result.concurrency_limit is None
@pytest.mark.parametrize("name", RESERVED_POOL_NAMES)
async def test_update_reserved_pool(self, session, client, name):
assert await models.workers.create_work_pool(
session=session, work_pool=WorkPoolCreate(name=name)
)
await session.commit()
# fails if we try to update the description
response = await client.patch(
f"/work_pools/{name}",
json=dict(description=name, is_paused=True, concurrency_limit=5),
)
assert response.status_code == status.HTTP_403_FORBIDDEN, response.text
assert "reserved for internal use" in response.json()["detail"]
# succeeds if just pause and concurrency
response = await client.patch(
f"/work_pools/{name}",
json=dict(is_paused=True, concurrency_limit=5),
)
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
async def test_update_work_pool_template(self, session, client):
name = "Pool 1"
base_job_template = {
"job_configuration": {
"command": "{{ command }}",
},
"variables": {
"properties": {
"command": {
"type": "array",
"title": "Command",
"items": {"type": "string"},
"default": ["echo", "hello"],
},
},
"required": [],
},
}
pool = await models.workers.create_work_pool(
session=session,
work_pool=WorkPoolCreate(name=name, base_job_template=base_job_template),
)
await session.commit()
base_job_template["variables"]["properties"]["command"]["default"] = ["woof!"]
response = await client.patch(
f"/work_pools/{name}",
json=dict(base_job_template=base_job_template),
)
assert response.status_code == status.HTTP_204_NO_CONTENT, response.text
session.expunge_all()
result = await models.workers.read_work_pool(
session=session, work_pool_id=pool.id
)
assert result.base_job_template["variables"]["properties"]["command"][
"default"
] == ["woof!"]
async def test_update_work_pool_template_validation_missing_keys(
self, client, session
):
name = "Pool 1"
await models.workers.create_work_pool(
session=session,
work_pool=WorkPoolCreate(name=name),
)
await session.commit()
session.expunge_all()
response = await client.patch(
f"/work_pools/{name}",
json=dict(name=name, base_job_template={"foo": "bar", "x": ["y"]}),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert (
"The `base_job_template` must contain both a `job_configuration` key and a"
" `variables` key." in response.json()["exception_detail"][0]["msg"]
)
async def test_update_work_pool_template_validation_missing_variables(
self, client, session
):
name = "Pool 1"
missing_variable_template = {
"job_configuration": {
"command": "{{ other_variable }}",
},
"variables": {
"properties": {
"command": {
"type": "array",
"title": "Command",
"items": {"type": "string"},
"default": ["echo", "hello"],
},
},
"required": [],
},
}
await models.workers.create_work_pool(
session=session,
work_pool=WorkPoolCreate(name=name),
)
await session.commit()
session.expunge_all()
response = await client.patch(
f"/work_pools/{name}",
json=dict(name=name, base_job_template=missing_variable_template),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert (
"The variables specified in the job configuration template must be "
"present as properties in the variables schema. "
"Your job configuration uses the following undeclared "
"variable(s): other_variable."
in response.json()["exception_detail"][0]["msg"]
)
async def test_update_work_pool_template_validation_missing_nested_variables(
self, client, session
):
name = "Pool 1"
missing_variable_template = {
"job_configuration": {
"config": {
"command": "{{ missing_variable }}",
}
},
"variables": {
"properties": {
"command": {
"type": "array",
"title": "Command",
"items": {"type": "string"},
"default": ["echo", "hello"],
},
},
"required": [],
},
}
await models.workers.create_work_pool(
session=session,
work_pool=WorkPoolCreate(name=name),
)
await session.commit()
session.expunge_all()
response = await client.patch(
f"/work_pools/{name}",
json=dict(name="Pool 1", base_job_template=missing_variable_template),
)
assert response.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY, (
response.text
)
assert (
"The variables specified in the job configuration template must be "
"present as properties in the variables schema. "
"Your job configuration uses the following undeclared "
"variable(s): missing_variable."
in response.json()["exception_detail"][0]["msg"]
)
|
TestUpdateWorkPool
|
python
|
crytic__slither
|
slither/utils/halstead.py
|
{ "start": 4882, "end": 7911 }
|
class ____:
"""Class to hold the Halstead metrics for all contracts. Contains methods useful for reporting.
There are 3 sections in the report:
1. Core metrics (n1, n2, N1, N2)
2. Extended metrics 1 (n, N, S, V)
3. Extended metrics 2 (D, E, T, B)
"""
contracts: List[Contract] = field(default_factory=list)
contract_metrics: OrderedDict = field(default_factory=OrderedDict)
title: str = "Halstead complexity metrics"
full_text: str = ""
core: SectionInfo = field(default=SectionInfo)
extended1: SectionInfo = field(default=SectionInfo)
extended2: SectionInfo = field(default=SectionInfo)
CORE_KEYS = (
"Total Operators",
"Unique Operators",
"Total Operands",
"Unique Operands",
)
EXTENDED1_KEYS = (
"Vocabulary",
"Program Length",
"Estimated Length",
"Volume",
)
EXTENDED2_KEYS = (
"Difficulty",
"Effort",
"Time",
"Estimated Bugs",
)
SECTIONS: Tuple[Tuple[str, str, Tuple[str]]] = (
("Core", "core", CORE_KEYS),
("Extended 1/2", "extended1", EXTENDED1_KEYS),
("Extended 2/2", "extended2", EXTENDED2_KEYS),
)
def __post_init__(self) -> None:
# Compute the metrics for each contract and for all contracts.
self.update_contract_metrics()
self.add_all_contracts_metrics()
self.update_reporting_sections()
def update_contract_metrics(self) -> None:
for contract in self.contracts:
self.contract_metrics[contract.name] = HalsteadContractMetrics(contract=contract)
def add_all_contracts_metrics(self) -> None:
# If there is more than one contract, compute the metrics across all contracts.
if len(self.contracts) <= 1:
return
all_operators = [
operator
for contract in self.contracts
for operator in self.contract_metrics[contract.name].all_operators
]
all_operands = [
operand
for contract in self.contracts
for operand in self.contract_metrics[contract.name].all_operands
]
self.contract_metrics["ALL CONTRACTS"] = HalsteadContractMetrics(
None, all_operators=all_operators, all_operands=all_operands
)
def update_reporting_sections(self) -> None:
# Create the table and text for each section.
data = {
contract.name: self.contract_metrics[contract.name].to_dict()
for contract in self.contracts
}
for (title, attr, keys) in self.SECTIONS:
pretty_table = make_pretty_table(["Contract", *keys], data, False)
section_title = f"{self.title} ({title})"
txt = f"\n\n{section_title}:\n{pretty_table}\n"
self.full_text += txt
setattr(
self,
attr,
SectionInfo(title=section_title, pretty_table=pretty_table, txt=txt),
)
|
HalsteadMetrics
|
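The HalsteadMetrics row above only names the report keys; for reference, here is a small self-contained sketch of the textbook Halstead formulas behind those keys (n1/n2 are unique operators/operands, N1/N2 are totals). The constants 18 and 3000 are the classic Stroud number and bug-estimate divisor; slither's own implementation may differ in detail.

```python
import math


def halstead(n1: int, n2: int, N1: int, N2: int) -> dict:
    n = n1 + n2                                   # Vocabulary
    N = N1 + N2                                   # Program Length
    S = n1 * math.log2(n1) + n2 * math.log2(n2)   # Estimated Length
    V = N * math.log2(n)                          # Volume
    D = (n1 / 2) * (N2 / n2)                      # Difficulty
    E = D * V                                     # Effort
    T = E / 18                                    # Time (seconds)
    B = V / 3000                                  # Estimated Bugs
    return {"Vocabulary": n, "Program Length": N, "Estimated Length": S,
            "Volume": V, "Difficulty": D, "Effort": E, "Time": T,
            "Estimated Bugs": B}


print(halstead(n1=10, n2=20, N1=50, N2=60))
```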
python
|
scipy__scipy
|
scipy/signal/tests/test_signaltools.py
|
{
"start": 71852,
"end": 94744
}
|
class ____:
def generate(self, shape, xp):
prodshape = shape if isinstance(shape, int) else math.prod(shape)
x = xp.linspace(0, prodshape - 1, prodshape)
if not isinstance(shape, int):
x = xp.reshape(x, shape)
return self.convert_dtype(x, xp)
def convert_dtype(self, arr, xp):
if self.dtype == np.dtype('O'):
arr = np.asarray(arr)
out = np.empty(arr.shape, self.dtype)
iter = np.nditer([arr, out], ['refs_ok','zerosize_ok'],
[['readonly'],['writeonly']])
for x, y in iter:
y[...] = self.type(x[()])
return out
else:
dtype = (getattr(xp, self.dtype)
if isinstance(self.dtype, str)
else self.dtype)
return xp.asarray(arr, dtype=dtype)
@skip_xp_backends('cupy', reason='XXX https://github.com/scipy/scipy/issues/23539')
def test_invalid_params(self, xp):
"""Verify all exceptions are raised. """
b, a, x = xp.asarray([1]), xp.asarray([2]), xp.asarray([3, 4])
with pytest.raises(ValueError, match="^Parameter b is not"):
lfilter(xp.eye(2), a, x) # b not one-dimensional
with pytest.raises(ValueError, match="^Parameter b is not"):
lfilter(xp.asarray([]), a, x) # b empty
with pytest.raises(ValueError, match="^Parameter a is not"):
lfilter(b, xp.eye(2), x) # a not one-dimensional
with pytest.raises(ValueError, match="^Parameter a is not"):
lfilter(b, xp.asarray([]), x) # a empty
with pytest.raises(NotImplementedError, match="^Parameter's dtypes produced "):
b, a, x = (xp.astype(v_, xp.uint64, copy=False) for v_ in (b, a, x))
lfilter(b, a, x) # fails with uint64 dtype
def test_rank_1_IIR(self, xp):
x = self.generate((6,), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, -0.5], xp)
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.], xp)
assert_array_almost_equal(lfilter(b, a, x), y_r)
def test_rank_1_FIR(self, xp):
x = self.generate((6,), xp)
b = self.convert_dtype([1, 1], xp)
a = self.convert_dtype([1], xp)
y_r = self.convert_dtype([0, 1, 3, 5, 7, 9.], xp)
assert_array_almost_equal(lfilter(b, a, x), y_r)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_rank_1_IIR_init_cond(self, xp):
x = self.generate((6,), xp)
b = self.convert_dtype([1, 0, -1], xp)
a = self.convert_dtype([0.5, -0.5], xp)
zi = self.convert_dtype([1, 2], xp)
y_r = self.convert_dtype([1, 5, 9, 13, 17, 21], xp)
zf_r = self.convert_dtype([13, -10], xp)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_rank_1_FIR_init_cond(self, xp):
x = self.generate((6,), xp)
b = self.convert_dtype([1, 1, 1], xp)
a = self.convert_dtype([1], xp)
zi = self.convert_dtype([1, 1], xp)
y_r = self.convert_dtype([1, 2, 3, 6, 9, 12.], xp)
zf_r = self.convert_dtype([9, 5], xp)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, y_r)
assert_array_almost_equal(zf, zf_r)
def test_rank_2_IIR_axis_0(self, xp):
x = self.generate((4, 3), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, 0.5], xp)
y_r2_a0 = self.convert_dtype([[0, 2, 4], [6, 4, 2], [0, 2, 4],
[6, 4, 2]], xp)
y = lfilter(b, a, x, axis=0)
assert_array_almost_equal(y_r2_a0, y)
def test_rank_2_IIR_axis_1(self, xp):
x = self.generate((4, 3), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, 0.5], xp)
y_r2_a1 = self.convert_dtype([[0, 2, 0], [6, -4, 6], [12, -10, 12],
[18, -16, 18]], xp)
y = lfilter(b, a, x, axis=1)
assert_array_almost_equal(y_r2_a1, y)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_rank_2_IIR_axis_0_init_cond(self, xp):
x = self.generate((4, 3), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, 0.5], xp)
zi = self.convert_dtype(np.ones((4,1)), xp)
y_r2_a0_1 = self.convert_dtype([[1, 1, 1], [7, -5, 7], [13, -11, 13],
[19, -17, 19]], xp)
zf_r = self.convert_dtype([-5, -17, -29, -41], xp)[:, np.newaxis]
y, zf = lfilter(b, a, x, axis=1, zi=zi)
assert_array_almost_equal(y_r2_a0_1, y)
assert_array_almost_equal(zf, zf_r)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_rank_2_IIR_axis_1_init_cond(self, xp):
x = self.generate((4, 3), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, 0.5], xp)
zi = self.convert_dtype(np.ones((1, 3)), xp)
y_r2_a0_0 = self.convert_dtype([[1, 3, 5], [5, 3, 1],
[1, 3, 5], [5, 3, 1]], xp)
zf_r = self.convert_dtype([[-23, -23, -23]], xp)
y, zf = lfilter(b, a, x, axis=0, zi=zi)
assert_array_almost_equal(y_r2_a0_0, y)
assert_array_almost_equal(zf, zf_r)
def test_rank_3_IIR(self, xp):
x = self.generate((4, 3, 2), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, 0.5], xp)
a_np, b_np, x_np = map(_xp_copy_to_numpy, (a, b, x))
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b_np, a_np, w), axis, x_np)
assert_array_almost_equal(y, xp.asarray(y_r))
@xfail_xp_backends("cupy", reason="inaccurate")
def test_rank_3_IIR_init_cond(self, xp):
x = self.generate((4, 3, 2), xp)
b = self.convert_dtype([1, -1], xp)
a = self.convert_dtype([0.5, 0.5], xp)
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 1
zi = self.convert_dtype(xp.ones(zi_shape), xp)
zi1 = self.convert_dtype([1], xp)
y, zf = lfilter(b, a, x, axis, zi)
b_np, a_np, zi1_np = map(_xp_copy_to_numpy, (b, a, zi1))
def lf0(w):
return lfilter(b_np, a_np, w, zi=zi1_np)[0]
def lf1(w):
return lfilter(b_np, a_np, w, zi=zi1_np)[1]
y_r = np.apply_along_axis(lf0, axis, _xp_copy_to_numpy(x))
zf_r = np.apply_along_axis(lf1, axis, _xp_copy_to_numpy(x))
assert_array_almost_equal(y, xp.asarray(y_r))
assert_array_almost_equal(zf, xp.asarray(zf_r))
def test_rank_3_FIR(self, xp):
x = self.generate((4, 3, 2), xp)
b = self.convert_dtype([1, 0, -1], xp)
a = self.convert_dtype([1], xp)
a_np, b_np, x_np = map(_xp_copy_to_numpy, (a, b, x))
for axis in range(x.ndim):
y = lfilter(b, a, x, axis)
y_r = np.apply_along_axis(lambda w: lfilter(b_np, a_np, w), axis, x_np)
assert_array_almost_equal(y, xp.asarray(y_r))
@xfail_xp_backends("cupy", reason="inaccurate")
def test_rank_3_FIR_init_cond(self, xp):
x = self.generate((4, 3, 2), xp)
b = self.convert_dtype([1, 0, -1], xp)
a = self.convert_dtype([1], xp)
x_np, b_np, a_np = map(_xp_copy_to_numpy, (x, b, a))
for axis in range(x.ndim):
zi_shape = list(x.shape)
zi_shape[axis] = 2
zi = self.convert_dtype(xp.ones(zi_shape), xp)
zi1 = self.convert_dtype([1, 1], xp)
zi1_np = _xp_copy_to_numpy(zi1)
y, zf = lfilter(b, a, x, axis, zi)
b_np, a_np, zi1_np = map(_xp_copy_to_numpy, (b, a, zi1))
def lf0(w):
return lfilter(b_np, a_np, w, zi=zi1_np)[0]
def lf1(w):
return lfilter(b_np, a_np, w, zi=zi1_np)[1]
y_r = np.apply_along_axis(lf0, axis, x_np)
zf_r = np.apply_along_axis(lf1, axis, x_np)
assert_array_almost_equal(y, xp.asarray(y_r))
assert_array_almost_equal(zf, xp.asarray(zf_r))
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_zi_pseudobroadcast(self, xp):
x = self.generate((4, 5, 20), xp)
b, a = signal.butter(8, 0.2, output='ba')
b = self.convert_dtype(b, xp)
a = self.convert_dtype(a, xp)
zi_size = b.shape[0] - 1
# lfilter requires x.ndim == zi.ndim exactly. However, zi can have
# length 1 dimensions.
zi_full = self.convert_dtype(xp.ones((4, 5, zi_size)), xp)
zi_sing = self.convert_dtype(xp.ones((1, 1, zi_size)), xp)
y_full, zf_full = lfilter(b, a, x, zi=zi_full)
y_sing, zf_sing = lfilter(b, a, x, zi=zi_sing)
assert_array_almost_equal(y_sing, y_full)
assert_array_almost_equal(zf_full, zf_sing)
# lfilter does not prepend ones
assert_raises(ValueError, lfilter, b, a, x, -1, xp.ones(zi_size))
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_scalar_a(self, xp):
# a can be a scalar.
x = self.generate(6, xp)
b = self.convert_dtype([1, 0, -1], xp)
a = self.convert_dtype([1], xp)
y_r = self.convert_dtype([0, 1, 2, 2, 2, 2], xp)
y = lfilter(b, a[0], x)
assert_array_almost_equal(y, y_r)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_zi_some_singleton_dims(self, xp):
# lfilter doesn't really broadcast (no prepending of 1's). But does
# do singleton expansion if x and zi have the same ndim. This was
# broken only if a subset of the axes were singletons (gh-4681).
x = self.convert_dtype(xp.zeros((3, 2, 5), dtype=xp.int64), xp)
b = self.convert_dtype(xp.ones(5, dtype=xp.int64), xp)
a = self.convert_dtype(xp.asarray([1, 0, 0]), xp)
zi = np.ones((3, 1, 4), dtype=np.int64)
zi[1, :, :] *= 2
zi[2, :, :] *= 3
zi = xp.asarray(zi)
zi = self.convert_dtype(zi, xp)
zf_expected = self.convert_dtype(xp.zeros((3, 2, 4), dtype=xp.int64), xp)
y_expected = np.zeros((3, 2, 5), dtype=np.int64)
y_expected[:, :, :4] = [[[1]], [[2]], [[3]]]
y_expected = xp.asarray(y_expected)
y_expected = self.convert_dtype(y_expected, xp)
# IIR
y_iir, zf_iir = lfilter(b, a, x, -1, zi)
assert_array_almost_equal(y_iir, y_expected)
assert_array_almost_equal(zf_iir, zf_expected)
# FIR
y_fir, zf_fir = lfilter(b, a[0], x, -1, zi)
assert_array_almost_equal(y_fir, y_expected)
assert_array_almost_equal(zf_fir, zf_expected)
def base_bad_size_zi(self, b, a, x, axis, zi, xp):
b = self.convert_dtype(b, xp)
a = self.convert_dtype(a, xp)
x = self.convert_dtype(x, xp)
zi = self.convert_dtype(zi, xp)
assert_raises(ValueError, lfilter, b, a, x, axis, zi)
@skip_xp_backends('cupy', reason='cupy does not raise')
def test_bad_size_zi(self, xp):
# rank 1
x1 = xp.arange(6)
self.base_bad_size_zi([1], [1], x1, -1, [1], xp)
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1], xp)
self.base_bad_size_zi([1, 1], [1], x1, -1, [[0]], xp)
self.base_bad_size_zi([1, 1], [1], x1, -1, [0, 1, 2], xp)
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [[0]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x1, -1, [0, 1, 2], xp)
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1], xp)
self.base_bad_size_zi([1], [1, 1], x1, -1, [[0]], xp)
self.base_bad_size_zi([1], [1, 1], x1, -1, [0, 1, 2], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [[0], [1]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x1, -1, [0, 1, 2, 3], xp)
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0], xp)
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [[0], [1]], xp)
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2], xp)
self.base_bad_size_zi([1, 1], [1, 1, 1], x1, -1, [0, 1, 2, 3], xp)
# rank 2
x2 = np.arange(12).reshape((4,3))
x2 = xp.asarray(x2)
# for axis=0 zi.shape should == (max(len(a),len(b))-1, 3)
self.base_bad_size_zi([1], [1], x2, 0, [0], xp)
# for each of these there are 5 cases tested (in this order):
# 1. not deep enough, right # elements
# 2. too deep, right # elements
# 3. right depth, right # elements, transposed
# 4. right depth, too few elements
# 5. right depth, too many elements
self.base_bad_size_zi([1, 1], [1], x2, 0, [0, 1, 2], xp)
self.base_bad_size_zi([1, 1], [1], x2, 0, [[[0, 1, 2]]], xp)
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0], [1], [2]], xp)
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0, 1]], xp)
self.base_bad_size_zi([1, 1], [1], x2, 0, [[0, 1, 2, 3]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [0, 1, 2, 3, 4, 5], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[[0, 1, 2], [3, 4, 5]]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0, 1], [2, 3], [4, 5]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0, 1], [2, 3]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 0, [[0, 1, 2, 3], [4, 5, 6, 7]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 0, [0, 1, 2], xp)
self.base_bad_size_zi([1], [1, 1], x2, 0, [[[0, 1, 2]]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0], [1], [2]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0, 1]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 0, [[0, 1, 2, 3]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [0, 1, 2, 3, 4, 5], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[[0, 1, 2], [3, 4, 5]]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0, 1], [2, 3], [4, 5]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0, 1], [2, 3]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 0, [[0, 1, 2, 3], [4, 5, 6, 7]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [0, 1, 2, 3, 4, 5], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[[0, 1, 2], [3, 4, 5]]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0, 1], [2, 3], [4, 5]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0, [[0, 1], [2, 3]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 0,
[[0, 1, 2, 3], [4, 5, 6, 7]], xp)
# for axis=1 zi.shape should == (4, max(len(a),len(b))-1)
self.base_bad_size_zi([1], [1], x2, 1, [0], xp)
self.base_bad_size_zi([1, 1], [1], x2, 1, [0, 1, 2, 3], xp)
self.base_bad_size_zi([1, 1], [1], x2, 1, [[[0], [1], [2], [3]]], xp)
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0, 1, 2, 3]], xp)
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0], [1], [2]], xp)
self.base_bad_size_zi([1, 1], [1], x2, 1, [[0], [1], [2], [3], [4]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [0, 1, 2, 3, 4, 5, 6, 7], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 1,
[[[0, 1], [2, 3], [4, 5], [6, 7]]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0, 1, 2, 3], [4, 5, 6, 7]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 1, [[0, 1], [2, 3], [4, 5]], xp)
self.base_bad_size_zi([1, 1, 1], [1], x2, 1,
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 1, [0, 1, 2, 3], xp)
self.base_bad_size_zi([1], [1, 1], x2, 1, [[[0], [1], [2], [3]]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0, 1, 2, 3]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0], [1], [2]], xp)
self.base_bad_size_zi([1], [1, 1], x2, 1, [[0], [1], [2], [3], [4]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [0, 1, 2, 3, 4, 5, 6, 7], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 1,
[[[0, 1], [2, 3], [4, 5], [6, 7]]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0, 1, 2, 3], [4, 5, 6, 7]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0, 1], [2, 3], [4, 5]], xp)
self.base_bad_size_zi([1], [1, 1, 1], x2, 1, [[0, 1],
[2, 3], [4, 5], [6, 7], [8, 9]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [0, 1, 2, 3, 4, 5, 6, 7], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1,
[[[0, 1], [2, 3], [4, 5], [6, 7]]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1,
[[0, 1, 2, 3], [4, 5, 6, 7]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1, [[0, 1], [2, 3], [4, 5]], xp)
self.base_bad_size_zi([1, 1, 1], [1, 1], x2, 1,
[[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]], xp)
def test_empty_zi(self, xp):
# Regression test for #880: empty array for zi crashes.
x = self.generate((5,), xp)
a = self.convert_dtype([1], xp)
b = self.convert_dtype([1], xp)
zi = self.convert_dtype([], xp)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, x)
assert zf.dtype == (getattr(xp, self.dtype)
if isinstance(self.dtype, str)
else self.dtype)
assert xp_size(zf) == 0
@skip_xp_backends('jax.numpy', reason='jax does not support inplace ops')
@pytest.mark.parametrize('a', (1, [1], [1, .5, 1.5], 2, [2], [2, 1, 3]),
ids=str)
@make_xp_test_case(lfiltic)
def test_lfiltic(self, a, xp):
# Test for #22470: lfiltic does not handle `a[0] != 1`
# and, more generally, test that lfiltic behaves consistently with lfilter
if is_cupy(xp) and isinstance(a, int | float):
pytest.skip('cupy does not support scalar filter coefficients')
x = self.generate(6, xp) # arbitrary input
b = self.convert_dtype([.5, 1., .2], xp) # arbitrary b
a = self.convert_dtype(a, xp)
N = xp_size(a) - 1
M = xp_size(b) - 1
K = M + N if is_cupy(xp) else max(N, M)
# compute reference initial conditions as final conditions of lfilter
y1, zi_1 = lfilter(b, a, x, zi=self.generate(K, xp))
# compute initial conditions from lfiltic
zi_2 = lfiltic(b, a, xp.flip(y1), xp.flip(x))
# compare lfiltic's output with reference
assert_array_almost_equal(zi_1, zi_2)
@make_xp_test_case(lfiltic)
def test_lfiltic_bad_coeffs(self, xp):
# Test for invalid filter coefficients (wrong shape or zero `a[0]`)
assert_raises(ValueError, lfiltic, [1, 2], [], [0, 0], [0, 1])
assert_raises(ValueError, lfiltic, [1, 2], [0, 2], [0, 0], [0, 1])
assert_raises(ValueError, lfiltic, [1, 2], [[1], [2]], [0, 0], [0, 1])
assert_raises(ValueError, lfiltic, [[1], [2]], [1], [0, 0], [0, 1])
@skip_xp_backends(
'array_api_strict', reason='int64 and float64 cannot be promoted together'
)
@skip_xp_backends('jax.numpy', reason='jax dtype defaults differ')
@make_xp_test_case(lfiltic)
def test_lfiltic_bad_zi(self, xp):
# Regression test for #3699: bad initial conditions
a = self.convert_dtype([1], xp)
b = self.convert_dtype([1], xp)
# "y" sets the datatype of zi, so it truncates if int
zi = lfiltic(b, a, xp.asarray([1., 0]))
zi_1 = lfiltic(b, a, xp.asarray([1.0, 0]))
zi_2 = lfiltic(b, a, xp.asarray([True, False]))
xp_assert_equal(zi, zi_1)
check_dtype_arg = {} if self.dtype == object else {'check_dtype': False}
xp_assert_equal(zi, zi_2, **check_dtype_arg)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_short_x_FIR(self, xp):
# regression test for #5116
# x shorter than b, with non None zi fails
a = self.convert_dtype([1], xp)
b = self.convert_dtype([1, 0, -1], xp)
zi = self.convert_dtype([2, 7], xp)
x = self.convert_dtype([72], xp)
ye = self.convert_dtype([74], xp)
zfe = self.convert_dtype([7, -72], xp)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
@skip_xp_backends('cupy', reason='XXX https://github.com/cupy/cupy/pull/8677')
def test_short_x_IIR(self, xp):
# regression test for #5116
# x shorter than b, with non None zi fails
a = self.convert_dtype([1, 1], xp)
b = self.convert_dtype([1, 0, -1], xp)
zi = self.convert_dtype([2, 7], xp)
x = self.convert_dtype([72], xp)
ye = self.convert_dtype([74], xp)
zfe = self.convert_dtype([-67, -72], xp)
y, zf = lfilter(b, a, x, zi=zi)
assert_array_almost_equal(y, ye)
assert_array_almost_equal(zf, zfe)
def test_do_not_modify_a_b_IIR(self, xp):
x = self.generate((6,), xp)
b = self.convert_dtype([1, -1], xp)
b0 = xp_copy(b, xp=xp)
a = self.convert_dtype([0.5, -0.5], xp)
a0 = xp_copy(a, xp=xp)
y_r = self.convert_dtype([0, 2, 4, 6, 8, 10.], xp)
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
xp_assert_equal(b, b0)
xp_assert_equal(a, a0)
def test_do_not_modify_a_b_FIR(self, xp):
x = self.generate((6,), xp)
b = self.convert_dtype([1, 0, 1], xp)
b0 = xp_copy(b, xp=xp)
a = self.convert_dtype([2], xp)
a0 = xp_copy(a, xp=xp)
y_r = self.convert_dtype([0, 0.5, 1, 2, 3, 4.], xp)
y_f = lfilter(b, a, x)
assert_array_almost_equal(y_f, y_r)
xp_assert_equal(b, b0)
xp_assert_equal(a, a0)
@skip_xp_backends(np_only=True)
@pytest.mark.parametrize("a", [1.0, [1.0], np.array(1.0)])
@pytest.mark.parametrize("b", [1.0, [1.0], np.array(1.0)])
def test_scalar_input(self, a, b, xp):
data = np.random.randn(10)
data = xp.asarray(data)
xp_assert_close(
lfilter(xp.asarray([1.0]), xp.asarray([1.0]), data),
lfilter(b, a, data)
)
|
_TestLinearFilter
|
python
|
pyca__cryptography
|
src/cryptography/hazmat/primitives/asymmetric/dh.py
|
{ "start": 2422, "end": 3912 }
|
class ____(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def key_size(self) -> int:
"""
The bit length of the prime modulus.
"""
@abc.abstractmethod
def public_key(self) -> DHPublicKey:
"""
The DHPublicKey associated with this private key.
"""
@abc.abstractmethod
def parameters(self) -> DHParameters:
"""
The DHParameters object associated with this private key.
"""
@abc.abstractmethod
def exchange(self, peer_public_key: DHPublicKey) -> bytes:
"""
Given peer's DHPublicKey, carry out the key exchange and
return shared key as bytes.
"""
@abc.abstractmethod
def private_numbers(self) -> DHPrivateNumbers:
"""
Returns a DHPrivateNumbers.
"""
@abc.abstractmethod
def private_bytes(
self,
encoding: _serialization.Encoding,
format: _serialization.PrivateFormat,
encryption_algorithm: _serialization.KeySerializationEncryption,
) -> bytes:
"""
Returns the key serialized as bytes.
"""
@abc.abstractmethod
def __copy__(self) -> DHPrivateKey:
"""
Returns a copy.
"""
@abc.abstractmethod
def __deepcopy__(self, memo: dict) -> DHPrivateKey:
"""
Returns a deep copy.
"""
DHPrivateKeyWithSerialization = DHPrivateKey
DHPrivateKey.register(rust_openssl.dh.DHPrivateKey)
|
DHPrivateKey
|
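The DHPrivateKey row above defines the abstract Diffie-Hellman private-key interface from pyca/cryptography. Below is a short usage sketch exercising `public_key()`, `exchange()`, and `key_size` with the library's concrete implementation; the 512-bit size is only to keep parameter generation fast in a sketch and is not a recommendation.

```python
from cryptography.hazmat.primitives.asymmetric import dh

# Shared group parameters; generation is slow, and 512 bits is far too small
# for real use (it merely keeps this sketch quick).
parameters = dh.generate_parameters(generator=2, key_size=512)

# Each party derives its own private key from the shared parameters.
alice = parameters.generate_private_key()
bob = parameters.generate_private_key()

# exchange() takes the peer's DHPublicKey and returns the shared secret bytes.
assert alice.exchange(bob.public_key()) == bob.exchange(alice.public_key())

# key_size is the bit length of the prime modulus, as documented above.
assert alice.key_size == 512
```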
python
|
run-llama__llama_index
|
llama-index-integrations/tools/llama-index-tools-mcp/llama_index/tools/mcp/base.py
|
{ "start": 512, "end": 8240 }
|
class ____(
BaseToolSpec, TypeResolutionMixin, TypeCreationMixin, FieldExtractionMixin
):
"""
MCPToolSpec gets the tools from an MCP client (anything that implements ClientSession) and converts them to LlamaIndex FunctionTool objects.
Args:
client: An MCP client instance implementing ClientSession, and it should support the following methods in ClientSession:
- list_tools: List all tools.
- call_tool: Call a tool.
- list_resources: List all resources.
- read_resource: Read a resource.
allowed_tools: If set, only return tools with the specified names.
include_resources: Whether to include resources in the tool list.
"""
def __init__(
self,
client: ClientSession,
allowed_tools: Optional[List[str]] = None,
include_resources: bool = False,
) -> None:
self.client = client
self.allowed_tools = allowed_tools
self.include_resources = include_resources
self.properties_cache = {}
async def fetch_tools(self) -> List[Any]:
"""
An asynchronous method to get the tools list from MCP Client. If allowed_tools is set, it will filter the tools.
Returns:
A list of tools, each tool object needs to contain name, description, inputSchema properties.
"""
response = await self.client.list_tools()
tools = response.tools if hasattr(response, "tools") else []
if self.allowed_tools is None:
# get all tools by default
return tools
if any(self.allowed_tools):
return [tool for tool in tools if tool.name in self.allowed_tools]
logging.warning(
"Returning an empty tool list due to the empty `allowed_tools` list. Please ensure `allowed_tools` is set appropriately."
)
return []
async def fetch_resources(self) -> List[Resource]:
"""
An asynchronous method to get the resources list from MCP Client.
"""
static_response = await self.client.list_resources()
dynamic_response = await self.client.list_resource_templates()
static_resources = (
static_response.resources if hasattr(static_response, "resources") else []
)
dynamic_resources = (
dynamic_response.resourceTemplates
if hasattr(dynamic_response, "resourceTemplates")
else []
)
resources = static_resources + dynamic_resources
if self.allowed_tools is None:
return resources
if any(self.allowed_tools):
return [
resource
for resource in resources
if resource.name in self.allowed_tools
]
logging.warning(
"Returning an empty resource list due to the empty `allowed_tools` list. Please ensure `allowed_tools` is set appropriately."
)
return []
def _create_tool_fn(self, tool_name: str) -> Callable:
"""
Create a tool call function for a specified MCP tool name. The function internally wraps the call_tool call to the MCP Client.
"""
async def async_tool_fn(**kwargs):
return await self.client.call_tool(tool_name, kwargs)
return async_tool_fn
def _create_resource_fn(self, resource_uri: str) -> Callable:
"""
Create a resource call function for a specified MCP resource name. The function internally wraps the read_resource call to the MCP Client.
"""
async def async_resource_fn():
return await self.client.read_resource(resource_uri)
return async_resource_fn
async def to_tool_list_async(self) -> List[FunctionTool]:
"""
Asynchronous method to convert MCP tools to FunctionTool objects.
Returns:
A list of FunctionTool objects.
"""
tools_list = await self.fetch_tools()
function_tool_list: List[FunctionTool] = []
for tool in tools_list:
fn = self._create_tool_fn(tool.name)
# Create a Pydantic model based on the tool inputSchema
model_schema = self.create_model_from_json_schema(
tool.inputSchema, model_name=f"{tool.name}_Schema"
)
metadata = ToolMetadata(
name=tool.name,
description=tool.description,
fn_schema=model_schema,
)
function_tool = FunctionTool.from_defaults(
async_fn=fn, tool_metadata=metadata
)
function_tool_list.append(function_tool)
if self.include_resources:
resources_list = await self.fetch_resources()
for resource in resources_list:
if hasattr(resource, "uri"):
uri = resource.uri
elif hasattr(resource, "template"):
uri = resource.template
fn = self._create_resource_fn(uri)
function_tool_list.append(
FunctionTool.from_defaults(
async_fn=fn,
name=resource.name.replace("/", "_"),
description=resource.description,
)
)
return function_tool_list
def to_tool_list(self) -> List[FunctionTool]:
"""
Synchronous interface: Convert MCP Client tools to FunctionTool objects.
Note: This method should not be called in an asynchronous environment, otherwise an exception will be thrown. Use to_tool_list_async instead.
Returns:
A list of FunctionTool objects.
"""
return patch_sync(self.to_tool_list_async)()
def create_model_from_json_schema(
self,
schema: dict[str, Any],
model_name: str = "DynamicModel",
) -> type[BaseModel]:
"""
To create a Pydantic model from the JSON Schema of MCP tools.
Args:
schema: A JSON Schema dictionary containing properties and required fields.
model_name: The name of the model.
Returns:
A Pydantic model class.
"""
defs = schema.get("$defs", {})
# Process all type definitions
for cls_name, cls_schema in defs.items():
self.properties_cache[cls_name] = self._create_model(
cls_schema,
cls_name,
defs,
)
return self._create_model(schema, model_name)
def _create_model(
self,
schema: dict,
model_name: str,
defs: dict = {},
) -> type[BaseModel]:
"""Create a Pydantic model from a schema."""
if model_name in self.properties_cache:
return self.properties_cache[model_name]
fields = self._extract_fields(schema, defs)
model = create_model(model_name, **fields)
self.properties_cache[model_name] = model
return model
def patch_sync(func_async: Callable) -> Callable:
def patched_sync(*args: Any, **kwargs: Any) -> Any:
try:
loop = asyncio.get_running_loop()
except RuntimeError:
loop = None
# If the current environment is asynchronous, raise an exception to prompt the use of the asynchronous interface
if loop and loop.is_running():
raise RuntimeError(
"In an asynchronous environment, synchronous calls are not supported. Please use the asynchronous interface (e.g., to_tool_list_async) instead."
)
return asyncio.run(func_async(*args, **kwargs))
return patched_sync
|
McpToolSpec
|
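The McpToolSpec row above documents its constructor arguments (client, allowed_tools, include_resources) and the async conversion entry point. A hypothetical usage sketch follows; `session` stands in for an already-initialized MCP ClientSession, the tool names are invented, and the import path is inferred from the file location shown in the row.

```python
from llama_index.tools.mcp import McpToolSpec  # path inferred from the row above


async def list_mcp_tools(session) -> None:
    spec = McpToolSpec(
        client=session,                               # any ClientSession implementation
        allowed_tools=["search_docs", "fetch_page"],  # hypothetical tool names
        include_resources=False,
    )
    tools = await spec.to_tool_list_async()
    for tool in tools:
        print(tool.metadata.name, "-", tool.metadata.description)


# Run with asyncio.run(list_mcp_tools(session)) once a ClientSession is available.
```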
python
|
django__django
|
tests/apps/apps.py
|
{ "start": 530, "end": 612 }
|
class ____(AppConfig):
name = "apps"
label = "relabeled"
|
RelabeledAppsConfig
|
python
|
getsentry__sentry
|
src/sentry/integrations/gitlab/webhooks.py
|
{ "start": 3307, "end": 5012 }
|
class ____(SCMWebhook, ABC):
@property
def provider(self) -> str:
return IntegrationProviderSlug.GITLAB.value
def get_repo(
self, integration: RpcIntegration, organization: RpcOrganization, event: Mapping[str, Any]
):
"""
Given a webhook payload, get the associated Repository record.
Assumes a 'project' key in event payload.
"""
try:
project_id = event["project"]["id"]
except KeyError:
logger.info(
"gitlab.webhook.missing-projectid", extra={"integration_id": integration.id}
)
logger.exception("Missing project ID.")
raise Http404()
external_id = "{}:{}".format(integration.metadata["instance"], project_id)
try:
repo = Repository.objects.get(
organization_id=organization.id, provider=PROVIDER_NAME, external_id=external_id
)
except Repository.DoesNotExist:
return None
return repo
def update_repo_data(self, repo: Repository, event: Mapping[str, Any]):
"""
Given a webhook payload, update stored repo data if needed.
Assumes a 'project' key in event payload, with certain subkeys. Rework
this if that stops being a safe assumption.
"""
project = event["project"]
url_from_event = project["web_url"]
path_from_event = project["path_with_namespace"]
if repo.url != url_from_event or repo.config.get("path") != path_from_event:
repo.update(
url=url_from_event,
config=dict(repo.config, path=path_from_event),
)
|
GitlabWebhook
|
python
|
plotly__plotly.py
|
plotly/graph_objs/layout/scene/_yaxis.py
|
{ "start": 235, "end": 76693 }
|
class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.scene"
_path_str = "layout.scene.yaxis"
_valid_props = {
"autorange",
"autorangeoptions",
"autotypenumbers",
"backgroundcolor",
"calendar",
"categoryarray",
"categoryarraysrc",
"categoryorder",
"color",
"dtick",
"exponentformat",
"gridcolor",
"gridwidth",
"hoverformat",
"labelalias",
"linecolor",
"linewidth",
"maxallowed",
"minallowed",
"minexponent",
"mirror",
"nticks",
"range",
"rangemode",
"separatethousands",
"showaxeslabels",
"showbackground",
"showexponent",
"showgrid",
"showline",
"showspikes",
"showticklabels",
"showtickprefix",
"showticksuffix",
"spikecolor",
"spikesides",
"spikethickness",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"type",
"visible",
"zeroline",
"zerolinecolor",
"zerolinewidth",
}
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided and it has a value for both the lower and
upper bound, `autorange` is set to False. Using "min" applies
autorange only to set the minimum. Using "max" applies
autorange only to set the maximum. Using *min reversed* applies
autorange only to set the minimum on a reversed axis. Using
*max reversed* applies autorange only to set the maximum on a
reversed axis. Using "reversed" applies autorange on both ends
and reverses the axis direction.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed', 'min reversed', 'max reversed',
'min', 'max']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
@property
def autorangeoptions(self):
"""
The 'autorangeoptions' property is an instance of Autorangeoptions
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Autorangeoptions`
- A dict of string/value properties that will be passed
to the Autorangeoptions constructor
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Autorangeoptions
"""
return self["autorangeoptions"]
@autorangeoptions.setter
def autorangeoptions(self, val):
self["autorangeoptions"] = val
@property
def autotypenumbers(self):
"""
Using "strict" a numeric string in trace data is not converted
to a number. Using *convert types* a numeric string in trace
data may be treated as a number during automatic axis `type`
detection. Defaults to layout.autotypenumbers.
The 'autotypenumbers' property is an enumeration that may be specified as:
- One of the following enumeration values:
['convert types', 'strict']
Returns
-------
Any
"""
return self["autotypenumbers"]
@autotypenumbers.setter
def autotypenumbers(self, val):
self["autotypenumbers"] = val
@property
def backgroundcolor(self):
"""
Sets the background color of this axis' wall.
The 'backgroundcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["backgroundcolor"]
@backgroundcolor.setter
def backgroundcolor(self, val):
self["backgroundcolor"] = val
@property
def calendar(self):
"""
Sets the calendar system to use for `range` and `tick0` if this
is a date axis. This does not set the calendar for interpreting
data on this axis, that's specified in the trace or via the
global `layout.calendar`
The 'calendar' property is an enumeration that may be specified as:
- One of the following enumeration values:
['chinese', 'coptic', 'discworld', 'ethiopian',
'gregorian', 'hebrew', 'islamic', 'jalali', 'julian',
'mayan', 'nanakshahi', 'nepali', 'persian', 'taiwan',
'thai', 'ummalqura']
Returns
-------
Any
"""
return self["calendar"]
@calendar.setter
def calendar(self, val):
self["calendar"] = val
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`. Set `categoryorder` to *total ascending* or
*total descending* if order should be determined by the
numerical order of the values. Similarly, the order can be
determined by the min, max, sum, mean, geometric mean or median
of all the values.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array', 'total ascending', 'total descending', 'min
ascending', 'min descending', 'max ascending', 'max
descending', 'sum ascending', 'sum descending', 'mean
ascending', 'mean descending', 'geometric mean ascending',
'geometric mean descending', 'median ascending', 'median
descending']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B. "SI" uses prefixes from "femto" f (10^-15) to "tera" T
(10^12). *SI extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or *SI
extended* is used and the exponent is beyond the above ranges,
the formatting rule will automatically be switched to the power
notation.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def gridcolor(self):
"""
Sets the color of the grid lines.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the grid lines.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def hoverformat(self):
"""
Sets the hover text formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'hoverformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["hoverformat"]
@hoverformat.setter
def hoverformat(self, val):
self["hoverformat"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
@property
def maxallowed(self):
"""
Determines the maximum range of this axis.
The 'maxallowed' property accepts values of any type
Returns
-------
Any
"""
return self["maxallowed"]
@maxallowed.setter
def maxallowed(self, val):
self["maxallowed"] = val
@property
def minallowed(self):
"""
Determines the minimum range of this axis.
The 'minallowed' property accepts values of any type
Returns
-------
Any
"""
return self["minallowed"]
@minallowed.setter
def minallowed(self, val):
self["minallowed"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number. This only
has an effect when `tickformat` is "SI" or "B".
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def mirror(self):
"""
Determines if the axis lines or/and ticks are mirrored to the
opposite side of the plotting area. If True, the axis lines are
mirrored. If "ticks", the axis lines and ticks are mirrored. If
False, mirroring is disabled. If "all", axis lines are mirrored
on all shared-axes subplots. If "allticks", axis lines and
ticks are mirrored on all shared-axes subplots.
The 'mirror' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, 'ticks', False, 'all', 'allticks']
Returns
-------
Any
"""
return self["mirror"]
@mirror.setter
def mirror(self, val):
self["mirror"] = val
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is an integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
@property
def range(self):
"""
Sets the range of this axis. If the axis `type` is "log", then
you must take the log of your desired range (e.g. to set the
range from 1 to 100, set the range from 0 to 2). If the axis
`type` is "date", it should be date strings, like date data,
though Date objects and unix milliseconds will be accepted and
converted to strings. If the axis `type` is "category", it
should be numbers, using the scale where each category is
assigned a serial number from zero in the order it appears.
Leaving either or both elements `null` impacts the default
`autorange`.
The 'range' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'range[0]' property accepts values of any type
(1) The 'range[1]' property accepts values of any type
Returns
-------
list
"""
return self["range"]
@range.setter
def range(self, val):
self["range"] = val
@property
def rangemode(self):
"""
If "normal", the range is computed in relation to the extrema
of the input data. If "tozero", the range extends to 0,
regardless of the input data If "nonnegative", the range is
non-negative, regardless of the input data. Applies only to
linear axes.
The 'rangemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'tozero', 'nonnegative']
Returns
-------
Any
"""
return self["rangemode"]
@rangemode.setter
def rangemode(self, val):
self["rangemode"] = val
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
@property
def showaxeslabels(self):
"""
Sets whether or not this axis is labeled
The 'showaxeslabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showaxeslabels"]
@showaxeslabels.setter
def showaxeslabels(self, val):
self["showaxeslabels"] = val
@property
def showbackground(self):
"""
Sets whether or not this axis' wall has a background color.
The 'showbackground' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showbackground"]
@showbackground.setter
def showbackground(self, val):
self["showbackground"] = val
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
@property
def showgrid(self):
"""
Determines whether or not grid lines are drawn. If True, the
grid lines are drawn at every tick mark.
The 'showgrid' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showgrid"]
@showgrid.setter
def showgrid(self, val):
self["showgrid"] = val
@property
def showline(self):
"""
Determines whether or not a line bounding this axis is drawn.
The 'showline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showline"]
@showline.setter
def showline(self, val):
self["showline"] = val
@property
def showspikes(self):
"""
Sets whether or not spikes starting from data points to this
axis' wall are shown on hover.
The 'showspikes' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showspikes"]
@showspikes.setter
def showspikes(self, val):
self["showspikes"] = val
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
@property
def spikecolor(self):
"""
Sets the color of the spikes.
The 'spikecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["spikecolor"]
@spikecolor.setter
def spikecolor(self, val):
self["spikecolor"] = val
@property
def spikesides(self):
"""
Sets whether or not spikes extending from the projection data
points to this axis' wall boundaries are shown on hover.
The 'spikesides' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["spikesides"]
@spikesides.setter
def spikesides(self, val):
self["spikesides"] = val
@property
def spikethickness(self):
"""
Sets the thickness (in px) of the spikes.
The 'spikethickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["spikethickness"]
@spikethickness.setter
def spikethickness(self, val):
self["spikethickness"] = val
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is an angle (in degrees) that may be
specified as a number between -180 and 180.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
@property
def tickfont(self):
"""
Sets the tick font.
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to d3's date
formatter: "%h" for half of the year as a decimal number as
well as "%{n}f" for fractional seconds with n digits. For
example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.layout.scene.yaxis.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Returns
-------
tuple[plotly.graph_objs.layout.scene.yaxis.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
"""
When used in a template (as
layout.template.layout.scene.yaxis.tickformatstopdefaults),
sets the default property values to use for elements of
layout.scene.yaxis.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' ticks are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for `tickvals`.
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.layout.scene.yaxis.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Returns
-------
plotly.graph_objs.layout.scene.yaxis.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
@property
def type(self):
"""
Sets the axis type. By default, plotly attempts to determine
the axis type by looking into the data of the traces that
referenced the axis in question.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['-', 'linear', 'log', 'date', 'category']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def visible(self):
"""
A single toggle to hide the axis while preserving interaction
like dragging. Default is true when a cheater plot is present
on the axis, otherwise false
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def zeroline(self):
"""
Determines whether or not a line is drawn along the 0 value
of this axis. If True, the zero line is drawn on top of the
grid lines.
The 'zeroline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["zeroline"]
@zeroline.setter
def zeroline(self, val):
self["zeroline"] = val
@property
def zerolinecolor(self):
"""
Sets the line color of the zero line.
The 'zerolinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["zerolinecolor"]
@zerolinecolor.setter
def zerolinecolor(self, val):
self["zerolinecolor"] = val
@property
def zerolinewidth(self):
"""
Sets the width (in px) of the zero line.
The 'zerolinewidth' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["zerolinewidth"]
@zerolinewidth.setter
def zerolinewidth(self, val):
self["zerolinewidth"] = val
@property
def _prop_descriptions(self):
return """\
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided and it has a
value for both the lower and upper bound, `autorange`
is set to False. Using "min" applies autorange only to
set the minimum. Using "max" applies autorange only to
set the maximum. Using *min reversed* applies autorange
only to set the minimum on a reversed axis. Using *max
reversed* applies autorange only to set the maximum on
a reversed axis. Using "reversed" applies autorange on
both ends and reverses the axis direction.
autorangeoptions
:class:`plotly.graph_objects.layout.scene.yaxis.Autoran
geoptions` instance or dict with compatible properties
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
maxallowed
Determines the maximum range of this axis.
minallowed
Determines the minimum range of this axis.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears. Leaving either or both elements `null` impacts
the default `autorange`.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.ya
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.yaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.yaxis.Title`
instance or dict with compatible properties
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
"""
def __init__(
self,
arg=None,
autorange=None,
autorangeoptions=None,
autotypenumbers=None,
backgroundcolor=None,
calendar=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
color=None,
dtick=None,
exponentformat=None,
gridcolor=None,
gridwidth=None,
hoverformat=None,
labelalias=None,
linecolor=None,
linewidth=None,
maxallowed=None,
minallowed=None,
minexponent=None,
mirror=None,
nticks=None,
range=None,
rangemode=None,
separatethousands=None,
showaxeslabels=None,
showbackground=None,
showexponent=None,
showgrid=None,
showline=None,
showspikes=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
spikecolor=None,
spikesides=None,
spikethickness=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
type=None,
visible=None,
zeroline=None,
zerolinecolor=None,
zerolinewidth=None,
**kwargs,
):
"""
Construct a new YAxis object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.scene.YAxis`
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided and it has a
value for both the lower and upper bound, `autorange`
is set to False. Using "min" applies autorange only to
set the minimum. Using "max" applies autorange only to
set the maximum. Using *min reversed* applies autorange
only to set the minimum on a reversed axis. Using *max
reversed* applies autorange only to set the maximum on
a reversed axis. Using "reversed" applies autorange on
both ends and reverses the axis direction.
autorangeoptions
:class:`plotly.graph_objects.layout.scene.yaxis.Autoran
geoptions` instance or dict with compatible properties
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
backgroundcolor
Sets the background color of this axis' wall.
calendar
Sets the calendar system to use for `range` and `tick0`
if this is a date axis. This does not set the calendar
for interpreting data on this axis, that's specified in
the trace or via the global `layout.calendar`
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`. Set `categoryorder`
to *total ascending* or *total descending* if order
should be determined by the numerical order of the
values. Similarly, the order can be determined by the
min, max, sum, mean, geometric mean or median of all
the values.
color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B. "SI" uses prefixes
from "femto" f (10^-15) to "tera" T (10^12). *SI
extended* covers instead the full SI range from
"quecto" q (10^-30) to "quetta" Q (10^30). If "SI" or
*SI extended* is used and the exponent is beyond the
above ranges, the formatting rule will automatically be
switched to the power notation.
gridcolor
Sets the color of the grid lines.
gridwidth
Sets the width (in px) of the grid lines.
hoverformat
Sets the hover text formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
maxallowed
Determines the maximum range of this axis.
minallowed
Determines the minimum range of this axis.
minexponent
Hide SI prefix for 10^n if |n| is below this number.
This only has an effect when `tickformat` is "SI" or
"B".
mirror
Determines if the axis lines or/and ticks are mirrored
to the opposite side of the plotting area. If True, the
axis lines are mirrored. If "ticks", the axis lines and
ticks are mirrored. If False, mirroring is disabled. If
"all", axis lines are mirrored on all shared-axes
subplots. If "allticks", axis lines and ticks are
mirrored on all shared-axes subplots.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears. Leaving either or both elements `null` impacts
the default `autorange`.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data. Applies only to linear axes.
separatethousands
If "true", even 4-digit integers are separated
showaxeslabels
Sets whether or not this axis is labeled
showbackground
Sets whether or not this axis' wall has a background
color.
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showspikes
Sets whether or not spikes starting from data points to
this axis' wall are shown on hover.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
spikecolor
Sets the color of the spikes.
spikesides
Sets whether or not spikes extending from the
projection data points to this axis' wall boundaries
are shown on hover.
spikethickness
Sets the thickness (in px) of the spikes.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.layout.scene.ya
xis.Tickformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.layout.scen
e.yaxis.tickformatstopdefaults), sets the default
property values to use for elements of
layout.scene.yaxis.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' ticks are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.layout.scene.yaxis.Title`
instance or dict with compatible properties
type
Sets the axis type. By default, plotly attempts to
determine the axis type by looking into the data of
the traces that referenced the axis in question.
visible
A single toggle to hide the axis while preserving
interaction like dragging. Default is true when a
cheater plot is present on the axis, otherwise false
zeroline
Determines whether or not a line is drawn along the
0 value of this axis. If True, the zero line is drawn
on top of the grid lines.
zerolinecolor
Sets the line color of the zero line.
zerolinewidth
Sets the width (in px) of the zero line.
Returns
-------
YAxis
"""
super().__init__("yaxis")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.scene.YAxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.scene.YAxis`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("autorange", arg, autorange)
self._set_property("autorangeoptions", arg, autorangeoptions)
self._set_property("autotypenumbers", arg, autotypenumbers)
self._set_property("backgroundcolor", arg, backgroundcolor)
self._set_property("calendar", arg, calendar)
self._set_property("categoryarray", arg, categoryarray)
self._set_property("categoryarraysrc", arg, categoryarraysrc)
self._set_property("categoryorder", arg, categoryorder)
self._set_property("color", arg, color)
self._set_property("dtick", arg, dtick)
self._set_property("exponentformat", arg, exponentformat)
self._set_property("gridcolor", arg, gridcolor)
self._set_property("gridwidth", arg, gridwidth)
self._set_property("hoverformat", arg, hoverformat)
self._set_property("labelalias", arg, labelalias)
self._set_property("linecolor", arg, linecolor)
self._set_property("linewidth", arg, linewidth)
self._set_property("maxallowed", arg, maxallowed)
self._set_property("minallowed", arg, minallowed)
self._set_property("minexponent", arg, minexponent)
self._set_property("mirror", arg, mirror)
self._set_property("nticks", arg, nticks)
self._set_property("range", arg, range)
self._set_property("rangemode", arg, rangemode)
self._set_property("separatethousands", arg, separatethousands)
self._set_property("showaxeslabels", arg, showaxeslabels)
self._set_property("showbackground", arg, showbackground)
self._set_property("showexponent", arg, showexponent)
self._set_property("showgrid", arg, showgrid)
self._set_property("showline", arg, showline)
self._set_property("showspikes", arg, showspikes)
self._set_property("showticklabels", arg, showticklabels)
self._set_property("showtickprefix", arg, showtickprefix)
self._set_property("showticksuffix", arg, showticksuffix)
self._set_property("spikecolor", arg, spikecolor)
self._set_property("spikesides", arg, spikesides)
self._set_property("spikethickness", arg, spikethickness)
self._set_property("tick0", arg, tick0)
self._set_property("tickangle", arg, tickangle)
self._set_property("tickcolor", arg, tickcolor)
self._set_property("tickfont", arg, tickfont)
self._set_property("tickformat", arg, tickformat)
self._set_property("tickformatstops", arg, tickformatstops)
self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
self._set_property("ticklen", arg, ticklen)
self._set_property("tickmode", arg, tickmode)
self._set_property("tickprefix", arg, tickprefix)
self._set_property("ticks", arg, ticks)
self._set_property("ticksuffix", arg, ticksuffix)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("tickvals", arg, tickvals)
self._set_property("tickvalssrc", arg, tickvalssrc)
self._set_property("tickwidth", arg, tickwidth)
self._set_property("title", arg, title)
self._set_property("type", arg, type)
self._set_property("visible", arg, visible)
self._set_property("zeroline", arg, zeroline)
self._set_property("zerolinecolor", arg, zerolinecolor)
self._set_property("zerolinewidth", arg, zerolinewidth)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
YAxis
|
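A short usage sketch of the tick options documented above (a hedged example, assuming a local plotly install; the figure data and tick values are arbitrary placeholders):

```python
# Place ticks explicitly on a 3D scene's y-axis, matching the
# tickmode / tickvals / ticktext parameters described in the YAxis docstring.
import plotly.graph_objects as go

fig = go.Figure(go.Scatter3d(x=[1, 2, 3], y=[10, 20, 30], z=[0, 1, 2]))
fig.update_layout(
    scene=dict(
        yaxis=go.layout.scene.YAxis(
            tickmode="array",                 # ticks go exactly where tickvals says
            tickvals=[10, 20, 30],
            ticktext=["low", "mid", "high"],
            tickprefix="y=",
            zeroline=True,
            zerolinecolor="red",
        )
    )
)
# fig.show()
```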
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/checkpointing/test_model_checkpoint.py
|
{
"start": 35120,
"end": 35229
}
|
class ____(BoringModel):
def on_fit_end(self):
raise RuntimeError("Trouble!")
|
TroubledModelOnFitEnd
|
python
|
huggingface__transformers
|
src/transformers/models/markuplm/modeling_markuplm.py
|
{
"start": 34640,
"end": 39769
}
|
class ____(MarkupLMPreTrainedModel):
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification.__init__ with bert->markuplm, Bert->MarkupLM
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.markuplm = MarkupLMModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.Tensor] = None,
xpath_tags_seq: Optional[torch.Tensor] = None,
xpath_subs_seq: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
token_type_ids: Optional[torch.Tensor] = None,
position_ids: Optional[torch.Tensor] = None,
inputs_embeds: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple[torch.Tensor], SequenceClassifierOutput]:
r"""
xpath_tags_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Tag IDs for each token in the input sequence, padded up to config.max_depth.
xpath_subs_seq (`torch.LongTensor` of shape `(batch_size, sequence_length, config.max_depth)`, *optional*):
Subscript IDs for each token in the input sequence, padded up to config.max_depth.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
Examples:
```python
>>> from transformers import AutoProcessor, AutoModelForSequenceClassification
>>> import torch
>>> processor = AutoProcessor.from_pretrained("microsoft/markuplm-base")
>>> model = AutoModelForSequenceClassification.from_pretrained("microsoft/markuplm-base", num_labels=7)
>>> html_string = "<html> <head> <title>Page Title</title> </head> </html>"
>>> encoding = processor(html_string, return_tensors="pt")
>>> with torch.no_grad():
... outputs = model(**encoding)
>>> loss = outputs.loss
>>> logits = outputs.logits
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.markuplm(
input_ids,
xpath_tags_seq=xpath_tags_seq,
xpath_subs_seq=xpath_subs_seq,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=True,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
__all__ = [
"MarkupLMForQuestionAnswering",
"MarkupLMForSequenceClassification",
"MarkupLMForTokenClassification",
"MarkupLMModel",
"MarkupLMPreTrainedModel",
]
|
MarkupLMForSequenceClassification
|
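The loss selection in the forward pass above depends on `config.problem_type`, which is inferred from `num_labels` and the dtype of `labels` when left unset. A minimal standalone sketch of that same dispatch with dummy tensors (`pick_loss` is a hypothetical helper for illustration, not part of the model):

```python
import torch
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

def pick_loss(num_labels: int, labels: torch.Tensor):
    # Mirrors the problem_type inference performed in the forward pass above.
    if num_labels == 1:
        return MSELoss()                      # regression
    if labels.dtype in (torch.long, torch.int):
        return CrossEntropyLoss()             # single-label classification
    return BCEWithLogitsLoss()                # multi-label classification

print(pick_loss(1, torch.tensor([0.7])))                  # MSELoss()
print(pick_loss(7, torch.tensor([3, 5])))                 # CrossEntropyLoss()
print(pick_loss(7, torch.randint(0, 2, (2, 7)).float()))  # BCEWithLogitsLoss()
```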
python
|
Pylons__pyramid
|
tests/test_view.py
|
{
"start": 30596,
"end": 32030
}
|
class ____(unittest.TestCase):
def test_it(self):
from pyramid.view import view_defaults
@view_defaults(route_name='abc', renderer='def')
class Foo:
pass
self.assertEqual(Foo.__view_defaults__['route_name'], 'abc')
self.assertEqual(Foo.__view_defaults__['renderer'], 'def')
def test_it_inheritance_not_overridden(self):
from pyramid.view import view_defaults
@view_defaults(route_name='abc', renderer='def')
class Foo:
pass
class Bar(Foo):
pass
self.assertEqual(Bar.__view_defaults__['route_name'], 'abc')
self.assertEqual(Bar.__view_defaults__['renderer'], 'def')
def test_it_inheritance_overriden(self):
from pyramid.view import view_defaults
@view_defaults(route_name='abc', renderer='def')
class Foo:
pass
@view_defaults(route_name='ghi')
class Bar(Foo):
pass
self.assertEqual(Bar.__view_defaults__['route_name'], 'ghi')
self.assertFalse('renderer' in Bar.__view_defaults__)
def test_it_inheritance_overriden_empty(self):
from pyramid.view import view_defaults
@view_defaults(route_name='abc', renderer='def')
class Foo:
pass
@view_defaults()
class Bar(Foo):
pass
self.assertEqual(Bar.__view_defaults__, {})
|
Test_view_defaults
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_omni_moe/modeling_qwen3_omni_moe.py
|
{
"start": 71433,
"end": 73558
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config, layer_idx):
super().__init__()
self.self_attn = Qwen3OmniMoeThinkerTextAttention(config, layer_idx)
if (layer_idx not in config.mlp_only_layers) and (
config.num_experts > 0 and (layer_idx + 1) % config.decoder_sparse_step == 0
):
self.mlp = Qwen3OmniMoeThinkerTextSparseMoeBlock(config)
else:
self.mlp = Qwen3OmniMoeThinkerTextMLP(config, intermediate_size=config.intermediate_size)
self.input_layernorm = Qwen3OmniMoeThinkerTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.post_attention_layernorm = Qwen3OmniMoeThinkerTextRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.hidden_size = config.hidden_size
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> torch.Tensor:
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
# Self Attention
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.post_attention_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
@auto_docstring
|
Qwen3OmniMoeThinkerTextDecoderLayer
|
python
|
pyparsing__pyparsing
|
tests/test_examples.py
|
{
"start": 115,
"end": 1637
}
|
class ____(unittest.TestCase):
def _run(self, name):
mod = import_module("examples." + name)
# use pyparsing context to reset each test to clean
# pyparsing settings
with ppt.reset_pyparsing_context():
getattr(mod, "main", lambda *args, **kwargs: None)()
def test_numerics(self):
self._run("numerics")
def test_parse_python_value(self):
self._run("parse_python_value")
def test_tap(self):
self._run("TAP")
def test_roman_numerals(self):
self._run("roman_numerals")
def test_sexp_parser(self):
self._run("sexpParser")
def test_oc(self):
self._run("oc")
def test_delta_time(self):
self._run("delta_time")
def test_eval_arith(self):
self._run("eval_arith")
def test_select_parser(self):
self._run("select_parser")
def test_booleansearchparser(self):
self._run("booleansearchparser")
def test_rosettacode(self):
self._run("rosettacode")
def test_excelExpr(self):
self._run("excel_expr")
def test_lucene_grammar(self):
self._run("lucene_grammar")
def test_range_check(self):
self._run("range_check")
def test_stackish(self):
self._run("stackish")
def test_email_parser(self):
self._run("email_address_parser")
def test_mongodb_query_parser(self):
self._run("mongodb_query_expression")
def test_lox_parser(self):
self._run("lox_parser")
|
TestExamples
|
python
|
mlflow__mlflow
|
mlflow/system_metrics/metrics/cpu_monitor.py
|
{
"start": 138,
"end": 776
}
|
class ____(BaseMetricsMonitor):
"""Class for monitoring CPU stats."""
def collect_metrics(self):
# Get CPU metrics.
cpu_percent = psutil.cpu_percent()
self._metrics["cpu_utilization_percentage"].append(cpu_percent)
system_memory = psutil.virtual_memory()
self._metrics["system_memory_usage_megabytes"].append(system_memory.used / 1e6)
self._metrics["system_memory_usage_percentage"].append(
system_memory.used / system_memory.total * 100
)
def aggregate_metrics(self):
return {k: round(sum(v) / len(v), 1) for k, v in self._metrics.items()}
|
CPUMonitor
|
python
|
pytorch__pytorch
|
tools/linter/adapters/pyproject_linter.py
|
{
"start": 536,
"end": 8081
}
|
class ____(NamedTuple):
path: str | None
line: int | None
char: int | None
code: str
severity: LintSeverity
name: str
original: str | None
replacement: str | None
description: str | None
def format_error_message(
filename: str,
error: Exception | None = None,
*,
message: str | None = None,
) -> LintMessage:
if message is None and error is not None:
message = f"Failed due to {error.__class__.__name__}:\n{error}"
return LintMessage(
path=filename,
line=None,
char=None,
code="PYPROJECT",
severity=LintSeverity.ERROR,
name="pyproject.toml consistency",
original=None,
replacement=None,
description=message,
)
def check_file(filename: str) -> list[LintMessage]:
path = Path(filename).absolute()
try:
pyproject = tomllib.loads(path.read_text(encoding="utf-8"))
except (tomllib.TOMLDecodeError, OSError) as err:
return [format_error_message(filename, err)]
if not (isinstance(pyproject, dict) and isinstance(pyproject.get("project"), dict)):
return [
format_error_message(
filename,
                message=(
                    "'project' section in pyproject.toml must be present and be a table."
),
)
]
project = pyproject["project"]
requires_python = project.get("requires-python")
if requires_python is not None:
if not isinstance(requires_python, str):
return [
format_error_message(
filename,
message="'project.requires-python' must be a string.",
)
]
python_major = 3
specifier_set = SpecifierSet(requires_python)
for specifier in specifier_set:
if Version(specifier.version).major != python_major:
return [
format_error_message(
filename,
message=(
"'project.requires-python' must only specify "
f"Python {python_major} versions, but found {specifier.version}."
),
)
]
large_minor = 1000
supported_python_versions = list(
specifier_set.filter(
f"{python_major}.{minor}" for minor in range(large_minor + 1)
)
)
if not supported_python_versions:
return [
format_error_message(
filename,
message=(
"'project.requires-python' must specify at least one "
f"Python {python_major} version, but found {requires_python!r}."
),
)
]
if f"{python_major}.0" in supported_python_versions:
return [
format_error_message(
filename,
message=(
"'project.requires-python' must specify a minimum version, "
f"but found {requires_python!r}."
),
)
]
# if f"{python_major}.{large_minor}" in supported_python_versions:
# return [
# format_error_message(
# filename,
# message=(
# "'project.requires-python' must specify a maximum version, "
# f"but found {requires_python!r}."
# ),
# )
# ]
classifiers = project.get("classifiers")
if not (
isinstance(classifiers, list)
and all(isinstance(c, str) for c in classifiers)
):
return [
format_error_message(
filename,
message="'project.classifiers' must be an array of strings.",
)
]
if len(set(classifiers)) != len(classifiers):
return [
format_error_message(
filename,
message="'project.classifiers' must not contain duplicates.",
)
]
# python_version_classifiers = [
# c
# for c in classifiers
# if (
# c.startswith("Programming Language :: Python :: ")
# and not c.endswith((f":: {python_major}", f":: {python_major} :: Only"))
# )
# ]
# if python_version_classifiers:
# python_version_classifier_set = set(python_version_classifiers)
# supported_python_version_classifier_set = {
# f"Programming Language :: Python :: {v}"
# for v in supported_python_versions
# }
# if python_version_classifier_set != supported_python_version_classifier_set:
# missing_classifiers = sorted(
# supported_python_version_classifier_set
# - python_version_classifier_set
# )
# extra_classifiers = sorted(
# python_version_classifier_set
# - supported_python_version_classifier_set
# )
# if missing_classifiers:
# return [
# format_error_message(
# filename,
# message=(
# "'project.classifiers' is missing the following classifier(s):\n"
# + "\n".join(f" {c!r}" for c in missing_classifiers)
# ),
# )
# ]
# if extra_classifiers:
# return [
# format_error_message(
# filename,
# message=(
# "'project.classifiers' contains extra classifier(s):\n"
# + "\n".join(f" {c!r}" for c in extra_classifiers)
# ),
# )
# ]
return []
def main() -> None:
parser = argparse.ArgumentParser(
description="Check consistency of pyproject.toml files.",
fromfile_prefix_chars="@",
)
parser.add_argument(
"--verbose",
action="store_true",
help="verbose logging",
)
parser.add_argument(
"filenames",
nargs="+",
help="paths to lint",
)
args = parser.parse_args()
logging.basicConfig(
format="<%(processName)s:%(levelname)s> %(message)s",
level=logging.NOTSET
if args.verbose
else logging.DEBUG
if len(args.filenames) < 1000
else logging.INFO,
stream=sys.stderr,
)
with concurrent.futures.ProcessPoolExecutor(
max_workers=os.cpu_count(),
) as executor:
futures = {executor.submit(check_file, x): x for x in args.filenames}
for future in concurrent.futures.as_completed(futures):
try:
for lint_message in future.result():
print(json.dumps(lint_message._asdict()), flush=True)
except Exception:
logging.critical('Failed at "%s".', futures[future])
raise
if __name__ == "__main__":
main()
|
LintMessage
|
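The `requires-python` check above relies on the `packaging` library to expand a specifier set into concrete minor versions; a small illustration of those same calls with made-up version bounds:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet(">=3.9,<3.13")                       # hypothetical bounds
assert all(Version(s.version).major == 3 for s in spec)  # only Python 3 specifiers
supported = list(spec.filter(f"3.{minor}" for minor in range(1001)))
print(supported)  # ['3.9', '3.10', '3.11', '3.12']
```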
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_theme01.py
|
{
"start": 315,
"end": 1326
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("theme01.xlsx")
def test_create_file(self):
"""Test the addition of a theme file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [55993088, 55995008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/callbacks/test_early_stopping.py
|
{
"start": 20348,
"end": 25650
}
|
class ____(BoringModel):
def __init__(self):
super().__init__()
self.epoch_losses = [5.0, 4.0, 3.0, 2.0, 1.0]
def on_validation_epoch_end(self):
loss = self.epoch_losses[self.current_epoch] if self.current_epoch < len(self.epoch_losses) else 0.1
self.log("val_loss", loss)
@pytest.mark.parametrize(
(
"model_cls",
"early_stopping_kwargs",
"trainer_kwargs",
"expected_reason",
"reason_message_substr",
"should_stop",
"state_dict_override",
),
[
# Patience exhausted
(
ModelWithHighLoss,
{"monitor": "val_loss", "patience": 2, "verbose": True},
{"max_epochs": 10, "enable_progress_bar": False},
EarlyStoppingReason.PATIENCE_EXHAUSTED,
"did not improve",
True,
None,
),
# Stopping threshold
(
ModelWithDecreasingLoss,
{"monitor": "val_loss", "stopping_threshold": 0.6, "mode": "min", "verbose": True},
{"max_epochs": 10, "enable_progress_bar": False},
EarlyStoppingReason.STOPPING_THRESHOLD,
"Stopping threshold reached",
True,
None,
),
# Divergence threshold
(
ModelWithIncreasingLoss,
{"monitor": "val_loss", "divergence_threshold": 8.0, "mode": "min", "verbose": True},
{"max_epochs": 10, "enable_progress_bar": False},
EarlyStoppingReason.DIVERGENCE_THRESHOLD,
"Divergence threshold reached",
True,
None,
),
# Non-finite metric
(
ModelWithNaNLoss,
{"monitor": "val_loss", "check_finite": True, "verbose": True},
{"max_epochs": 10, "enable_progress_bar": False},
EarlyStoppingReason.NON_FINITE_METRIC,
"is not finite",
True,
None,
),
# Not stopped (normal completion)
(
ModelWithImprovingLoss,
{"monitor": "val_loss", "patience": 3, "verbose": True},
{"max_epochs": 3, "enable_progress_bar": False},
EarlyStoppingReason.NOT_STOPPED,
None,
False,
None,
),
# State persistence
(
None,
{"monitor": "val_loss", "patience": 3},
{},
EarlyStoppingReason.PATIENCE_EXHAUSTED,
"Test message",
None,
{"stopping_reason": EarlyStoppingReason.PATIENCE_EXHAUSTED, "stopping_reason_message": "Test message"},
),
# Backward compatibility (old state dict)
(
None,
{"monitor": "val_loss", "patience": 3},
{},
EarlyStoppingReason.NOT_STOPPED,
None,
None,
{
"wait_count": 2,
"stopped_epoch": 5,
"best_score": torch.tensor(0.5),
"patience": 3,
},
),
],
)
def test_early_stopping_reasons(
tmp_path,
model_cls,
early_stopping_kwargs,
trainer_kwargs,
expected_reason,
reason_message_substr,
should_stop,
state_dict_override,
):
"""Test all early stopping reasons in a single parametrized test."""
if state_dict_override is not None:
early_stopping = EarlyStopping(**early_stopping_kwargs)
if "stopping_reason" in state_dict_override:
# State persistence test
early_stopping.stopping_reason = state_dict_override["stopping_reason"]
early_stopping.stopping_reason_message = state_dict_override["stopping_reason_message"]
state_dict = early_stopping.state_dict()
new_early_stopping = EarlyStopping(**early_stopping_kwargs)
new_early_stopping.load_state_dict(state_dict)
assert new_early_stopping.stopping_reason == expected_reason
assert new_early_stopping.stopping_reason_message == reason_message_substr
else:
# Backward compatibility test
early_stopping.load_state_dict(copy.deepcopy(state_dict_override))
assert early_stopping.stopping_reason == expected_reason
assert early_stopping.stopping_reason_message is None
assert early_stopping.wait_count == state_dict_override["wait_count"]
assert early_stopping.stopped_epoch == state_dict_override["stopped_epoch"]
return
# All other tests
model = model_cls()
early_stopping = EarlyStopping(**early_stopping_kwargs)
trainer = Trainer(
default_root_dir=tmp_path,
callbacks=[early_stopping],
**trainer_kwargs,
)
trainer.fit(model)
assert early_stopping.stopping_reason == expected_reason
if reason_message_substr is not None:
assert early_stopping.stopping_reason_message is not None
assert reason_message_substr in early_stopping.stopping_reason_message
else:
assert early_stopping.stopping_reason_message is None
if should_stop is not None:
if should_stop:
assert early_stopping.stopped_epoch > 0
else:
assert early_stopping.stopped_epoch == 0
|
ModelWithImprovingLoss
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/operators/datacatalog.py
|
{
"start": 7421,
"end": 12688
}
|
class ____(GoogleCloudBaseOperator):
"""
Creates an EntryGroup.
    The newly created entry group ID is saved under the ``entry_group_id`` key in XCOM.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDataCatalogCreateEntryGroupOperator`
:param location: Required. The location of the entry group to create.
:param entry_group_id: Required. The id of the entry group to create. The id must begin with a letter
or underscore, contain only English letters, numbers and underscores, and be at most 64
characters.
:param entry_group: The entry group to create. Defaults to an empty entry group.
If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.cloud.datacatalog_v1beta1.types.EntryGroup`
:param project_id: The ID of the Google Cloud project that owns the entry group.
If set to ``None`` or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will be
retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: Optional, The connection ID used to connect to Google Cloud.
Defaults to 'google_cloud_default'.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"entry_group_id",
"entry_group",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (DataCatalogEntryGroupLink(),)
def __init__(
self,
*,
location: str,
entry_group_id: str,
entry_group: dict | EntryGroup,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.entry_group_id = entry_group_id
self.entry_group = entry_group
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDataCatalogHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
try:
result = hook.create_entry_group(
location=self.location,
entry_group_id=self.entry_group_id,
entry_group=self.entry_group,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except AlreadyExists:
self.log.info("Entry already exists. Skipping create operation.")
result = hook.get_entry_group(
location=self.location,
entry_group=self.entry_group_id,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
_, _, entry_group_id = result.name.rpartition("/")
self.log.info("Current entry group ID: %s", entry_group_id)
context["ti"].xcom_push(key="entry_group_id", value=entry_group_id)
DataCatalogEntryGroupLink.persist(
context=context,
entry_group_id=self.entry_group_id,
location_id=self.location,
project_id=self.project_id or hook.project_id,
)
return EntryGroup.to_dict(result)
@deprecated(
planned_removal_date="January 30, 2026",
use_instead="airflow.providers.google.cloud.operators.dataplex.DataplexCatalogCreateEntryOperator, "
"airflow.providers.google.cloud.operators.dataplex.DataplexCatalogUpdateEntryOperator",
reason="The Data Catalog will be discontinued on January 30, 2026 "
"in favor of Dataplex Universal Catalog.",
category=AirflowProviderDeprecationWarning,
)
|
CloudDataCatalogCreateEntryGroupOperator
|
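A hedged sketch of how the operator above is typically wired into a DAG (all IDs, locations, and display names below are placeholders; parameter names follow recent Airflow releases):

```python
from datetime import datetime

from airflow import DAG
from airflow.providers.google.cloud.operators.datacatalog import (
    CloudDataCatalogCreateEntryGroupOperator,
)

with DAG(
    dag_id="example_datacatalog_entry_group",
    start_date=datetime(2024, 1, 1),
    schedule=None,
    catchup=False,
) as dag:
    # Pushes the created entry group's ID to XCom under "entry_group_id".
    create_entry_group = CloudDataCatalogCreateEntryGroupOperator(
        task_id="create_entry_group",
        location="us-central1",
        entry_group_id="my_entry_group",
        entry_group={"display_name": "My entry group"},
        project_id="my-gcp-project",
    )
```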
python
|
pandas-dev__pandas
|
pandas/io/json/_json.py
|
{
"start": 7711,
"end": 8661
}
|
class ____(Writer):
_default_orient = "columns"
@property
def obj_to_write(self) -> NDFrame | Mapping[IndexLabel, Any]:
if not self.index and self.orient == "split":
obj_to_write = self.obj.to_dict(orient="split")
del obj_to_write["index"]
else:
obj_to_write = self.obj
return obj_to_write
def _format_axes(self) -> None:
"""
Try to format axes if they are datelike.
"""
if not self.obj.index.is_unique and self.orient in ("index", "columns"):
raise ValueError(
f"DataFrame index must be unique for orient='{self.orient}'."
)
if not self.obj.columns.is_unique and self.orient in (
"index",
"columns",
"records",
):
raise ValueError(
f"DataFrame columns must be unique for orient='{self.orient}'."
)
|
FrameWriter
|
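A brief demonstration of the index-uniqueness rule enforced by `_format_axes` above (assumes a standard pandas install):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=["x", "x"])  # duplicate index labels

try:
    df.to_json(orient="columns")
except ValueError as err:
    print(err)  # DataFrame index must be unique for orient='columns'.

print(df.to_json(orient="split"))  # 'split' tolerates the duplicate index
```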
python
|
psf__requests
|
tests/test_requests.py
|
{
"start": 87587,
"end": 90221
}
|
class ____:
def test_stream_timeout(self, httpbin):
try:
requests.get(httpbin("delay/10"), timeout=2.0)
except requests.exceptions.Timeout as e:
assert "Read timed out" in e.args[0].args[0]
@pytest.mark.parametrize(
"timeout, error_text",
(
((3, 4, 5), "(connect, read)"),
("foo", "must be an int, float or None"),
),
)
def test_invalid_timeout(self, httpbin, timeout, error_text):
with pytest.raises(ValueError) as e:
requests.get(httpbin("get"), timeout=timeout)
assert error_text in str(e)
@pytest.mark.parametrize("timeout", (None, Urllib3Timeout(connect=None, read=None)))
def test_none_timeout(self, httpbin, timeout):
"""Check that you can set None as a valid timeout value.
To actually test this behavior, we'd want to check that setting the
timeout to None actually lets the request block past the system default
timeout. However, this would make the test suite unbearably slow.
Instead we verify that setting the timeout to None does not prevent the
request from succeeding.
"""
r = requests.get(httpbin("get"), timeout=timeout)
assert r.status_code == 200
@pytest.mark.parametrize(
"timeout", ((None, 0.1), Urllib3Timeout(connect=None, read=0.1))
)
def test_read_timeout(self, httpbin, timeout):
try:
requests.get(httpbin("delay/10"), timeout=timeout)
pytest.fail("The recv() request should time out.")
except ReadTimeout:
pass
@pytest.mark.parametrize(
"timeout", ((0.1, None), Urllib3Timeout(connect=0.1, read=None))
)
def test_connect_timeout(self, timeout):
try:
requests.get(TARPIT, timeout=timeout)
pytest.fail("The connect() request should time out.")
except ConnectTimeout as e:
assert isinstance(e, ConnectionError)
assert isinstance(e, Timeout)
@pytest.mark.parametrize(
"timeout", ((0.1, 0.1), Urllib3Timeout(connect=0.1, read=0.1))
)
def test_total_timeout_connect(self, timeout):
try:
requests.get(TARPIT, timeout=timeout)
pytest.fail("The connect() request should time out.")
except ConnectTimeout:
pass
def test_encoded_methods(self, httpbin):
"""See: https://github.com/psf/requests/issues/2316"""
r = requests.request(b"GET", httpbin("get"))
assert r.ok
SendCall = collections.namedtuple("SendCall", ("args", "kwargs"))
|
TestTimeout
|
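The parametrizations above exercise the timeout forms that requests accepts; a compact sketch (the URL is a placeholder and the calls need network access):

```python
import requests

requests.get("https://example.com", timeout=2.0)         # one value for connect and read
requests.get("https://example.com", timeout=(3.05, 10))  # (connect, read) split
requests.get("https://example.com", timeout=None)        # no client-side timeout
```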
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 602209,
"end": 602534
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("RepositoryTopic", graphql_name="node")
|
RepositoryTopicEdge
|
python
|
walkccc__LeetCode
|
solutions/30. Substring with Concatenation of All Words/30.py
|
{
"start": 0,
"end": 531
}
|
class ____:
def findSubstring(self, s: str, words: list[str]) -> list[int]:
if len(s) == 0 or words == []:
return []
k = len(words)
n = len(words[0])
ans = []
count = collections.Counter(words)
for i in range(len(s) - k * n + 1):
seen = collections.defaultdict(int)
j = 0
while j < k:
word = s[i + j * n: i + j * n + n]
seen[word] += 1
if seen[word] > count[word]:
break
j += 1
if j == k:
ans.append(i)
return ans
|
Solution
|
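A quick sanity check of the solution above (it assumes the usual `import collections` at module level, which lies outside the extracted span):

```python
sol = Solution()
print(sol.findSubstring("barfoothefoobarman", ["foo", "bar"]))  # [0, 9]
print(sol.findSubstring("wordgoodgoodgoodbestword", ["word", "good", "best", "word"]))  # []
```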
python
|
geekcomputers__Python
|
venv/Lib/site-packages/pip/_internal/resolution/resolvelib/provider.py
|
{
"start": 2804,
"end": 9935
}
|
class ____(_ProviderBase):
"""Pip's provider implementation for resolvelib.
:params constraints: A mapping of constraints specified by the user. Keys
are canonicalized project names.
:params ignore_dependencies: Whether the user specified ``--no-deps``.
:params upgrade_strategy: The user-specified upgrade strategy.
:params user_requested: A set of canonicalized package names that the user
supplied for pip to install/upgrade.
"""
def __init__(
self,
factory: Factory,
constraints: Dict[str, Constraint],
ignore_dependencies: bool,
upgrade_strategy: str,
user_requested: Dict[str, int],
) -> None:
self._factory = factory
self._constraints = constraints
self._ignore_dependencies = ignore_dependencies
self._upgrade_strategy = upgrade_strategy
self._user_requested = user_requested
self._known_depths: Dict[str, float] = collections.defaultdict(lambda: math.inf)
def identify(self, requirement_or_candidate: Union[Requirement, Candidate]) -> str:
return requirement_or_candidate.name
def get_preference(
self,
identifier: str,
resolutions: Mapping[str, Candidate],
candidates: Mapping[str, Iterator[Candidate]],
information: Mapping[str, Iterable["PreferenceInformation"]],
backtrack_causes: Sequence["PreferenceInformation"],
) -> "Preference":
"""Produce a sort key for given requirement based on preference.
The lower the return value is, the more preferred this group of
arguments is.
Currently pip considers the following in order:
* Prefer if any of the known requirements is "direct", e.g. points to an
explicit URL.
* If equal, prefer if any requirement is "pinned", i.e. contains
operator ``===`` or ``==``.
* If equal, calculate an approximate "depth" and resolve requirements
closer to the user-specified requirements first. If the depth cannot
          be determined (e.g. due to no matching parents), it is considered
infinite.
* Order user-specified requirements by the order they are specified.
* If equal, prefers "non-free" requirements, i.e. contains at least one
operator, such as ``>=`` or ``<``.
* If equal, order alphabetically for consistency (helps debuggability).
"""
try:
next(iter(information[identifier]))
except StopIteration:
# There is no information for this identifier, so there's no known
# candidates.
has_information = False
else:
has_information = True
if has_information:
lookups = (r.get_candidate_lookup() for r, _ in information[identifier])
candidate, ireqs = zip(*lookups)
else:
candidate, ireqs = None, ()
operators = [
specifier.operator
for specifier_set in (ireq.specifier for ireq in ireqs if ireq)
for specifier in specifier_set
]
direct = candidate is not None
pinned = any(op[:2] == "==" for op in operators)
unfree = bool(operators)
try:
requested_order: Union[int, float] = self._user_requested[identifier]
except KeyError:
requested_order = math.inf
if has_information:
parent_depths = (
self._known_depths[parent.name] if parent is not None else 0.0
for _, parent in information[identifier]
)
inferred_depth = min(d for d in parent_depths) + 1.0
else:
inferred_depth = math.inf
else:
inferred_depth = 1.0
self._known_depths[identifier] = inferred_depth
requested_order = self._user_requested.get(identifier, math.inf)
# Requires-Python has only one candidate and the check is basically
# free, so we always do it first to avoid needless work if it fails.
requires_python = identifier == REQUIRES_PYTHON_IDENTIFIER
# Prefer the causes of backtracking on the assumption that the problem
# resolving the dependency tree is related to the failures that caused
# the backtracking
backtrack_cause = self.is_backtrack_cause(identifier, backtrack_causes)
return (
not requires_python,
not direct,
not pinned,
not backtrack_cause,
inferred_depth,
requested_order,
not unfree,
identifier,
)
def find_matches(
self,
identifier: str,
requirements: Mapping[str, Iterator[Requirement]],
incompatibilities: Mapping[str, Iterator[Candidate]],
) -> Iterable[Candidate]:
def _eligible_for_upgrade(identifier: str) -> bool:
"""Are upgrades allowed for this project?
This checks the upgrade strategy, and whether the project was one
that the user specified in the command line, in order to decide
whether we should upgrade if there's a newer version available.
(Note that we don't need access to the `--upgrade` flag, because
an upgrade strategy of "to-satisfy-only" means that `--upgrade`
was not specified).
"""
if self._upgrade_strategy == "eager":
return True
elif self._upgrade_strategy == "only-if-needed":
user_order = _get_with_identifier(
self._user_requested,
identifier,
default=None,
)
return user_order is not None
return False
constraint = _get_with_identifier(
self._constraints,
identifier,
default=Constraint.empty(),
)
return self._factory.find_candidates(
identifier=identifier,
requirements=requirements,
constraint=constraint,
prefers_installed=(not _eligible_for_upgrade(identifier)),
incompatibilities=incompatibilities,
is_satisfied_by=self.is_satisfied_by,
)
@lru_cache(maxsize=None)
def is_satisfied_by(self, requirement: Requirement, candidate: Candidate) -> bool:
return requirement.is_satisfied_by(candidate)
def get_dependencies(self, candidate: Candidate) -> Sequence[Requirement]:
with_requires = not self._ignore_dependencies
return [r for r in candidate.iter_dependencies(with_requires) if r is not None]
@staticmethod
def is_backtrack_cause(
identifier: str, backtrack_causes: Sequence["PreferenceInformation"]
) -> bool:
for backtrack_cause in backtrack_causes:
if identifier == backtrack_cause.requirement.name:
return True
if backtrack_cause.parent and identifier == backtrack_cause.parent.name:
return True
return False
|
PipProvider
|
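`get_preference` returns a plain tuple, so the priority list in its docstring is encoded by Python's element-wise tuple comparison: resolvelib picks the identifier whose key is smallest, and `False` sorts before `True`. A toy illustration with made-up values:

```python
import math

# Illustrative preference keys only; real keys are built by get_preference above.
preferences = {
    "requires-python": (False, True, True, True, 1.0, math.inf, False, "requires-python"),
    "requests": (True, False, True, True, 1.0, 0, False, "requests"),
    "urllib3": (True, True, True, True, 2.0, math.inf, True, "urllib3"),
}
print(min(preferences, key=preferences.__getitem__))  # 'requires-python'
```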
python
|
bokeh__bokeh
|
tests/unit/bokeh/core/property/test_validation__property.py
|
{
"start": 2929,
"end": 11283
}
|
class ____:
# test_Any unnecessary (no validation)
# TODO (bev) test_Image
def test_Angle(self) -> None:
p = Angle()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a value of type Real, got junk of type str")
def test_Bool(self) -> None:
p = Bool()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a value of type bool or bool_?, got junk of type str")
def test_Complex(self) -> None:
p = Complex()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a value of type Complex, got junk of type str")
def test_Float(self) -> None:
p = Float()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a value of type Real, got junk of type str")
def test_Int(self) -> None:
p = Int()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a value of type Integral, got junk of type str")
def test_Interval(self) -> None:
p = Interval(Float, 0.0, 1.0)
with pytest.raises(ValueError) as e:
p.validate(2)
assert matches(str(e.value), r"expected a value of type Float in range \[0.0, 1.0\], got 2")
def test_Percent(self) -> None:
p = Percent()
with pytest.raises(ValueError) as e:
p.validate(10)
assert matches(str(e.value), r"expected a value in range \[0, 1\], got 10")
def test_Size(self) -> None:
p = Size()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a value of type Real, got junk of type str")
def test_List(self) -> None:
p = List(Float)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected sequence List\(Float\), got 'junk' of type <class 'str'>")
def test_Seq(self) -> None:
p = Seq(Float)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected sequence Seq\(Float\), got 'junk' of type <class 'str'>")
def test_Seq_Any(self) -> None:
p = Seq(Any)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected sequence Seq\(Any\), got 'junk' of type <class 'str'>")
def test_Dict(self) -> None:
p = Dict(String, Float)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a dict of type Dict\(String, Float\), got a value of type <class 'str'>")
def test_Dict_Invalid_Key(self) -> None:
d = Dict(String, String)
with pytest.raises(ValueError) as err:
d.validate({"Foo": "Bar", 1: "Baz"})
assert "invalid keys: 1" in str(err.value)
def test_Dict_Invalid_Value(self) -> None:
d = Dict(String, String)
with pytest.raises(ValueError) as err:
d.validate({"Foo": "Bar", "Baz": 1})
assert "invalid values for keys: Baz" in str(err.value)
@pytest.mark.parametrize("key_type,key", KEYS)
@pytest.mark.parametrize("val_type,val", VALS)
def test_Dict_Valid(self, key_type, key, val_type, val) -> None:
d = Dict(key_type, val_type)
try:
d.validate({key: val})
except ValueError:
pytest.fail("ValueError should not be raised on validating a correct dictionary")
def test_Dict_Multiple_Invalid_Keys(self) -> None:
d = Dict(String, String)
with pytest.raises(ValueError) as err:
d.validate({"Foo": "Bar", 1: "Baz", None: "Bosh", 4.5: "Bump"})
assert "invalid keys: 1, None, 4.5" in str(err.value)
def test_Dict_Multiple_Invalid_Values(self) -> None:
d = Dict(String, String)
with pytest.raises(ValueError) as err:
d.validate({"Foo": "Bar", "Baz": 1, "Bosh": 3.2, "Bump": None})
assert "invalid values for keys: Baz, Bosh, Bump" in str(err.value)
def test_Dict_Multiple_Invalid_Keys_And_Values(self) -> None:
d = Dict(String, String)
with pytest.raises(ValueError) as err:
d.validate({"Foo": 2, 1: "Baz", None: None, 4.5: "Bump", "Fow": 3.2})
assert "invalid keys: 1, None, 4.5 and invalid values for keys: Foo, None, Fow" in str(err.value)
def test_Tuple(self) -> None:
p = Tuple(Int, Int)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected an element of Tuple\(Int, Int\), got 'junk'")
def test_Color(self) -> None:
p = Color()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected an element of either Enum\(.*\), .* or RGB, got 'junk'")
def test_ColumnData(self) -> None:
p = ColumnData(String, Seq(Float))
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a dict of type ColumnData\(String, Seq\(Float\)\), got a value of type <class 'str'>")
def test_Datetime(self) -> None:
p = Datetime()
with pytest.raises(ValueError) as e:
p.validate(object())
assert matches(str(e.value), r"Expected a date, datetime object, or timestamp, got <object object at 0x.*>")
def test_Date(self) -> None:
p = Date()
with pytest.raises(ValueError) as e:
p.validate(object())
assert matches(str(e.value), r"Expected an ISO date string, got <object object at 0x.*>")
def test_DashPattern(self) -> None:
p = DashPattern()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected an element of either Enum\(.*\), Regex\(.*\) or Seq\(Int\), got 'junk'")
def test_Either(self) -> None:
p = Either(Int, Float)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected an element of either Int or Float, got 'junk'")
def test_Enum(self) -> None:
p = Enum("red", "green")
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"invalid value: 'junk'; allowed values are red or green")
def test_FontSize(self) -> None:
p = FontSize()
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"'junk' is not a valid font size value")
def test_Instance(self) -> None:
p = Instance(HasProps)
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected an instance of type HasProps, got junk of type str")
def test_MinMaxBounds(self) -> None:
p = MinMaxBounds()
with pytest.raises(ValueError) as e:
p.validate(10)
assert matches(str(e.value), r"expected an element of either .*, got 10")
def test_Regex(self) -> None:
p = Regex("green")
with pytest.raises(ValueError) as e:
p.validate("junk")
assert matches(str(e.value), r"expected a string matching 'green' pattern, got 'junk'")
def test_String(self) -> None:
p = String()
with pytest.raises(ValueError) as e:
p.validate(10)
assert matches(str(e.value), r"expected a value of type str, got 10 of type int")
def test_MarkerType(self) -> None:
p = MarkerType()
with pytest.raises(ValueError) as e:
p.validate("foo")
assert matches(str(e.value), r"invalid value: 'foo'; allowed values are asterisk, .* or y")
@pytest.mark.parametrize('spec', SPECS)
def test_Spec(self, spec) -> None:
p = spec(default=None)
with pytest.raises(ValueError) as e:
p.validate(dict(bad="junk"))
assert matches(str(e.value), r"expected an element of either String, .*, got {'bad': 'junk'}")
@pytest.mark.parametrize('detail', [True, False])
|
TestValidateDetailDefault
|
python
|
pytorch__pytorch
|
test/backends/xeon/test_launch.py
|
{
"start": 231,
"end": 2572
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self._test_dir = tempfile.mkdtemp(prefix=self.__class__.__name__)
def tearDown(self):
shutil.rmtree(self._test_dir)
def test_cpu_info(self):
lscpu_info = """# The following is the parsable format, which can be fed to other
# programs. Each different item in every column has an unique ID
# starting from zero.
# CPU,Core,Socket,Node
0,0,0,0
1,1,0,0
2,2,0,0
3,3,0,0
4,4,1,1
5,5,1,1
6,6,1,1
7,7,1,1
8,0,0,0
9,1,0,0
10,2,0,0
11,3,0,0
12,4,1,1
13,5,1,1
14,6,1,1
15,7,1,1
"""
from torch.backends.xeon.run_cpu import _CPUinfo
cpuinfo = _CPUinfo(lscpu_info)
assert cpuinfo._physical_core_nums() == 8
assert cpuinfo._logical_core_nums() == 16
assert cpuinfo.get_node_physical_cores(0) == [0, 1, 2, 3]
assert cpuinfo.get_node_physical_cores(1) == [4, 5, 6, 7]
assert cpuinfo.get_node_logical_cores(0) == [0, 1, 2, 3, 8, 9, 10, 11]
assert cpuinfo.get_node_logical_cores(1) == [4, 5, 6, 7, 12, 13, 14, 15]
assert cpuinfo.get_all_physical_cores() == [0, 1, 2, 3, 4, 5, 6, 7]
assert cpuinfo.get_all_logical_cores() == [
0,
1,
2,
3,
8,
9,
10,
11,
4,
5,
6,
7,
12,
13,
14,
15,
]
assert cpuinfo.numa_aware_check([0, 1, 2, 3]) == [0]
assert cpuinfo.numa_aware_check([4, 5, 6, 7]) == [1]
assert cpuinfo.numa_aware_check([2, 3, 4, 5]) == [0, 1]
def test_multi_threads(self):
num = 0
with subprocess.Popen(
f"python -m torch.backends.xeon.run_cpu --ninstances 4 --use-default-allocator \
--disable-iomp --disable-numactl --disable-taskset --log-path {self._test_dir} --no-python pwd",
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as p:
for line in p.stdout.readlines():
segs = str(line, "utf-8").strip().split("-")
if segs[-1].strip() == "pwd":
num += 1
assert num == 4, "Failed to launch multiple instances for inference"
if __name__ == "__main__":
run_tests()
|
TestTorchrun
|
python
|
dask__distributed
|
distributed/worker_state_machine.py
|
{
"start": 15550,
"end": 15678
}
|
class ____(SendMessageToScheduler):
op = "add-keys"
__slots__ = ("keys",)
keys: Collection[Key]
@dataclass
|
AddKeysMsg
|
python
|
davidhalter__parso
|
parso/tree.py
|
{
"start": 11736,
"end": 11923
}
|
class ____(Leaf):
__slots__ = ('type',)
def __init__(self, type, value, start_pos, prefix=''):
super().__init__(value, start_pos, prefix)
self.type = type
|
TypedLeaf
|
python
|
jina-ai__jina
|
jina/orchestrate/pods/__init__.py
|
{
"start": 662,
"end": 9852
}
|
class ____(ABC):
"""
    :class:`BasePod` is an interface from which all the classes managing the lifetime of a Runtime inside a local process
    or container must inherit.
It exposes the required APIs so that the `BasePod` can be handled by the `cli` api as a context manager or by a `Deployment`.
What makes a BasePod a BasePod is that it manages the lifecycle of a Runtime (gateway or not gateway)
"""
def __init__(self, args: 'argparse.Namespace'):
self.args = args
if self.args.pod_role == PodRoleType.GATEWAY:
_update_gateway_args(
self.args,
gateway_load_balancer=getattr(
self.args, 'gateway_load_balancer', False
),
)
self.args.parallel = getattr(self.args, 'shards', 1)
self.name = self.args.name or self.__class__.__name__
self.logger = JinaLogger(self.name, **vars(self.args))
self._envs = {'JINA_DEPLOYMENT_NAME': self.name}
if self.args.quiet:
self._envs['JINA_LOG_CONFIG'] = 'QUIET'
if self.args.env:
self._envs.update(self.args.env)
# arguments needed to create `runtime` and communicate with it in the `run` in the stack of the new process
        # or thread.
test_worker = multiprocessing.Process()
self.is_ready = _get_event(test_worker)
self.is_shutdown = _get_event(test_worker)
self.cancel_event = _get_event(test_worker)
self.is_started = _get_event(test_worker)
self.is_signal_handlers_installed = _get_event(test_worker)
self.ready_or_shutdown = ConditionalEvent(
events_list=[self.is_ready, self.is_shutdown],
)
self.runtime_ctrl_address = self._get_control_address()
self._timeout_ctrl = self.args.timeout_ctrl
def _get_control_address(self):
return f'127.0.0.1:{self.args.port[0]}'
def close(self) -> None:
"""Close the Pod
This method makes sure that the `Process` is properly finished and its resources properly released
"""
self.logger.debug('waiting for ready or shutdown signal from runtime')
if not self.is_shutdown.is_set() and self.is_started.is_set():
try:
self.logger.debug(f'terminate')
self._terminate()
if not self.is_shutdown.wait(
timeout=self._timeout_ctrl if not __windows__ else 1.0
):
if not __windows__:
raise Exception(
f'Shutdown signal was not received for {self._timeout_ctrl} seconds'
)
else:
self.logger.warning(
'Pod was forced to close after 1 second. Graceful closing is not available on Windows.'
)
except Exception as ex:
self.logger.error(
(
f'{ex!r} during {self.close!r}'
+ f'\n add "--quiet-error" to suppress the exception details'
if not self.args.quiet_error
else ''
),
exc_info=not self.args.quiet_error,
)
else:
# here shutdown has been set already, therefore `run` will gracefully finish
self.logger.debug(
f'{"shutdown is already set" if self.is_shutdown.is_set() else "Runtime was never started"}. Runtime will end gracefully on its own'
)
if not self.is_shutdown.is_set():
self.is_signal_handlers_installed.wait(
timeout=self._timeout_ctrl if not __windows__ else 1.0
) # waiting for is_signal_handlers_installed will make sure signal handlers are installed
self._terminate()
self.is_shutdown.set()
self.logger.debug(__stop_msg__)
self.logger.close()
def __enter__(self):
return self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
def _wait_for_ready_or_shutdown(self, timeout: Optional[float]):
"""
Waits for the process to be ready or to know it has failed.
:param timeout: The time to wait before readiness or failure is determined
.. # noqa: DAR201
"""
from jina.serve.runtimes.servers import BaseServer
return BaseServer.wait_for_ready_or_shutdown(
timeout=timeout,
ready_or_shutdown_event=self.ready_or_shutdown.event,
ctrl_address=self.runtime_ctrl_address,
timeout_ctrl=self._timeout_ctrl,
protocol=getattr(self.args, 'protocol', ["grpc"])[0],
# for now protocol is not yet there part of Executor
)
def _fail_start_timeout(self, timeout):
"""
Closes the Pod and raises a TimeoutError with the corresponding warning messages
:param timeout: The time to wait before readiness or failure is determined
.. # noqa: DAR201
"""
_timeout = timeout or -1
self.logger.warning(
f'{self} timeout after waiting for {self.args.timeout_ready}ms, '
f'if your executor takes time to load, you may increase --timeout-ready'
)
self.close()
raise TimeoutError(
f'{typename(self)}:{self.name} can not be initialized after {_timeout * 1e3}ms'
)
def _check_failed_to_start(self):
"""
Raises a corresponding exception if failed to start
"""
if self.is_shutdown.is_set():
# return too early and the shutdown is set, means something fails!!
if not self.is_started.is_set():
raise RuntimeFailToStart
else:
raise RuntimeRunForeverEarlyError
    def wait_start_success(self):
        """Block until all pods start successfully.
        If they do not start, an error is raised for the outer function to catch
"""
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
if self._wait_for_ready_or_shutdown(_timeout):
self._check_failed_to_start()
self.logger.debug(__ready_msg__)
else:
self._fail_start_timeout(_timeout)
async def async_wait_start_success(self):
"""
Wait for the `Pod` to start successfully in a non-blocking manner
"""
import asyncio
from jina.serve.runtimes.servers import BaseServer
_timeout = self.args.timeout_ready
if _timeout <= 0:
_timeout = None
else:
_timeout /= 1e3
timeout_ns = 1e9 * _timeout if _timeout else None
now = time.time_ns()
check_protocol = getattr(self.args, 'protocol', ["grpc"])[0]
async def check_readiness_server():
self.logger.debug(
f'Checking readiness to {self.runtime_ctrl_address} with protocol {check_protocol}'
)
ready = await BaseServer.async_is_ready(
ctrl_address=self.runtime_ctrl_address,
timeout=_timeout,
protocol=check_protocol,
logger=self.logger,
# Executor does not have protocol yet
)
if ready:
self.logger.debug(
f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is ready'
)
else:
self.logger.debug(
f'Server on {self.runtime_ctrl_address} with protocol {check_protocol} is not yet ready'
)
return ready
while timeout_ns is None or time.time_ns() - now < timeout_ns:
if (
self.ready_or_shutdown.event.is_set()
and ( # submit the health check to the pod, if it is
self.is_shutdown.is_set() # a worker and not shutdown
or not self.args.pod_role == PodRoleType.WORKER
or (await check_readiness_server())
)
):
self._check_failed_to_start()
self.logger.debug(__ready_msg__)
return
else:
await asyncio.sleep(0.1)
self._fail_start_timeout(_timeout)
@property
def role(self) -> 'PodRoleType':
"""Get the role of this pod in a deployment
.. #noqa: DAR201"""
return self.args.pod_role
@abstractmethod
def start(self):
"""Start the BasePod.
This method calls :meth:`start` in :class:`multiprocesssing.Process`.
.. #noqa: DAR201
"""
...
@abstractmethod
def _terminate(self): ...
@abstractmethod
def join(self, *args, **kwargs):
"""Joins the BasePod. Wait for the BasePod to properly terminate
:param args: extra positional arguments
:param kwargs: extra keyword arguments
"""
...
|
BasePod
|
python
|
ray-project__ray
|
rllib/env/wrappers/dm_control_wrapper.py
|
{
"start": 7595,
"end": 8025
}
|
class ____(gym.ActionWrapper):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._low = -1.0
self._high = 1.0
self.action_space = gym.spaces.Box(
self._low,
self._high,
self.action_space.shape,
self.action_space.dtype,
)
def action(self, action):
return np.clip(action, self._low, self._high)
|
ActionClip
|
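A hedged usage sketch for the wrapper above: it does not rescale actions, it simply advertises a [-1, 1] action space and clips incoming actions before they reach the wrapped environment (assumes gymnasium is installed; the environment name is arbitrary):

```python
import gymnasium as gym
import numpy as np

env = ActionClip(gym.make("Pendulum-v1"))
print(env.action_space)  # Box bounded to [-1.0, 1.0]

obs, info = env.reset(seed=0)
# 5.0 is clipped to 1.0 before being handed to the underlying Pendulum env.
obs, reward, terminated, truncated, info = env.step(np.array([5.0], dtype=np.float32))
```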
python
|
doocs__leetcode
|
solution/3400-3499/3494.Find the Minimum Amount of Time to Brew Potions/Solution.py
|
{
"start": 0,
"end": 434
}
|
class ____:
def minTime(self, skill: List[int], mana: List[int]) -> int:
max = lambda a, b: a if a > b else b
n = len(skill)
f = [0] * n
for x in mana:
tot = 0
for i in range(n):
tot = max(tot, f[i]) + skill[i] * x
f[-1] = tot
for i in range(n - 2, -1, -1):
f[i] = f[i + 1] - skill[i + 1] * x
return f[-1]
|
Solution
|
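A worked check against LeetCode 3494's first example: the forward pass accumulates each wizard's finish time for the current potion, and the backward pass re-anchors the chain so the next potion can be scheduled (assumes the usual `from typing import List` import outside the extracted span):

```python
print(Solution().minTime([1, 5, 2, 4], [5, 1, 4, 2]))  # 110
```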
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/cli/parsers/key_value_parsers.py
|
{
"start": 8395,
"end": 9270
}
|
class ____(KeyValueParser):
"""Composite argument parser for POSIX SSH host key/value pairs."""
def get_parsers(self, state: ParserState) -> dict[str, Parser]:
"""Return a dictionary of key names and value parsers."""
return dict(
python=PythonParser(versions=list(SUPPORTED_PYTHON_VERSIONS), allow_venv=False, allow_default=False),
)
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
python_parser = PythonParser(versions=SUPPORTED_PYTHON_VERSIONS, allow_venv=False, allow_default=False)
section_name = 'ssh options'
state.sections[f'target {section_name} (comma separated):'] = '\n'.join([
f' python={python_parser.document(state)}',
])
return f'{{{section_name}}}'
|
PosixSshKeyValueParser
|
python
|
HIPS__autograd
|
autograd/builtins.py
|
{
"start": 3390,
"end": 3502
}
|
class ____(tuple_, metaclass=TupleMeta):
def __new__(cls, xs):
return make_sequence(tuple_, *xs)
|
tuple
|
python
|
pymupdf__PyMuPDF
|
src/table.py
|
{
"start": 12729,
"end": 13714
}
|
class ____:
"""
A TextMap maps each unicode character in the text to an individual `char`
object (or, in the case of layout-implied whitespace, `None`).
"""
def __init__(self, tuples=None) -> None:
self.tuples = tuples
self.as_string = "".join(map(itemgetter(0), tuples))
def match_to_dict(
self,
m,
main_group: int = 0,
return_groups: bool = True,
return_chars: bool = True,
) -> dict:
subset = self.tuples[m.start(main_group) : m.end(main_group)]
chars = [c for (text, c) in subset if c is not None]
x0, top, x1, bottom = objects_to_bbox(chars)
result = {
"text": m.group(main_group),
"x0": x0,
"top": top,
"x1": x1,
"bottom": bottom,
}
if return_groups:
result["groups"] = m.groups()
if return_chars:
result["chars"] = chars
return result
|
TextMap
|
python
|
apache__airflow
|
task-sdk/src/airflow/sdk/bases/decorator.py
|
{
"start": 5358,
"end": 11850
}
|
class ____(BaseOperator):
"""
Wraps a Python callable and captures args/kwargs when called for execution.
:param python_callable: A reference to an object that is callable
:param op_kwargs: a dictionary of keyword arguments that will get unpacked
in your function (templated)
:param op_args: a list of positional arguments that will get unpacked when
calling your callable (templated)
:param multiple_outputs: If set to True, the decorated function's return value will be unrolled to
multiple XCom values. Dict will unroll to XCom values with its keys as XCom keys. Defaults to False.
:param kwargs_to_upstream: For certain operators, we might need to upstream certain arguments
that would otherwise be absorbed by the DecoratedOperator (for example python_callable for the
PythonOperator). This gives a user the option to upstream kwargs as needed.
"""
template_fields: Sequence[str] = ("op_args", "op_kwargs")
template_fields_renderers = {"op_args": "py", "op_kwargs": "py"}
# since we won't mutate the arguments, we should just do the shallow copy
# there are some cases we can't deepcopy the objects (e.g protobuf).
shallow_copy_attrs: Sequence[str] = ("python_callable",)
def __init__(
self,
*,
python_callable: Callable,
task_id: str,
op_args: Collection[Any] | None = None,
op_kwargs: Mapping[str, Any] | None = None,
kwargs_to_upstream: dict[str, Any] | None = None,
**kwargs,
) -> None:
if not getattr(self, "_BaseOperator__from_mapped", False):
# If we are being created from calling unmap(), then don't mangle the task id
task_id = get_unique_task_id(task_id, kwargs.get("dag"), kwargs.get("task_group"))
self.python_callable = python_callable
kwargs_to_upstream = kwargs_to_upstream or {}
op_args = op_args or []
op_kwargs = op_kwargs or {}
# Check the decorated function's signature. We go through the argument
# list and "fill in" defaults to arguments that are known context keys,
# since values for those will be provided when the task is run. Since
# we're not actually running the function, None is good enough here.
signature = inspect.signature(python_callable)
# Don't allow context argument defaults other than None to avoid ambiguities.
faulty_parameters = [
param.name
for param in signature.parameters.values()
if param.name in KNOWN_CONTEXT_KEYS and param.default not in (None, inspect.Parameter.empty)
]
if faulty_parameters:
message = f"Context key parameter {faulty_parameters[0]} can't have a default other than None"
raise ValueError(message)
parameters = [
param.replace(default=None) if param.name in KNOWN_CONTEXT_KEYS else param
for param in signature.parameters.values()
]
try:
signature = signature.replace(parameters=parameters)
except ValueError as err:
message = textwrap.dedent(
f"""
The function signature broke while assigning defaults to context key parameters.
The decorator is replacing the signature
> {python_callable.__name__}({", ".join(str(param) for param in signature.parameters.values())})
with
> {python_callable.__name__}({", ".join(str(param) for param in parameters)})
which isn't valid: {err}
"""
)
raise ValueError(message) from err
# Check that arguments can be bound. There's a slight difference when
# we do validation for task-mapping: Since there's no guarantee we can
# receive enough arguments at parse time, we use bind_partial to simply
# check all the arguments we know are valid. Whether these are enough
# can only be known at execution time, when unmapping happens, and this
# is called without the _airflow_mapped_validation_only flag.
if kwargs.get("_airflow_mapped_validation_only"):
signature.bind_partial(*op_args, **op_kwargs)
else:
signature.bind(*op_args, **op_kwargs)
self.op_args = op_args
self.op_kwargs = op_kwargs
super().__init__(task_id=task_id, **kwargs_to_upstream, **kwargs)
def execute(self, context: Context):
# todo make this more generic (move to prepare_lineage) so it deals with non taskflow operators
# as well
for arg in itertools.chain(self.op_args, self.op_kwargs.values()):
if isinstance(arg, Asset):
self.inlets.append(arg)
return_value = super().execute(context)
return self._handle_output(return_value=return_value)
def _handle_output(self, return_value: Any):
"""
Handle logic for whether a decorator needs to push a single return value or multiple return values.
It sets outlets if any assets are found in the returned value(s)
:param return_value:
:param context:
:param xcom_push:
"""
if isinstance(return_value, Asset):
self.outlets.append(return_value)
if isinstance(return_value, list):
for item in return_value:
if isinstance(item, Asset):
self.outlets.append(item)
return return_value
def _hook_apply_defaults(self, *args, **kwargs):
if "python_callable" not in kwargs:
return args, kwargs
python_callable = kwargs["python_callable"]
default_args = kwargs.get("default_args") or {}
op_kwargs = kwargs.get("op_kwargs") or {}
f_sig = inspect.signature(python_callable)
for arg in f_sig.parameters:
if arg not in op_kwargs and arg in default_args:
op_kwargs[arg] = default_args[arg]
kwargs["op_kwargs"] = op_kwargs
return args, kwargs
def get_python_source(self):
raw_source = inspect.getsource(self.python_callable)
res = textwrap.dedent(raw_source)
res = remove_task_decorator(res, self.custom_operator_name)
return res
FParams = ParamSpec("FParams")
FReturn = TypeVar("FReturn")
OperatorSubclass = TypeVar("OperatorSubclass", bound="BaseOperator")
@attr.define(slots=False)
|
DecoratedOperator
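To make the signature handling in the DecoratedOperator above easier to follow, here is a standalone sketch of the same idea using only the standard library; it is not Airflow code, and KNOWN_CONTEXT_KEYS below is a hypothetical subset. Context-key parameters are given a None default, then the user-supplied args/kwargs are validated with bind() (normal tasks) or bind_partial() (mapped-task validation).
import inspect
KNOWN_CONTEXT_KEYS = {"ds", "ti"}  # hypothetical subset, for illustration only
def validate_call(python_callable, op_args, op_kwargs, partial=False):
    sig = inspect.signature(python_callable)
    parameters = [
        p.replace(default=None) if p.name in KNOWN_CONTEXT_KEYS else p
        for p in sig.parameters.values()
    ]
    sig = sig.replace(parameters=parameters)
    # bind() demands every required argument; bind_partial() tolerates gaps,
    # mirroring the mapped-task validation path described above.
    binder = sig.bind_partial if partial else sig.bind
    return binder(*op_args, **op_kwargs)
def my_task(x, y, ds=None):
    return x + y
validate_call(my_task, (1,), {"y": 2})          # ok: ds is supplied at runtime
validate_call(my_task, (1,), {}, partial=True)  # ok: y may arrive when unmapped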
|
python
|
psf__black
|
tests/data/cases/preview_long_strings__regression.py
|
{
"start": 6071,
"end": 10353
}
|
class ____:
def xxxx_xxx_xx_xxxxxxxxxx_xxxx_xxxxxxxxx(xxxx):
xxxxxxxx = [
xxxxxxxxxxxxxxxx(
'xxxx',
xxxxxxxxxxx={
'xxxx' : 1.0,
},
xxxxxx={'xxxxxx 1' : xxxxxx(xxxx='xxxxxx 1', xxxxxx=600.0)},
xxxxxxxx_xxxxxxx=0.0,
),
xxxxxxxxxxxxxxxx(
'xxxxxxx',
xxxxxxxxxxx={
'xxxx' : 1.0,
},
xxxxxx={'xxxxxx 1' : xxxxxx(xxxx='xxxxxx 1', xxxxxx=200.0)},
xxxxxxxx_xxxxxxx=0.0,
),
xxxxxxxxxxxxxxxx(
'xxxx',
),
]
some_dictionary = {
'xxxxx006': ['xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx== xxxxx000 xxxxxxxxxx\n',
'xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx== xxxxx010 xxxxxxxxxx\n'],
'xxxxx016': ['xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx0xx6xxxxxxxxxx2xxxxxx9xxxxxxxxxx0xxxxx1xxx2x/xx9xx6+x+xxxxxxxxxxxxxx4xxxxxxxxxxxxxxxxxxxxx43xxx2xx2x4x++xxx6xxxxxxxxx+xxxxx/xx9x+xxxxxxxxxxxxxx8x15xxxxxxxxxxxxxxxxx82xx/xxxxxxxxxxxxxx/x5xxxxxxxxxxxxxx6xxxxxx74x4/xxx4x+xxxxxxxxx2xxxxxxxx87xxxxx4xxxxxxxx3xx0xxxxx4xxx1xx9xx5xxxxxxx/xxxxx5xx6xx4xxxx1x/x2xxxxxxxxxxxx64xxxxxxx1x0xx5xxxxxxxxxxxxxx== xxxxx000 xxxxxxxxxx\n',
'xxx-xxx xxxxx3xxxx1xx2xxxxxxxxxxxxxx6xxxxxxxxxxxxxx9xxxxxxxxxxxxx3xxx9xxxxxxxxxxxxxxxx0xxxxxxxxxxxxxxxxx2xxxx2xxx6xxxxx/xx54xxxxxxxxx4xxx3xxxxxx9xx3xxxxx39xxxxxxxxx5xx91xxxx7xxxxxx8xxxxxxxxxxxxxxxx9xxx93xxxxxxxxxxxxxxxxx7xxx8xx8xx4/x1xxxxx1x3xxxxxxxxxxxxx3xxxxxx9xx4xx4x7xxxxxxxxxxxxx1xxxxxxxxx7xxxxxxxxxxxxxx4xx6xxxxxxxxx9xxx7xxxx2xxxxxxxxxxxxxxxxxxxxxx8xxxxxxxxxxxxxxxxxxxx6xx== xxxxx010 xxxxxxxxxx\n']
}
def foo():
xxx_xxx = (
'xxxx xxx xxxxxxxx_xxxx xx "xxxxxxxxxx".'
'\n xxx: xxxxxx xxxxxxxx_xxxx=xxxxxxxxxx'
) # xxxx xxxxxxxxxx xxxx xx xxxx xx xxx xxxxxxxx xxxxxx xxxxx.
some_tuple = ("some string", "some string" " which should be joined")
some_commented_string = ( # This comment stays at the top.
"This string is long but not so long that it needs hahahah toooooo be so greatttt"
" {} that I just can't think of any more good words to say about it at"
" allllllllllll".format("ha") # comments here are fine
)
some_commented_string = (
"This string is long but not so long that it needs hahahah toooooo be so greatttt" # But these
" {} that I just can't think of any more good words to say about it at" # comments will stay
" allllllllllll".format("ha") # comments here are fine
)
lpar_and_rpar_have_comments = func_call( # LPAR Comment
"Long really ridiculous type of string that shouldn't really even exist at all. I mean commmme onnn!!!", # Comma Comment
) # RPAR Comment
cmd_fstring = (
f"sudo -E deluge-console info --detailed --sort-reverse=time_added "
f"{'' if ID is None else ID} | perl -nE 'print if /^{field}:/'"
)
cmd_fstring = f"sudo -E deluge-console info --detailed --sort-reverse=time_added {'' if ID is None else ID} | perl -nE 'print if /^{field}:/'"
cmd_fstring = f"sudo -E deluge-console info --detailed --sort-reverse=time_added {'{{}}' if ID is None else ID} | perl -nE 'print if /^{field}:/'"
cmd_fstring = f"sudo -E deluge-console info --detailed --sort-reverse=time_added {{'' if ID is None else ID}} | perl -nE 'print if /^{field}:/'"
fstring = f"This string really doesn't need to be an {{{{fstring}}}}, but this one most certainly, absolutely {does}."
fstring = (
f"We have to remember to escape {braces}."
" Like {these}."
f" But not {this}."
)
|
A
|
python
|
pytorch__pytorch
|
torch/compiler/_cache.py
|
{
"start": 496,
"end": 1768
}
|
class ____(ABC):
"""
Data for each cache artifact that will be serialized and deserialized
"""
key: str
content: bytes = dataclasses.field(repr=False) # Do not display potential binary
@staticmethod
def serialize(writer: BytesWriter, cls: "CacheArtifact") -> None:
writer.write_str(cls.key)
writer.write_bytes(cls.content)
@staticmethod
def deserialize(artifact_type: str, reader: BytesReader) -> "CacheArtifact":
key = reader.read_str()
content = reader.read_bytes()
return CacheArtifactFactory.create(artifact_type, key, content)
@staticmethod
def encode(content: Any) -> bytes:
assert isinstance(content, bytes), f"Expected bytes, got {type(content)}"
return content
@abstractmethod
def populate_cache(self) -> None:
pass
@staticmethod
def type() -> str:
"""
Returns the type of the artifact. Must be unique across all CacheArtifact classes.
CacheArtifactFactory.register will add a property method to CacheInfo based on this (def {type}_artifacts)
that returns all artifacts for the specific cache.
"""
raise RuntimeError("CacheArtifact is an abstract class, please use a subclass")
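For illustration, here is a toy round trip of the serialize/deserialize contract above. BytesWriter and BytesReader below are hypothetical stand-ins (simple length-prefixed buffers), not PyTorch's actual implementations.
import io
import struct
class BytesWriter:
    def __init__(self):
        self.buf = io.BytesIO()
    def write_bytes(self, b: bytes) -> None:
        # Length-prefix each field so it can be read back unambiguously.
        self.buf.write(struct.pack("<Q", len(b)))
        self.buf.write(b)
    def write_str(self, s: str) -> None:
        self.write_bytes(s.encode("utf-8"))
class BytesReader:
    def __init__(self, data: bytes):
        self.buf = io.BytesIO(data)
    def read_bytes(self) -> bytes:
        (n,) = struct.unpack("<Q", self.buf.read(8))
        return self.buf.read(n)
    def read_str(self) -> str:
        return self.read_bytes().decode("utf-8")
w = BytesWriter()
w.write_str("some/cache/key")
w.write_bytes(b"\x00binary payload")
r = BytesReader(w.buf.getvalue())
assert r.read_str() == "some/cache/key"
assert r.read_bytes() == b"\x00binary payload"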
|
CacheArtifact
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 861554,
"end": 862324
}
|
class ____(sgqlc.types.relay.Connection):
"""The connection type for ProjectV2SortByField."""
__schema__ = github_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(sgqlc.types.list_of("ProjectV2SortByFieldEdge"), graphql_name="edges")
"""A list of edges."""
nodes = sgqlc.types.Field(sgqlc.types.list_of(ProjectV2SortByField), graphql_name="nodes")
"""A list of nodes."""
page_info = sgqlc.types.Field(sgqlc.types.non_null(PageInfo), graphql_name="pageInfo")
"""Information to aid in pagination."""
total_count = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="totalCount")
"""Identifies the total count of items in the connection."""
|
ProjectV2SortByFieldConnection
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/errors.py
|
{
"start": 17053,
"end": 17310
}
|
class ____(DagsterError):
"""Indicates the resolved executor is incompatible with the state of other systems
such as the :py:class:`~dagster._core.instance.DagsterInstance` or system storage configuration.
"""
|
DagsterUnmetExecutorRequirementsError
|
python
|
spack__spack
|
lib/spack/spack/test/spec_semantics.py
|
{
"start": 3980,
"end": 100097
}
|
class ____:
"""Test satisfies(), intersects(), constrain() and other semantic operations on specs."""
@pytest.mark.parametrize(
"lhs,rhs,expected",
[
("libelf@0.8.13", "@0:1", "libelf@0.8.13"),
("libdwarf^libelf@0.8.13", "^libelf@0:1", "libdwarf^libelf@0.8.13"),
("libelf", Spec(), "libelf"),
("libdwarf", Spec(), "libdwarf"),
("%intel", Spec(), "%intel"),
("^mpi", Spec(), "^mpi"),
("+debug", Spec(), "+debug"),
("@3:", Spec(), "@3:"),
# Versions
("libelf@0:2.5", "libelf@2.1:3", "libelf@2.1:2.5"),
("libelf@0:2.5%gcc@2:4.6", "libelf@2.1:3%gcc@4.5:4.7", "libelf@2.1:2.5%gcc@4.5:4.6"),
# Namespaces
("builtin.mpich", "mpich", "builtin.mpich"),
("builtin.mock.mpich", "mpich", "builtin.mock.mpich"),
("builtin.mpich", "builtin.mpich", "builtin.mpich"),
("mpileaks ^builtin.mock.mpich", "^mpich", "mpileaks ^builtin.mock.mpich"),
# Virtual dependencies are fully resolved during concretization, so we can constrain
# abstract specs but that would result in a new node
("mpileaks ^builtin.mock.mpich", "^mpi", "mpileaks ^mpi ^builtin.mock.mpich"),
(
"mpileaks ^builtin.mock.mpich",
"^builtin.mock.mpich",
"mpileaks ^builtin.mock.mpich",
),
# Compilers
("foo%gcc", "%gcc", "foo%gcc"),
("foo%intel", "%intel", "foo%intel"),
("foo%gcc", "%gcc@4.7.2", "foo%gcc@4.7.2"),
("foo%intel", "%intel@4.7.2", "foo%intel@4.7.2"),
("foo%gcc@4.5", "%gcc@4.4:4.6", "foo%gcc@4.5"),
("foo@2.0%gcc@4.5", "@1:3%gcc@4.4:4.6", "foo@2.0%gcc@4.5"),
("foo %gcc@4.7.3", "%gcc@4.7", "foo %gcc@4.7.3"),
("libelf %gcc@4.4.7", "libelf %gcc@4.4.7", "libelf %gcc@4.4.7"),
("libelf", "libelf %gcc@4.4.7", "libelf %gcc@4.4.7"),
# Architecture
("foo platform=test", "platform=test", "foo platform=test"),
("foo platform=linux", "platform=linux", "foo platform=linux"),
(
"foo platform=test",
"platform=test target=frontend",
"foo platform=test target=frontend",
),
(
"foo platform=test",
"platform=test os=frontend target=frontend",
"foo platform=test os=frontend target=frontend",
),
(
"foo platform=test os=frontend target=frontend",
"platform=test",
"foo platform=test os=frontend target=frontend",
),
("foo arch=test-None-None", "platform=test", "foo platform=test"),
(
"foo arch=test-None-frontend",
"platform=test target=frontend",
"foo platform=test target=frontend",
),
(
"foo arch=test-frontend-frontend",
"platform=test os=frontend target=frontend",
"foo platform=test os=frontend target=frontend",
),
(
"foo arch=test-frontend-frontend",
"platform=test",
"foo platform=test os=frontend target=frontend",
),
(
"foo platform=test target=backend os=backend",
"platform=test target=backend os=backend",
"foo platform=test target=backend os=backend",
),
(
"libelf target=default_target os=default_os",
"libelf target=default_target os=default_os",
"libelf target=default_target os=default_os",
),
# Dependencies
("mpileaks ^mpich", "^mpich", "mpileaks ^mpich"),
("mpileaks ^mpich@2.0", "^mpich@1:3", "mpileaks ^mpich@2.0"),
(
"mpileaks ^mpich@2.0 ^callpath@1.5",
"^mpich@1:3 ^callpath@1.4:1.6",
"mpileaks^mpich@2.0^callpath@1.5",
),
("mpileaks ^mpi", "^mpi", "mpileaks ^mpi"),
("mpileaks ^mpi", "^mpich", "mpileaks ^mpi ^mpich"),
("mpileaks^mpi@1.5", "^mpi@1.2:1.6", "mpileaks^mpi@1.5"),
("mpileaks^mpi@2:", "^mpich", "mpileaks^mpi@2: ^mpich"),
("mpileaks^mpi@2:", "^mpich@3.0.4", "mpileaks^mpi@2: ^mpich@3.0.4"),
# Variants
("mpich+foo", "mpich+foo", "mpich+foo"),
("mpich++foo", "mpich++foo", "mpich++foo"),
("mpich~foo", "mpich~foo", "mpich~foo"),
("mpich~~foo", "mpich~~foo", "mpich~~foo"),
("mpich foo=1", "mpich foo=1", "mpich foo=1"),
("mpich foo==1", "mpich foo==1", "mpich foo==1"),
("mpich+foo", "mpich foo=True", "mpich+foo"),
("mpich++foo", "mpich foo=True", "mpich+foo"),
("mpich foo=true", "mpich+foo", "mpich+foo"),
("mpich foo==true", "mpich++foo", "mpich++foo"),
("mpich~foo", "mpich foo=FALSE", "mpich~foo"),
("mpich~~foo", "mpich foo=FALSE", "mpich~foo"),
("mpich foo=False", "mpich~foo", "mpich~foo"),
("mpich foo==False", "mpich~foo", "mpich~foo"),
("mpich foo=*", "mpich~foo", "mpich~foo"),
("mpich+foo", "mpich foo=*", "mpich+foo"),
(
'multivalue-variant foo="bar,baz"',
"multivalue-variant foo=bar,baz",
"multivalue-variant foo=bar,baz",
),
(
'multivalue-variant foo="bar,baz"',
"multivalue-variant foo=*",
"multivalue-variant foo=bar,baz",
),
(
'multivalue-variant foo="bar,baz"',
"multivalue-variant foo=bar",
"multivalue-variant foo=bar,baz",
),
(
'multivalue-variant foo="bar,baz"',
"multivalue-variant foo=baz",
"multivalue-variant foo=bar,baz",
),
(
'multivalue-variant foo="bar,baz,barbaz"',
"multivalue-variant foo=bar,baz",
"multivalue-variant foo=bar,baz,barbaz",
),
(
'multivalue-variant foo="bar,baz"',
'foo="baz,bar"', # Order of values doesn't matter
"multivalue-variant foo=bar,baz",
),
("mpich+foo", "mpich", "mpich+foo"),
("mpich~foo", "mpich", "mpich~foo"),
("mpich foo=1", "mpich", "mpich foo=1"),
("mpich", "mpich++foo", "mpich++foo"),
("libelf+debug", "libelf+foo", "libelf+debug+foo"),
("libelf+debug", "libelf+debug+foo", "libelf+debug+foo"),
("libelf debug=2", "libelf foo=1", "libelf debug=2 foo=1"),
("libelf debug=2", "libelf debug=2 foo=1", "libelf debug=2 foo=1"),
("libelf+debug", "libelf~foo", "libelf+debug~foo"),
("libelf+debug", "libelf+debug~foo", "libelf+debug~foo"),
("libelf++debug", "libelf+debug+foo", "libelf+debug+foo"),
("libelf debug==2", "libelf foo=1", "libelf debug==2 foo=1"),
("libelf debug==2", "libelf debug=2 foo=1", "libelf debug=2 foo=1"),
("libelf++debug", "libelf++debug~foo", "libelf++debug~foo"),
("libelf foo=bar,baz", "libelf foo=*", "libelf foo=bar,baz"),
("libelf foo=*", "libelf foo=bar,baz", "libelf foo=bar,baz"),
(
'multivalue-variant foo="bar"',
'multivalue-variant foo="baz"',
'multivalue-variant foo="bar,baz"',
),
(
'multivalue-variant foo="bar,barbaz"',
'multivalue-variant foo="baz"',
'multivalue-variant foo="bar,baz,barbaz"',
),
# Namespaces (special case, but like variants)
("builtin.libelf", "namespace=builtin", "builtin.libelf"),
("libelf", "namespace=builtin", "builtin.libelf"),
# Flags
("mpich ", 'mpich cppflags="-O3"', 'mpich cppflags="-O3"'),
(
'mpich cppflags="-O3 -Wall"',
'mpich cppflags="-O3 -Wall"',
'mpich cppflags="-O3 -Wall"',
),
('mpich cppflags=="-O3"', 'mpich cppflags=="-O3"', 'mpich cppflags=="-O3"'),
(
'libelf cflags="-O3"',
'libelf cppflags="-Wall"',
'libelf cflags="-O3" cppflags="-Wall"',
),
(
'libelf cflags="-O3"',
'libelf cppflags=="-Wall"',
'libelf cflags="-O3" cppflags=="-Wall"',
),
(
'libelf cflags=="-O3"',
'libelf cppflags=="-Wall"',
'libelf cflags=="-O3" cppflags=="-Wall"',
),
(
'libelf cflags="-O3"',
'libelf cflags="-O3" cppflags="-Wall"',
'libelf cflags="-O3" cppflags="-Wall"',
),
(
"libelf patches=ba5e334fe247335f3a116decfb5284100791dc302b5571ff5e664d8f9a6806c2",
"libelf patches=ba5e3", # constrain by a patch sha256 prefix
# TODO: the result below is not ideal. Prefix satisfies() works for patches, but
# constrain() isn't similarly special-cased to do the same thing
(
"libelf patches=ba5e3,"
"ba5e334fe247335f3a116decfb5284100791dc302b5571ff5e664d8f9a6806c2"
),
),
# deptypes on direct deps
(
"mpileaks %[deptypes=build] mpich",
"mpileaks %[deptypes=link] mpich",
"mpileaks %[deptypes=build,link] mpich",
),
# conditional edges
(
"libelf",
"%[when='%c' virtuals=c]gcc ^[when='+mpi' virtuals=mpi]mpich",
"libelf %[when='%c' virtuals=c]gcc ^[when='+mpi' virtuals=mpi]mpich",
),
(
"libelf %[when='%c' virtuals=c]gcc",
"%[when='%c' virtuals=c]gcc@10.3.1",
"libelf%[when='%c' virtuals=c]gcc@10.3.1",
),
(
"libelf %[when='%c' virtuals=c]gcc",
"%[when='%c' virtuals=c]gcc@10.3.1 ^[when='+mpi'] mpich",
"libelf%[when='%c' virtuals=c]gcc@10.3.1 ^[when='+mpi']mpich",
),
(
"libelf %[when='%c' virtuals=c]gcc",
"%[when='%cxx' virtuals=cxx]gcc@10.3.1",
"libelf%[when='%c' virtuals=c]gcc %[when='%cxx' virtuals=cxx]gcc@10.3.1",
),
(
"libelf %[when='+c' virtuals=c]gcc",
"%[when='%c' virtuals=c]gcc@10.3.1",
"libelf %[when='+c' virtuals=c]gcc %[when='%c' virtuals=c]gcc@10.3.1",
),
],
)
def test_abstract_specs_can_constrain_each_other(self, lhs, rhs, expected):
"""Test that lhs and rhs intersect with each other, and that they can be constrained
with each other. Also check that the constrained result match the expected spec.
"""
lhs, rhs, expected = Spec(lhs), Spec(rhs), Spec(expected)
assert lhs.intersects(rhs)
assert rhs.intersects(lhs)
c1, c2 = lhs.copy(), rhs.copy()
c1.constrain(rhs)
c2.constrain(lhs)
assert c1 == c2
assert c1 == expected
@pytest.mark.parametrize(
"lhs,rhs,expected_lhs,expected_rhs,propagated_lhs,propagated_rhs",
[
(
'mpich cppflags="-O3"',
'mpich cppflags="-O2"',
'mpich cppflags="-O3 -O2"',
'mpich cppflags="-O2 -O3"',
[],
[],
),
(
'mpich cflags="-O3 -g"',
'mpich cflags=="-O3"',
'mpich cflags="-O3 -g"',
'mpich cflags="-O3 -g"',
[],
[],
),
(
'mpich cflags=="-O3 -g"',
'mpich cflags=="-O3"',
'mpich cflags=="-O3 -g"',
'mpich cflags=="-O3 -g"',
[("cflags", "-O3"), ("cflags", "-g")],
[("cflags", "-O3"), ("cflags", "-g")],
),
],
)
def test_constrain_compiler_flags(
self, lhs, rhs, expected_lhs, expected_rhs, propagated_lhs, propagated_rhs
):
"""Constraining is asymmetric for compiler flags."""
lhs, rhs, expected_lhs, expected_rhs = (
Spec(lhs),
Spec(rhs),
Spec(expected_lhs),
Spec(expected_rhs),
)
assert lhs.intersects(rhs)
assert rhs.intersects(lhs)
c1, c2 = lhs.copy(), rhs.copy()
c1.constrain(rhs)
c2.constrain(lhs)
assert c1 == expected_lhs
assert c2 == expected_rhs
for x in [c1, c2]:
assert x.satisfies(lhs)
assert x.satisfies(rhs)
def _propagated_flags(_spec):
result = set()
for flagtype in _spec.compiler_flags:
for flag in _spec.compiler_flags[flagtype]:
if flag.propagate:
result.add((flagtype, flag))
return result
assert set(propagated_lhs) <= _propagated_flags(c1)
assert set(propagated_rhs) <= _propagated_flags(c2)
def test_constrain_specs_by_hash(self, default_mock_concretization, database):
"""Test that Specs specified only by their hashes can constrain eachother."""
mpich_dag_hash = "/" + database.query_one("mpich").dag_hash()
spec = Spec(mpich_dag_hash[:7])
assert spec.constrain(Spec(mpich_dag_hash)) is False
assert spec.abstract_hash == mpich_dag_hash[1:]
def test_mismatched_constrain_spec_by_hash(self, default_mock_concretization, database):
"""Test that Specs specified only by their incompatible hashes fail appropriately."""
lhs = "/" + database.query_one("callpath ^mpich").dag_hash()
rhs = "/" + database.query_one("callpath ^mpich2").dag_hash()
with pytest.raises(spack.spec.InvalidHashError):
Spec(lhs).constrain(Spec(rhs))
with pytest.raises(spack.spec.InvalidHashError):
Spec(lhs[:7]).constrain(Spec(rhs))
@pytest.mark.parametrize(
"lhs,rhs", [("libelf", Spec()), ("libelf", "@0:1"), ("libelf", "@0:1 %gcc")]
)
def test_concrete_specs_which_satisfies_abstract(self, lhs, rhs, default_mock_concretization):
"""Test that constraining an abstract spec by a compatible concrete one makes the
abstract spec concrete, and equal to the one it was constrained with.
"""
lhs, rhs = default_mock_concretization(lhs), Spec(rhs)
assert lhs.intersects(rhs)
assert rhs.intersects(lhs)
assert lhs.satisfies(rhs)
assert not rhs.satisfies(lhs)
assert lhs.constrain(rhs) is False
assert rhs.constrain(lhs) is True
assert rhs.concrete
assert lhs.satisfies(rhs)
assert rhs.satisfies(lhs)
assert lhs == rhs
@pytest.mark.parametrize(
"lhs,rhs",
[
("foo platform=linux", "platform=test os=redhat6 target=x86"),
("foo os=redhat6", "platform=test os=debian6 target=x86_64"),
("foo target=x86_64", "platform=test os=redhat6 target=x86"),
("foo%gcc@4.3", "%gcc@4.4:4.6"),
("foo@4.0%gcc", "@1:3%gcc"),
("foo@4.0%gcc@4.5", "@1:3%gcc@4.4:4.6"),
("builtin.mock.mpich", "builtin.mpich"),
("mpileaks ^builtin.mock.mpich", "^builtin.mpich"),
("mpileaks^mpich@1.2", "^mpich@2.0"),
("mpileaks^mpich@4.0^callpath@1.5", "^mpich@1:3^callpath@1.4:1.6"),
("mpileaks^mpich@2.0^callpath@1.7", "^mpich@1:3^callpath@1.4:1.6"),
("mpileaks^mpich@4.0^callpath@1.7", "^mpich@1:3^callpath@1.4:1.6"),
("mpileaks^mpi@3", "^mpi@1.2:1.6"),
("mpileaks^mpi@3:", "^mpich2@1.4"),
("mpileaks^mpi@3:", "^mpich2"),
("mpileaks^mpi@3:", "^mpich@1.0"),
("mpich~foo", "mpich+foo"),
("mpich+foo", "mpich~foo"),
("mpich foo=True", "mpich foo=False"),
("mpich~~foo", "mpich++foo"),
("mpich++foo", "mpich~~foo"),
("mpich foo==True", "mpich foo==False"),
("libelf@0:2.0", "libelf@2.1:3"),
("libelf@0:2.5%gcc@4.8:4.9", "libelf@2.1:3%gcc@4.5:4.7"),
("libelf+debug", "libelf~debug"),
("libelf+debug~foo", "libelf+debug+foo"),
("libelf debug=True", "libelf debug=False"),
("namespace=builtin.mock", "namespace=builtin"),
],
)
def test_constraining_abstract_specs_with_empty_intersection(self, lhs, rhs):
"""Check that two abstract specs with an empty intersection cannot be constrained
with each other.
"""
lhs, rhs = Spec(lhs), Spec(rhs)
assert not lhs.intersects(rhs)
assert not rhs.intersects(lhs)
with pytest.raises(UnsatisfiableSpecError):
lhs.constrain(rhs)
with pytest.raises(UnsatisfiableSpecError):
rhs.constrain(lhs)
@pytest.mark.parametrize(
"lhs,rhs",
[
("mpich", "mpich +foo"),
("mpich", "mpich~foo"),
("mpich", "mpich foo=1"),
("multivalue-variant foo=bar", "multivalue-variant +foo"),
("multivalue-variant foo=bar", "multivalue-variant ~foo"),
("multivalue-variant fee=bar", "multivalue-variant fee=baz"),
],
)
def test_concrete_specs_which_do_not_satisfy_abstract(
self, lhs, rhs, default_mock_concretization
):
lhs, rhs = default_mock_concretization(lhs), Spec(rhs)
assert lhs.intersects(rhs) is False
assert rhs.intersects(lhs) is False
assert not lhs.satisfies(rhs)
assert not rhs.satisfies(lhs)
with pytest.raises(UnsatisfiableSpecError):
assert lhs.constrain(rhs)
with pytest.raises(UnsatisfiableSpecError):
assert rhs.constrain(lhs)
@pytest.mark.parametrize(
"lhs,rhs", [("mpich", "mpich++foo"), ("mpich", "mpich~~foo"), ("mpich", "mpich foo==1")]
)
def test_concrete_specs_which_satisfy_abstract(self, lhs, rhs, default_mock_concretization):
lhs, rhs = default_mock_concretization(lhs), Spec(rhs)
assert lhs.intersects(rhs)
assert rhs.intersects(lhs)
assert lhs.satisfies(rhs)
s1 = lhs.copy()
s1.constrain(rhs)
assert s1 == lhs and s1.satisfies(lhs)
s2 = rhs.copy()
s2.constrain(lhs)
assert s2 == lhs and s2.satisfies(lhs)
@pytest.mark.parametrize(
"lhs,rhs,expected,constrained",
[
# hdf5++mpi satisfies hdf5, and vice versa, because of the non-contradiction semantics
("hdf5++mpi", "hdf5", True, "hdf5++mpi"),
("hdf5", "hdf5++mpi", True, "hdf5++mpi"),
# Same holds true for arbitrary propagated variants
("hdf5++mpi", "hdf5++shared", True, "hdf5++mpi++shared"),
# Here hdf5+mpi satisfies hdf5++mpi but not vice versa
("hdf5++mpi", "hdf5+mpi", False, "hdf5+mpi"),
("hdf5+mpi", "hdf5++mpi", True, "hdf5+mpi"),
# Non contradiction is violated
("hdf5 ^foo~mpi", "hdf5++mpi", False, "hdf5++mpi ^foo~mpi"),
("hdf5++mpi", "hdf5 ^foo~mpi", False, "hdf5++mpi ^foo~mpi"),
],
)
def test_abstract_specs_with_propagation(self, lhs, rhs, expected, constrained):
"""Tests (and documents) behavior of variant propagation on abstract specs.
Propagated variants do not comply with subset semantics, making it difficult to give
precise definitions. Here we document the behavior that has been decided for the
practical cases we face.
"""
lhs, rhs, constrained = Spec(lhs), Spec(rhs), Spec(constrained)
assert lhs.satisfies(rhs) is expected
c = lhs.copy()
c.constrain(rhs)
assert c == constrained
c = rhs.copy()
c.constrain(lhs)
assert c == constrained
def test_basic_satisfies_conditional_dep(self, default_mock_concretization):
"""Tests basic semantic of satisfies with conditional dependencies, on a concrete spec"""
concrete = default_mock_concretization("mpileaks ^mpich")
# This branch exists, so the condition is met, and is satisfied
assert concrete.satisfies("^[virtuals=mpi] mpich")
assert concrete.satisfies("^[when='^notapackage' virtuals=mpi] mpich")
assert concrete.satisfies("^[when='^mpi' virtuals=mpi] mpich")
# This branch does not exist, but the condition is not met
assert not concrete.satisfies("^zmpi")
assert concrete.satisfies("^[when='^notapackage'] zmpi")
assert not concrete.satisfies("^[when='^mpi'] zmpi")
def test_satisfies_single_valued_variant(self):
"""Tests that the case reported in
https://github.com/spack/spack/pull/2386#issuecomment-282147639
is handled correctly.
"""
a = spack.concretize.concretize_one("pkg-a foobar=bar")
assert a.satisfies("foobar=bar")
assert a.satisfies("foobar=*")
# Assert that an autospec generated from a literal
# gives the right result for a single valued variant
assert "foobar=bar" in a
assert "foobar==bar" in a
assert "foobar=baz" not in a
assert "foobar=fee" not in a
# ... and for a multi valued variant
assert "foo=bar" in a
# Check that conditional dependencies are treated correctly
assert "^pkg-b" in a
def test_unsatisfied_single_valued_variant(self):
a = spack.concretize.concretize_one("pkg-a foobar=baz")
assert "^pkg-b" not in a
mv = spack.concretize.concretize_one("multivalue-variant")
assert "pkg-a@1.0" not in mv
def test_indirect_unsatisfied_single_valued_variant(self):
spec = spack.concretize.concretize_one("singlevalue-variant-dependent")
assert "pkg-a@1.0" not in spec
def test_satisfied_namespace(self):
spec = spack.concretize.concretize_one("zlib")
assert spec.satisfies("namespace=builtin_mock")
assert not spec.satisfies("namespace=builtin")
@pytest.mark.parametrize(
"spec_string",
[
"tcl namespace==foobar",
"tcl arch==foobar",
"tcl os==foobar",
"tcl patches==foobar",
"tcl dev_path==foobar",
],
)
def test_propagate_reserved_variant_names(self, spec_string):
with pytest.raises(spack.spec_parser.SpecParsingError, match="Propagation"):
Spec(spec_string)
def test_multivalued_variant_1(self, default_mock_concretization):
# Semantics for a multi-valued variant are different
# depending on whether the spec is concrete or not
a = default_mock_concretization("multivalue-variant foo=bar")
b = Spec("multivalue-variant foo=bar,baz")
assert not a.satisfies(b)
def test_multivalued_variant_2(self):
a = Spec("multivalue-variant foo=bar")
b = Spec("multivalue-variant foo=bar,baz")
# The specs are abstract and they **could** be constrained
assert b.satisfies(a) and not a.satisfies(b)
# An abstract spec can instead be constrained
assert a.constrain(b)
def test_multivalued_variant_3(self, default_mock_concretization):
a = default_mock_concretization("multivalue-variant foo=bar,baz")
b = Spec("multivalue-variant foo=bar,baz,quux")
assert not a.satisfies(b)
def test_multivalued_variant_4(self):
a = Spec("multivalue-variant foo=bar,baz")
b = Spec("multivalue-variant foo=bar,baz,quux")
# The specs are abstract and they **could** be constrained
assert a.intersects(b)
# An abstract spec can instead be constrained
assert a.constrain(b)
# ...but will fail during concretization if there are
# values in the variant that are not allowed
with pytest.raises(InvalidVariantValueError):
spack.concretize.concretize_one(a)
def test_multivalued_variant_5(self):
# This time we'll try to set a single-valued variant
a = Spec("multivalue-variant fee=bar")
b = Spec("multivalue-variant fee=baz")
# The specs are abstract and they **could** be constrained,
# as before concretization I don't know which type of variant
# I have (if it is not a BV)
assert a.intersects(b)
# A variant cannot be parsed as single-valued until we try to
# concretize. This means that we can constrain the variant above
assert a.constrain(b)
# ...but will fail during concretization if there are
# multiple values set
with pytest.raises(MultipleValuesInExclusiveVariantError):
spack.concretize.concretize_one(a)
def test_copy_satisfies_transitive(self):
spec = spack.concretize.concretize_one("dttop")
copy = spec.copy()
for s, t in zip(spec.traverse(), copy.traverse()):
assert s.satisfies(t)
assert t.satisfies(s)
def test_intersects_virtual(self):
assert Spec("mpich").intersects(Spec("mpi"))
assert Spec("mpich2").intersects(Spec("mpi"))
assert Spec("zmpi").intersects(Spec("mpi"))
def test_intersects_virtual_providers(self):
"""Tests that we can always intersect virtual providers from abstract specs.
Concretization will give meaning to virtuals, and eventually forbid certain
configurations.
"""
assert Spec("netlib-lapack ^openblas").intersects("netlib-lapack ^openblas")
assert Spec("netlib-lapack ^netlib-blas").intersects("netlib-lapack ^openblas")
assert Spec("netlib-lapack ^openblas").intersects("netlib-lapack ^netlib-blas")
assert Spec("netlib-lapack ^netlib-blas").intersects("netlib-lapack ^netlib-blas")
def test_intersectable_concrete_specs_must_have_the_same_hash(self):
"""Ensure that concrete specs are matched *exactly* by hash."""
s1 = spack.concretize.concretize_one("mpileaks")
s2 = s1.copy()
assert s1.satisfies(s2)
assert s2.satisfies(s1)
assert s1.intersects(s2)
# Simulate specs that were installed before and after a change to
# Spack's hashing algorithm. This just reverses s2's hash.
s2._hash = s1.dag_hash()[-1::-1]
assert not s1.satisfies(s2)
assert not s2.satisfies(s1)
assert not s1.intersects(s2)
# ========================================================================
# Indexing specs
# ========================================================================
def test_self_index(self):
s = Spec("callpath")
assert s["callpath"] == s
def test_dep_index(self, default_mock_concretization):
"""Tests __getitem__ and __contains__ for specs."""
s = default_mock_concretization("callpath")
assert s["callpath"] == s
# Real dependencies
for key in ("dyninst", "libdwarf", "libelf"):
assert isinstance(s[key], Spec)
assert s[key].name == key
assert key in s
# Virtual dependencies
assert s["mpi"].name == "mpich"
assert "mpi" in s
@pytest.mark.usefixtures("config")
def test_virtual_index(self):
s = spack.concretize.concretize_one("callpath")
s_mpich = spack.concretize.concretize_one("callpath ^mpich")
s_mpich2 = spack.concretize.concretize_one("callpath ^mpich2")
s_zmpi = spack.concretize.concretize_one("callpath ^zmpi")
assert s["mpi"].name != "mpi"
assert s_mpich["mpi"].name == "mpich"
assert s_mpich2["mpi"].name == "mpich2"
assert s_zmpi["zmpi"].name == "zmpi"
for spec in [s, s_mpich, s_mpich2, s_zmpi]:
assert "mpi" in spec
@pytest.mark.parametrize(
"lhs,rhs",
[
("libelf", "@1.0"),
("libelf", "@1.0:5.0"),
("libelf", "%gcc"),
("libelf%gcc", "%gcc@4.5"),
("libelf", "+debug"),
("libelf", "debug=*"),
("libelf", "~debug"),
("libelf", "debug=2"),
("libelf", 'cppflags="-O3"'),
("libelf", 'cppflags=="-O3"'),
("libelf^foo", "libelf^foo@1.0"),
("libelf^foo", "libelf^foo@1.0:5.0"),
("libelf^foo", "libelf^foo%gcc"),
("libelf^foo%gcc", "libelf^foo%gcc@4.5"),
("libelf^foo", "libelf^foo+debug"),
("libelf^foo", "libelf^foo~debug"),
("libelf", "^foo"),
("mpileaks ^callpath %gcc@14", "mpileaks ^callpath %gcc@14.1"),
("mpileaks %[deptypes=build] mpich", "mpileaks %[deptypes=link] mpich"),
("mpileaks %mpich", "mpileaks %[deptypes=link] mpich"),
],
)
def test_lhs_is_changed_when_constraining(self, lhs, rhs):
lhs, rhs = Spec(lhs), Spec(rhs)
assert lhs.intersects(rhs)
assert rhs.intersects(lhs)
assert not lhs.satisfies(rhs)
assert lhs.constrain(rhs) is True
assert lhs.satisfies(rhs)
@pytest.mark.parametrize(
"lhs,rhs",
[
("libelf", "libelf"),
("libelf@1.0", "@1.0"),
("libelf@1.0:5.0", "@1.0:5.0"),
("libelf%gcc", "%gcc"),
("libelf%gcc@4.5", "%gcc@4.5"),
("libelf+debug", "+debug"),
("libelf~debug", "~debug"),
("libelf debug=2", "debug=2"),
("libelf debug=2", "debug=*"),
('libelf cppflags="-O3"', 'cppflags="-O3"'),
('libelf cppflags=="-O3"', 'cppflags=="-O3"'),
("libelf^foo@1.0", "libelf^foo@1.0"),
("libelf^foo@1.0:5.0", "libelf^foo@1.0:5.0"),
("libelf^foo%gcc", "libelf^foo%gcc"),
("libelf^foo%gcc@4.5", "libelf^foo%gcc@4.5"),
("libelf^foo+debug", "libelf^foo+debug"),
("libelf^foo~debug", "libelf^foo~debug"),
('libelf^foo cppflags="-O3"', 'libelf^foo cppflags="-O3"'),
("mpileaks ^callpath %gcc@14.1", "mpileaks ^callpath %gcc@14"),
("mpileaks %[deptypes=build] gcc@14.1", "mpileaks %gcc@14"),
],
)
def test_lhs_is_not_changed_when_constraining(self, lhs, rhs):
lhs, rhs = Spec(lhs), Spec(rhs)
assert lhs.intersects(rhs)
assert rhs.intersects(lhs)
assert lhs.satisfies(rhs)
assert lhs.constrain(rhs) is False
def test_exceptional_paths_for_constructor(self):
with pytest.raises(TypeError):
Spec((1, 2))
with pytest.raises(ValueError):
Spec("libelf foo")
def test_spec_formatting(self, default_mock_concretization):
spec = default_mock_concretization("multivalue-variant cflags=-O2")
# Testing named strings ie {string} and whether we get
# the correct component
# Mixed case intentional to test both
# Fields are as follow
# fmt_str: the format string to test
# sigil: the portion that is a sigil (may be empty string)
# prop: the property to get
# component: subcomponent of spec from which to get property
package_segments = [
("{NAME}", "", "name", lambda spec: spec),
("{VERSION}", "", "version", lambda spec: spec),
("{compiler}", "", "compiler", lambda spec: spec),
("{compiler_flags}", "", "compiler_flags", lambda spec: spec),
("{variants}", "", "variants", lambda spec: spec),
("{architecture}", "", "architecture", lambda spec: spec),
("{@VERSIONS}", "@", "versions", lambda spec: spec),
("{%compiler}", "%", "compiler", lambda spec: spec),
("{arch=architecture}", "arch=", "architecture", lambda spec: spec),
("{namespace=namespace}", "namespace=", "namespace", lambda spec: spec),
("{compiler.name}", "", "name", lambda spec: spec.compiler),
("{compiler.version}", "", "version", lambda spec: spec.compiler),
(
"{compiler.version.up_to_1}",
"",
"up_to_1",
lambda spec: spec.compiler.version.up_to(1),
),
("{%compiler.name}", "%", "name", lambda spec: spec.compiler),
("{@compiler.version}", "@", "version", lambda spec: spec.compiler),
("{architecture.platform}", "", "platform", lambda spec: spec.architecture),
("{architecture.os}", "", "os", lambda spec: spec.architecture),
("{architecture.target}", "", "target", lambda spec: spec.architecture),
("{prefix}", "", "prefix", lambda spec: spec),
("{external}", "", "external", lambda spec: spec), # test we print "False"
]
hash_segments = [
("{hash:7}", "", lambda s: s.dag_hash(7)),
("{/hash}", "/", lambda s: "/" + s.dag_hash()),
]
variants_segments = [
("{variants.debug}", spec, "debug"),
("{variants.foo}", spec, "foo"),
("{^pkg-a.variants.bvv}", spec["pkg-a"], "bvv"),
("{^pkg-a.variants.foo}", spec["pkg-a"], "foo"),
]
other_segments = [
("{spack_root}", spack.paths.spack_root),
("{spack_install}", spack.store.STORE.layout.root),
]
def depify(depname, fmt_str, sigil):
sig = len(sigil)
opening = fmt_str[: 1 + sig]
closing = fmt_str[1 + sig :]
return spec[depname], opening + f"^{depname}." + closing
def check_prop(check_spec, fmt_str, prop, getter):
actual = spec.format(fmt_str)
expected = getter(check_spec)
assert actual == str(expected).strip()
for named_str, sigil, prop, get_component in package_segments:
getter = lambda s: sigil + str(getattr(get_component(s), prop, ""))
check_prop(spec, named_str, prop, getter)
mpi, fmt_str = depify("mpi", named_str, sigil)
check_prop(mpi, fmt_str, prop, getter)
for named_str, sigil, getter in hash_segments:
assert spec.format(named_str) == getter(spec)
callpath, fmt_str = depify("callpath", named_str, sigil)
assert spec.format(fmt_str) == getter(callpath)
for named_str, test_spec, variant_name in variants_segments:
assert test_spec.format(named_str) == str(test_spec.variants[variant_name])
assert test_spec.format(named_str[:-1] + ".value}") == str(
test_spec.variants[variant_name].value
)
for named_str, expected in other_segments:
actual = spec.format(named_str)
assert expected == actual
@pytest.mark.parametrize(
"fmt_str",
[
"{name}",
"{version}",
"{@version}",
"{namespace}",
"{ namespace=namespace}",
"{ namespace =namespace}",
"{ name space =namespace}",
"{arch}",
"{architecture}",
"{arch=architecture}",
"{ arch=architecture}",
"{ arch =architecture}",
],
)
def test_spec_format_null_attributes(self, fmt_str):
"""Ensure that attributes format to empty strings when their values are null."""
spec = spack.spec.Spec()
assert spec.format(fmt_str) == ""
def test_spec_formatting_spaces_in_key(self, default_mock_concretization):
spec = default_mock_concretization("multivalue-variant cflags=-O2")
# test that spaces are preserved if they come after some other text; otherwise
# they are trimmed.
# TODO: should we be trimming whitespace from formats? Probably not.
assert spec.format("x{ arch=architecture}") == f"x arch={spec.architecture}"
assert spec.format("x{ namespace=namespace}") == f"x namespace={spec.namespace}"
assert spec.format("x{ name space =namespace}") == f"x name space ={spec.namespace}"
assert spec.format("x{ os =os}") == f"x os ={spec.os}"
@pytest.mark.parametrize(
"fmt_str", ["{@name}", "{@version.concrete}", "{%compiler.version}", "{/hashd}"]
)
def test_spec_formatting_sigil_mismatches(self, default_mock_concretization, fmt_str):
spec = default_mock_concretization("multivalue-variant cflags=-O2")
with pytest.raises(SpecFormatSigilError):
spec.format(fmt_str)
@pytest.mark.parametrize(
"fmt_str",
[
r"{}",
r"name}",
r"\{name}",
r"{name",
r"{name\}",
r"{_concrete}",
r"{dag_hash}",
r"{foo}",
r"{+variants.debug}",
r"{variants.this_variant_does_not_exist}",
],
)
def test_spec_formatting_bad_formats(self, default_mock_concretization, fmt_str):
spec = default_mock_concretization("multivalue-variant cflags=-O2")
with pytest.raises(SpecFormatStringError):
spec.format(fmt_str)
def test_wildcard_is_invalid_variant_value(self):
"""The spec string x=* is parsed as a multi-valued variant with values the empty set.
That excludes * as a literal variant value."""
with pytest.raises(spack.spec_parser.SpecParsingError, match="cannot use reserved value"):
Spec("multivalue-variant foo=*,bar")
def test_errors_in_variant_directive(self):
variant = spack.directives.variant.__wrapped__
class Pkg:
name = "PKG"
# We can't use names that are reserved by Spack
fn = variant("patches")
with pytest.raises(spack.directives.DirectiveError) as exc_info:
fn(Pkg())
assert "The name 'patches' is reserved" in str(exc_info.value)
# We can't have conflicting definitions for arguments
fn = variant("foo", values=spack.variant.any_combination_of("fee", "foom"), default="bar")
with pytest.raises(spack.directives.DirectiveError) as exc_info:
fn(Pkg())
assert " it is handled by an attribute of the 'values' " "argument" in str(exc_info.value)
# We can't leave None as a default value
fn = variant("foo", default=None)
with pytest.raises(spack.directives.DirectiveError) as exc_info:
fn(Pkg())
assert "either a default was not explicitly set, or 'None' was used" in str(exc_info.value)
# We can't use an empty string as a default value
fn = variant("foo", default="")
with pytest.raises(spack.directives.DirectiveError) as exc_info:
fn(Pkg())
assert "the default cannot be an empty string" in str(exc_info.value)
def test_abstract_spec_prefix_error(self):
spec = Spec("libelf")
with pytest.raises(SpecError):
spec.prefix
def test_forwarding_of_architecture_attributes(self):
spec = spack.concretize.concretize_one("libelf target=x86_64")
# Check that we can still access each member through
# the architecture attribute
assert "test" in spec.architecture
assert "debian" in spec.architecture
assert "x86_64" in spec.architecture
# Check that we forward the platform and os attribute correctly
assert spec.platform == "test"
assert spec.os == "debian6"
# Check that the target is also forwarded correctly and supports
# all the operators we expect
assert spec.target == "x86_64"
assert spec.target.family == "x86_64"
assert "avx512" not in spec.target
assert spec.target < "broadwell"
@pytest.mark.parametrize("transitive", [True, False])
def test_splice(self, transitive, default_mock_concretization):
# Tests the new splice function in Spec using a somewhat simple case
# with a variant with a conditional dependency.
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-h+foo")
# Sanity checking that these are not the same thing.
assert dep.dag_hash() != spec["splice-h"].dag_hash()
# Do the splice.
out = spec.splice(dep, transitive)
# Returned spec should still be concrete.
assert out.concrete
# Traverse the spec and assert that all dependencies are accounted for.
for node in spec.traverse():
assert node.name in out
# If the splice worked, then the dag hash of the spliced dep should
# now match the dag hash of the build spec of the dependency from the
# returned spec.
out_h_build = out["splice-h"].build_spec
assert out_h_build.dag_hash() == dep.dag_hash()
# Transitivity should determine whether the transitive dependency was
# changed.
expected_z = dep["splice-z"] if transitive else spec["splice-z"]
assert out["splice-z"].dag_hash() == expected_z.dag_hash()
# Sanity check build spec of out should be the original spec.
assert out["splice-t"].build_spec.dag_hash() == spec["splice-t"].dag_hash()
# Finally, the spec should know it's been spliced:
assert out.spliced
def test_splice_intransitive_complex(self, setup_complex_splice):
a_red, c_blue = setup_complex_splice
spliced = a_red.splice(c_blue, transitive=False)
assert spliced.satisfies(
"pkg-a color=red ^pkg-b color=red ^pkg-c color=blue "
"^pkg-d color=red ^pkg-e color=red ^pkg-f color=blue ^pkg-g@2 color=red"
)
assert set(spliced.dependencies(deptype=dt.BUILD)) == set()
assert spliced.build_spec == a_red
# We cannot check spliced["b"].build_spec is spliced["b"] because Spec.__getitem__ creates
# a new wrapper object on each invocation. So we select once and check on that object
# For the rest of the unchanged specs we will just check that s._build_spec is None.
b = spliced["pkg-b"]
assert b == a_red["pkg-b"]
assert b.build_spec is b
assert set(b.dependents()) == {spliced}
assert spliced["pkg-c"].satisfies(
"pkg-c color=blue ^pkg-d color=red ^pkg-e color=red "
"^pkg-f color=blue ^pkg-g@2 color=red"
)
assert set(spliced["pkg-c"].dependencies(deptype=dt.BUILD)) == set()
assert spliced["pkg-c"].build_spec == c_blue
assert set(spliced["pkg-c"].dependents()) == {spliced}
assert spliced["pkg-d"] == a_red["pkg-d"]
assert spliced["pkg-d"]._build_spec is None
# Since D had a parent changed, it has a split edge for link vs build dependent
# note: spliced["b"] == b_red, referenced differently to preserve logic
assert set(spliced["pkg-d"].dependents()) == {
spliced["pkg-b"],
spliced["pkg-c"],
a_red["pkg-c"],
}
assert set(spliced["pkg-d"].dependents(deptype=dt.BUILD)) == {
a_red["pkg-b"],
a_red["pkg-c"],
}
assert spliced["pkg-e"] == a_red["pkg-e"]
assert spliced["pkg-e"]._build_spec is None
# Because a copy of e is used, it does not have dependents in the original specs
assert set(spliced["pkg-e"].dependents()) == {spliced["pkg-b"], spliced["pkg-f"]}
# Build dependent edge to f because f originally depended on the e that this was copied from
assert set(spliced["pkg-e"].dependents(deptype=dt.BUILD)) == {spliced["pkg-b"]}
assert spliced["pkg-f"].satisfies("pkg-f color=blue ^pkg-e color=red ^pkg-g@2 color=red")
assert set(spliced["pkg-f"].dependencies(deptype=dt.BUILD)) == set()
assert spliced["pkg-f"].build_spec == c_blue["pkg-f"]
assert set(spliced["pkg-f"].dependents()) == {spliced["pkg-c"]}
# spliced["pkg-g"] is g2, but spliced["pkg-b"]["pkg-g"] is g1
assert spliced["pkg-g"] == a_red["pkg-g"]
assert spliced["pkg-g"]._build_spec is None
assert set(spliced["pkg-g"].dependents(deptype=dt.LINK)) == {
spliced,
spliced["pkg-c"],
spliced["pkg-f"],
a_red["pkg-c"],
}
assert spliced["pkg-b"]["pkg-g"] == a_red["pkg-b"]["pkg-g"]
assert spliced["pkg-b"]["pkg-g"]._build_spec is None
assert set(spliced["pkg-b"]["pkg-g"].dependents()) == {
spliced["pkg-b"],
spliced["pkg-d"],
spliced["pkg-e"],
}
for edge in spliced.traverse_edges(cover="edges", deptype=dt.LINK | dt.RUN):
# traverse_edges creates a synthetic edge with no deptypes to the root
if edge.depflag:
depflag = dt.LINK
if not edge.parent.spliced:
depflag |= dt.BUILD
assert edge.depflag == depflag
def test_splice_transitive_complex(self, setup_complex_splice):
a_red, c_blue = setup_complex_splice
spliced = a_red.splice(c_blue, transitive=True)
assert spliced.satisfies(
"pkg-a color=red ^pkg-b color=red ^pkg-c color=blue ^pkg-d color=blue "
"^pkg-e color=blue ^pkg-f color=blue ^pkg-g@3 color=blue"
)
assert set(spliced.dependencies(deptype=dt.BUILD)) == set()
assert spliced.build_spec == a_red
assert spliced["pkg-b"].satisfies(
"pkg-b color=red ^pkg-d color=blue ^pkg-e color=blue ^pkg-g@2 color=blue"
)
assert set(spliced["pkg-b"].dependencies(deptype=dt.BUILD)) == set()
assert spliced["pkg-b"].build_spec == a_red["pkg-b"]
assert set(spliced["pkg-b"].dependents()) == {spliced}
# We cannot check spliced["c"].build_spec is spliced["c"] because Spec.__getitem__ creates
# a new wrapper object on each invocation. So we select once and check on that object
# For the rest of the unchanged specs we will just check that s._build_spec is None.
c = spliced["pkg-c"]
assert c == c_blue
assert c.build_spec is c
assert set(c.dependents()) == {spliced}
assert spliced["pkg-d"] == c_blue["pkg-d"]
assert spliced["pkg-d"]._build_spec is None
assert set(spliced["pkg-d"].dependents()) == {spliced["pkg-b"], spliced["pkg-c"]}
assert spliced["pkg-e"] == c_blue["pkg-e"]
assert spliced["pkg-e"]._build_spec is None
assert set(spliced["pkg-e"].dependents()) == {spliced["pkg-b"], spliced["pkg-f"]}
assert spliced["pkg-f"] == c_blue["pkg-f"]
assert spliced["pkg-f"]._build_spec is None
assert set(spliced["pkg-f"].dependents()) == {spliced["pkg-c"]}
# spliced["g"] is g3, but spliced["d"]["g"] is g1
assert spliced["pkg-g"] == c_blue["pkg-g"]
assert spliced["pkg-g"]._build_spec is None
assert set(spliced["pkg-g"].dependents(deptype=dt.LINK)) == {
spliced,
spliced["pkg-b"],
spliced["pkg-c"],
spliced["pkg-e"],
spliced["pkg-f"],
}
# Because a copy of g3 is used, it does not have dependents in the original specs
# It has build dependents on these spliced specs because it is an unchanged dependency
# for them
assert set(spliced["pkg-g"].dependents(deptype=dt.BUILD)) == {
spliced["pkg-c"],
spliced["pkg-e"],
spliced["pkg-f"],
}
assert spliced["pkg-d"]["pkg-g"] == c_blue["pkg-d"]["pkg-g"]
assert spliced["pkg-d"]["pkg-g"]._build_spec is None
assert set(spliced["pkg-d"]["pkg-g"].dependents()) == {spliced["pkg-d"]}
for edge in spliced.traverse_edges(cover="edges", deptype=dt.LINK | dt.RUN):
# traverse_edges creates a synthetic edge with no deptypes to the root
if edge.depflag:
depflag = dt.LINK
if not edge.parent.spliced:
depflag |= dt.BUILD
assert edge.depflag == depflag
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_with_cached_hashes(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-h+foo")
# monkeypatch hashes so we can test that they are cached
spec._hash = "aaaaaa"
dep._hash = "bbbbbb"
spec["splice-h"]._hash = "cccccc"
spec["splice-z"]._hash = "dddddd"
dep["splice-z"]._hash = "eeeeee"
out = spec.splice(dep, transitive=transitive)
out_z_expected = (dep if transitive else spec)["splice-z"]
assert out.dag_hash() != spec.dag_hash()
assert (out["splice-h"].dag_hash() == dep.dag_hash()) == transitive
assert out["splice-z"].dag_hash() == out_z_expected.dag_hash()
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_input_unchanged(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-h+foo")
orig_spec_hash = spec.dag_hash()
orig_dep_hash = dep.dag_hash()
spec.splice(dep, transitive)
# Post-splice, dag hash should still be different; no changes should be
# made to these specs.
assert spec.dag_hash() == orig_spec_hash
assert dep.dag_hash() == orig_dep_hash
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_subsequent(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-h+foo")
out = spec.splice(dep, transitive)
# Now we attempt a second splice.
dep = default_mock_concretization("splice-z+bar")
# Transitivity shouldn't matter since Splice Z has no dependencies.
out2 = out.splice(dep, transitive)
assert out2.concrete
assert out2["splice-z"].dag_hash() != spec["splice-z"].dag_hash()
assert out2["splice-z"].dag_hash() != out["splice-z"].dag_hash()
assert out2["splice-t"].build_spec.dag_hash() == spec["splice-t"].dag_hash()
assert out2.spliced
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_dict(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-h+foo")
out = spec.splice(dep, transitive)
# Sanity check all hashes are unique...
assert spec.dag_hash() != dep.dag_hash()
assert out.dag_hash() != dep.dag_hash()
assert out.dag_hash() != spec.dag_hash()
node_list = out.to_dict()["spec"]["nodes"]
root_nodes = [n for n in node_list if n["hash"] == out.dag_hash()]
build_spec_nodes = [n for n in node_list if n["hash"] == spec.dag_hash()]
assert spec.dag_hash() == out.build_spec.dag_hash()
assert len(root_nodes) == 1
assert len(build_spec_nodes) == 1
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_dict_roundtrip(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-t")
dep = default_mock_concretization("splice-h+foo")
out = spec.splice(dep, transitive)
# Sanity check all hashes are unique...
assert spec.dag_hash() != dep.dag_hash()
assert out.dag_hash() != dep.dag_hash()
assert out.dag_hash() != spec.dag_hash()
out_rt_spec = Spec.from_dict(out.to_dict()) # rt is "round trip"
assert out_rt_spec.dag_hash() == out.dag_hash()
out_rt_spec_bld_hash = out_rt_spec.build_spec.dag_hash()
out_rt_spec_h_bld_hash = out_rt_spec["splice-h"].build_spec.dag_hash()
out_rt_spec_z_bld_hash = out_rt_spec["splice-z"].build_spec.dag_hash()
# In any case, the build spec for splice-t (root) should point to the
# original spec, preserving build provenance.
assert spec.dag_hash() == out_rt_spec_bld_hash
assert out_rt_spec.dag_hash() != out_rt_spec_bld_hash
# The build spec for splice-h should always point to the introduced
# spec, since that is the spec spliced in.
assert dep["splice-h"].dag_hash() == out_rt_spec_h_bld_hash
# The build spec for splice-z will depend on whether or not the splice
# was transitive.
expected_z_bld_hash = (
dep["splice-z"].dag_hash() if transitive else spec["splice-z"].dag_hash()
)
assert expected_z_bld_hash == out_rt_spec_z_bld_hash
@pytest.mark.parametrize(
"spec,constraint,expected_result",
[
("libelf target=haswell", "target=broadwell", False),
("libelf target=haswell", "target=haswell", True),
("libelf target=haswell", "target=x86_64:", True),
("libelf target=haswell", "target=:haswell", True),
("libelf target=haswell", "target=icelake,:nocona", False),
("libelf target=haswell", "target=haswell,:nocona", True),
# Check that a single target is not treated as the start
# or the end of an open range
("libelf target=haswell", "target=x86_64", False),
("libelf target=x86_64", "target=haswell", False),
],
)
@pytest.mark.regression("13111")
def test_target_constraints(self, spec, constraint, expected_result):
s = Spec(spec)
assert s.intersects(constraint) is expected_result
@pytest.mark.regression("13124")
def test_error_message_unknown_variant(self):
s = Spec("mpileaks +unknown")
with pytest.raises(UnknownVariantError):
spack.concretize.concretize_one(s)
@pytest.mark.regression("18527")
def test_satisfies_dependencies_ordered(self):
d = Spec("zmpi ^fake")
s = Spec("mpileaks")
s._add_dependency(d, depflag=0, virtuals=())
assert s.satisfies("mpileaks ^zmpi ^fake")
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_swap_names(self, default_mock_concretization, transitive):
spec = default_mock_concretization("splice-vt")
dep = default_mock_concretization("splice-a+foo")
out = spec.splice(dep, transitive)
assert dep.name in out
assert transitive == ("+foo" in out["splice-z"])
@pytest.mark.parametrize("transitive", [True, False])
def test_splice_swap_names_mismatch_virtuals(self, default_mock_concretization, transitive):
vt = default_mock_concretization("splice-vt")
vh = default_mock_concretization("splice-vh+foo")
with pytest.raises(spack.spec.SpliceError, match="virtual"):
vt.splice(vh, transitive)
def test_adaptor_optflags(self):
"""Tests that we can obtain the list of optflags, and debugflags,
from the compiler adaptor, and that this list is taken from the
appropriate compiler package.
"""
# pkg-a depends on c, so only the gcc compiler should be chosen
spec = spack.concretize.concretize_one(Spec("pkg-a %gcc"))
assert "-Otestopt" in spec.package.compiler.opt_flags
# This is not set, make sure we get an empty list
for x in spec.package.compiler.debug_flags:
pass
def test_spec_override(self):
init_spec = Spec("pkg-a foo=baz foobar=baz cflags=-O3 cxxflags=-O1")
change_spec = Spec("pkg-a foo=fee cflags=-O2")
new_spec = spack.concretize.concretize_one(Spec.override(init_spec, change_spec))
assert "foo=fee" in new_spec
# This check fails without concretizing: apparently if both specs are
# abstract, then the spec will always be considered to satisfy
# 'variant=value' (regardless of whether it in fact does).
assert "foo=baz" not in new_spec
assert "foobar=baz" in new_spec
assert new_spec.compiler_flags["cflags"] == ["-O2"]
assert new_spec.compiler_flags["cxxflags"] == ["-O1"]
def test_spec_override_with_nonexisting_variant(self):
init_spec = Spec("pkg-a foo=baz foobar=baz cflags=-O3 cxxflags=-O1")
change_spec = Spec("pkg-a baz=fee")
with pytest.raises(ValueError):
Spec.override(init_spec, change_spec)
def test_spec_override_with_variant_not_in_init_spec(self):
init_spec = Spec("pkg-a foo=baz foobar=baz cflags=-O3 cxxflags=-O1")
change_spec = Spec("pkg-a +bvv ~lorem_ipsum")
new_spec = spack.concretize.concretize_one(Spec.override(init_spec, change_spec))
assert "+bvv" in new_spec
assert "~lorem_ipsum" in new_spec
@pytest.mark.parametrize(
"spec_str,specs_in_dag",
[
("hdf5 ^[virtuals=mpi] mpich", [("mpich", "mpich"), ("mpi", "mpich")]),
# Try different combinations with packages that provides a
# disjoint set of virtual dependencies
(
"netlib-scalapack ^mpich ^openblas-with-lapack",
[
("mpi", "mpich"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
(
"netlib-scalapack ^[virtuals=mpi] mpich ^openblas-with-lapack",
[
("mpi", "mpich"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
(
"netlib-scalapack ^mpich ^[virtuals=lapack] openblas-with-lapack",
[
("mpi", "mpich"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
(
"netlib-scalapack ^[virtuals=mpi] mpich ^[virtuals=lapack] openblas-with-lapack",
[
("mpi", "mpich"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
# Test that we can mix dependencies that provide an overlapping
# sets of virtual dependencies
(
"netlib-scalapack ^[virtuals=mpi] intel-parallel-studio "
"^[virtuals=lapack] openblas-with-lapack",
[
("mpi", "intel-parallel-studio"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
(
"netlib-scalapack ^[virtuals=mpi] intel-parallel-studio ^openblas-with-lapack",
[
("mpi", "intel-parallel-studio"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
(
"netlib-scalapack ^intel-parallel-studio ^[virtuals=lapack] openblas-with-lapack",
[
("mpi", "intel-parallel-studio"),
("lapack", "openblas-with-lapack"),
("blas", "openblas-with-lapack"),
],
),
# Test that we can bind more than one virtual to the same provider
(
"netlib-scalapack ^[virtuals=lapack,blas] openblas-with-lapack",
[("lapack", "openblas-with-lapack"), ("blas", "openblas-with-lapack")],
),
],
)
def test_virtual_deps_bindings(self, default_mock_concretization, spec_str, specs_in_dag):
s = default_mock_concretization(spec_str)
for label, expected in specs_in_dag:
assert label in s
assert s[label].satisfies(expected), label
@pytest.mark.parametrize(
"spec_str",
[
# openblas-with-lapack needs to provide blas and lapack together
"netlib-scalapack ^[virtuals=blas] intel-parallel-studio ^openblas-with-lapack",
# intel-* provides blas and lapack together, openblas can provide blas only
"netlib-scalapack ^[virtuals=lapack] intel-parallel-studio ^openblas",
],
)
def test_unsatisfiable_virtual_deps_bindings(self, spec_str):
with pytest.raises(spack.solver.asp.UnsatisfiableSpecError):
spack.concretize.concretize_one(spec_str)
@pytest.mark.parametrize(
"spec_str,abstract_tests,concrete_tests",
[
            # Ensure the 'when=+debug' refers to 'callpath', not to 'mpileaks',
            # and that we can concretize the spec even though 'callpath' has no debug variant
(
"mpileaks+debug ^callpath %[when=+debug virtuals=mpi] zmpi",
[
("^zmpi", False),
("^mpich", False),
("mpileaks+debug %[when=+debug virtuals=mpi] zmpi", False),
],
[("^zmpi", False), ("^[virtuals=mpi] mpich", True)],
),
# Ensure we don't skip conditional edges when testing because we associate them
# with the wrong node (e.g. mpileaks instead of mpich)
(
"mpileaks~debug ^mpich+debug %[when=+debug virtuals=c] llvm",
[("^mpich+debug %[when=+debug virtuals=c] gcc", False)],
[("^mpich %[virtuals=c] gcc", False), ("^mpich %[virtuals=c] llvm", True)],
),
],
)
def test_conditional_dependencies_satisfies(
self, spec_str, abstract_tests, concrete_tests, default_mock_concretization
):
"""Tests satisfaction semantics for conditional specs, in different scenarios."""
s = Spec(spec_str)
for c, result in abstract_tests:
assert s.satisfies(c) is result
concrete = default_mock_concretization(spec_str)
for c, result in concrete_tests:
assert concrete.satisfies(c) is result
@pytest.mark.parametrize(
"spec_str,format_str,expected",
[
("git-test@git.foo/bar", "{name}-{version}", str(pathlib.Path("git-test-git.foo_bar"))),
("git-test@git.foo/bar", "{name}-{version}-{/hash}", None),
("git-test@git.foo/bar", "{name}/{version}", str(pathlib.Path("git-test", "git.foo_bar"))),
# {compiler} is 'none' if a package does not depend on C, C++, or Fortran
(
f"git-test@{'a' * 40}=1.0%gcc",
"{name}/{version}/{compiler}",
str(pathlib.Path("git-test", f"{'a' * 40}_1.0", "none")),
),
(
"git-test@git.foo/bar=1.0%gcc",
"{name}/{version}/{compiler}",
str(pathlib.Path("git-test", "git.foo_bar_1.0", "none")),
),
],
)
def test_spec_format_path(spec_str, format_str, expected, mock_git_test_package):
_check_spec_format_path(spec_str, format_str, expected)
def _check_spec_format_path(spec_str, format_str, expected, path_ctor=None):
spec = Spec(spec_str)
if not expected:
with pytest.raises((spack.spec.SpecFormatPathError, spack.spec.SpecFormatStringError)):
spec.format_path(format_str, _path_ctor=path_ctor)
else:
formatted = spec.format_path(format_str, _path_ctor=path_ctor)
assert formatted == expected
@pytest.mark.parametrize(
"spec_str,format_str,expected",
[
(
"git-test@git.foo/bar",
r"C:\\installroot\{name}\{version}",
r"C:\installroot\git-test\git.foo_bar",
),
(
"git-test@git.foo/bar",
r"\\hostname\sharename\{name}\{version}",
r"\\hostname\sharename\git-test\git.foo_bar",
),
# leading '/' is preserved on windows but converted to '\'
# note that it's still not "absolute" -- absolute windows paths start with a drive.
(
"git-test@git.foo/bar",
r"/installroot/{name}/{version}",
r"\installroot\git-test\git.foo_bar",
),
],
)
def test_spec_format_path_windows(spec_str, format_str, expected, mock_git_test_package):
_check_spec_format_path(spec_str, format_str, expected, path_ctor=pathlib.PureWindowsPath)
@pytest.mark.parametrize(
"spec_str,format_str,expected",
[
(
"git-test@git.foo/bar",
r"/installroot/{name}/{version}",
"/installroot/git-test/git.foo_bar",
),
(
"git-test@git.foo/bar",
r"//installroot/{name}/{version}",
"//installroot/git-test/git.foo_bar",
),
# This is likely unintentional on Linux: Firstly, "\" is not a
# path separator for POSIX, so this is treated as a single path
# component (containing literal "\" characters); secondly,
            # Spec.format treats "\" as an escape character, so it is
            # discarded (unless it directly follows another "\")
(
"git-test@git.foo/bar",
r"C:\\installroot\package-{name}-{version}",
r"C__installrootpackage-git-test-git.foo_bar",
),
# "\" is not a POSIX separator, and Spec.format treats "\{" as a literal
# "{", which means that the resulting format string is invalid
("git-test@git.foo/bar", r"package\{name}\{version}", None),
],
)
def test_spec_format_path_posix(spec_str, format_str, expected, mock_git_test_package):
_check_spec_format_path(spec_str, format_str, expected, path_ctor=pathlib.PurePosixPath)
@pytest.mark.regression("3887")
@pytest.mark.parametrize("spec_str", ["py-extension2", "extension1", "perl-extension"])
def test_is_extension_after_round_trip_to_dict(config, mock_packages, spec_str):
# x is constructed directly from string, y from a
# round-trip to dict representation
x = spack.concretize.concretize_one(spec_str)
y = Spec.from_dict(x.to_dict())
# Using 'y' since the round-trip make us lose build dependencies
for d in y.traverse():
assert x[d.name].package.is_extension == y[d.name].package.is_extension
def test_malformed_spec_dict():
# FIXME: This test was really testing the specific implementation with an ad-hoc test
with pytest.raises(SpecError, match="malformed"):
Spec.from_dict(
{"spec": {"_meta": {"version": 2}, "nodes": [{"dependencies": {"name": "foo"}}]}}
)
def test_spec_dict_hashless_dep():
# FIXME: This test was really testing the specific implementation with an ad-hoc test
with pytest.raises(SpecError, match="Couldn't parse"):
Spec.from_dict(
{
"spec": {
"_meta": {"version": 2},
"nodes": [
{"name": "foo", "hash": "thehash", "dependencies": [{"name": "bar"}]}
],
}
}
)
@pytest.mark.parametrize(
"anonymous,named,expected",
[
("+plumed", "gromacs", "gromacs+plumed"),
("+plumed ^plumed%gcc", "gromacs", "gromacs+plumed ^plumed%gcc"),
("+plumed", "builtin.gromacs", "builtin.gromacs+plumed"),
],
)
def test_merge_anonymous_spec_with_named_spec(anonymous, named, expected):
s = Spec(anonymous)
changed = s.constrain(named)
assert changed
assert s == Spec(expected)
def test_spec_installed(default_mock_concretization, database):
"""Test whether Spec.installed works."""
# a known installed spec should say that it's installed
specs = database.query()
spec = specs[0]
assert spec.installed
assert spec.copy().installed
# an abstract spec should say it's not installed
spec = Spec("not-a-real-package")
assert not spec.installed
# pkg-a is not in the mock DB and is not installed
spec = default_mock_concretization("pkg-a")
assert not spec.installed
@pytest.mark.regression("30678")
def test_call_dag_hash_on_old_dag_hash_spec(mock_packages, default_mock_concretization):
# create a concrete spec
a = default_mock_concretization("pkg-a")
dag_hashes = {spec.name: spec.dag_hash() for spec in a.traverse()}
# make it look like an old DAG hash spec with no package hash on the spec.
for spec in a.traverse():
assert spec.concrete
spec._package_hash = None
for spec in a.traverse():
assert dag_hashes[spec.name] == spec.dag_hash()
with pytest.raises(ValueError, match="Cannot call package_hash()"):
spec.package_hash()
def test_spec_trim(mock_packages, config):
top = spack.concretize.concretize_one("dt-diamond")
top.trim("dt-diamond-left")
remaining = {x.name for x in top.traverse()}
assert {
"compiler-wrapper",
"dt-diamond",
"dt-diamond-right",
"dt-diamond-bottom",
"gcc-runtime",
"gcc",
} == remaining
top.trim("dt-diamond-right")
remaining = {x.name for x in top.traverse()}
assert {"compiler-wrapper", "dt-diamond", "gcc-runtime", "gcc"} == remaining
@pytest.mark.regression("30861")
def test_concretize_partial_old_dag_hash_spec(mock_packages, config):
# create an "old" spec with no package hash
bottom = spack.concretize.concretize_one("dt-diamond-bottom")
delattr(bottom, "_package_hash")
dummy_hash = "zd4m26eis2wwbvtyfiliar27wkcv3ehk"
bottom._hash = dummy_hash
# add it to an abstract spec as a dependency
top = Spec("dt-diamond")
top.add_dependency_edge(bottom, depflag=0, virtuals=())
# concretize with the already-concrete dependency
top = spack.concretize.concretize_one(top)
for spec in top.traverse():
assert spec.concrete
# make sure dag_hash is untouched
assert spec["dt-diamond-bottom"].dag_hash() == dummy_hash
assert spec["dt-diamond-bottom"]._hash == dummy_hash
# make sure package hash is NOT recomputed
assert not getattr(spec["dt-diamond-bottom"], "_package_hash", None)
def test_package_hash_affects_dunder_and_dag_hash(mock_packages, default_mock_concretization):
a1 = default_mock_concretization("pkg-a")
a2 = default_mock_concretization("pkg-a")
assert hash(a1) == hash(a2)
assert a1.dag_hash() == a2.dag_hash()
a1.clear_caches()
a2.clear_caches()
# tweak the dag hash of one of these specs
new_hash = "00000000000000000000000000000000"
if new_hash == a1._package_hash:
new_hash = "11111111111111111111111111111111"
a1._package_hash = new_hash
assert hash(a1) != hash(a2)
assert a1.dag_hash() != a2.dag_hash()
def test_intersects_and_satisfies_on_concretized_spec(default_mock_concretization):
"""Test that a spec obtained by concretizing an abstract spec, satisfies the abstract spec
but not vice-versa.
"""
a1 = default_mock_concretization("pkg-a@1.0")
a2 = Spec("pkg-a@1.0")
assert a1.intersects(a2)
assert a2.intersects(a1)
assert a1.satisfies(a2)
assert not a2.satisfies(a1)
@pytest.mark.parametrize(
"abstract_spec,spec_str",
[
("v1-provider", "v1-consumer ^conditional-provider+disable-v1"),
("conditional-provider", "v1-consumer ^conditional-provider+disable-v1"),
("^v1-provider", "v1-consumer ^conditional-provider+disable-v1"),
("^conditional-provider", "v1-consumer ^conditional-provider+disable-v1"),
],
)
@pytest.mark.regression("35597")
def test_abstract_provider_in_spec(abstract_spec, spec_str, default_mock_concretization):
s = default_mock_concretization(spec_str)
assert abstract_spec in s
@pytest.mark.parametrize(
"lhs,rhs,expected", [("a", "a", True), ("a", "a@1.0", True), ("a@1.0", "a", False)]
)
def test_abstract_contains_semantic(lhs, rhs, expected, mock_packages):
s, t = Spec(lhs), Spec(rhs)
result = s in t
assert result is expected
@pytest.mark.parametrize(
"factory,lhs_str,rhs_str,results",
[
# Architecture
(ArchSpec, "None-ubuntu20.04-None", "None-None-x86_64", (True, False, False)),
(ArchSpec, "None-ubuntu20.04-None", "linux-None-x86_64", (True, False, False)),
(ArchSpec, "None-None-x86_64:", "linux-None-haswell", (True, False, True)),
(ArchSpec, "None-None-x86_64:haswell", "linux-None-icelake", (False, False, False)),
(ArchSpec, "linux-None-None", "linux-None-None", (True, True, True)),
(ArchSpec, "darwin-None-None", "linux-None-None", (False, False, False)),
(ArchSpec, "None-ubuntu20.04-None", "None-ubuntu20.04-None", (True, True, True)),
(ArchSpec, "None-ubuntu20.04-None", "None-ubuntu22.04-None", (False, False, False)),
# Compiler
(Spec, "gcc", "clang", (False, False, False)),
(Spec, "gcc", "gcc@5", (True, False, True)),
(Spec, "gcc@5", "gcc@5.3", (True, False, True)),
(Spec, "gcc@5", "gcc@5-tag", (True, False, True)),
# Flags (flags are a map, so for convenience we initialize a full Spec)
# Note: the semantic is that of sv variants, not mv variants
(Spec, "cppflags=-foo", "cppflags=-bar", (True, False, False)),
(Spec, "cppflags='-bar -foo'", "cppflags=-bar", (True, True, False)),
(Spec, "cppflags=-foo", "cppflags=-foo", (True, True, True)),
(Spec, "cppflags=-foo", "cflags=-foo", (True, False, False)),
# Versions
(Spec, "@0.94h", "@:0.94i", (True, True, False)),
# Different virtuals intersect if there is at least package providing both
(Spec, "mpi", "lapack", (True, False, False)),
(Spec, "mpi", "pkgconfig", (False, False, False)),
# Intersection among target ranges for different architectures
(Spec, "target=x86_64:", "target=ppc64le:", (False, False, False)),
(Spec, "target=x86_64:", "target=:power9", (False, False, False)),
(Spec, "target=:haswell", "target=:power9", (False, False, False)),
(Spec, "target=:haswell", "target=ppc64le:", (False, False, False)),
# Intersection among target ranges for the same architecture
(Spec, "target=:haswell", "target=x86_64:", (True, True, True)),
(Spec, "target=:haswell", "target=x86_64_v4:", (False, False, False)),
# Edge case of uarch that split in a diamond structure, from a common ancestor
(Spec, "target=:cascadelake", "target=:cannonlake", (False, False, False)),
# Spec with compilers
(Spec, "mpileaks %gcc@5", "mpileaks %gcc@6", (False, False, False)),
(Spec, "mpileaks ^callpath %gcc@5", "mpileaks ^callpath %gcc@6", (False, False, False)),
(Spec, "mpileaks ^callpath %gcc@5", "mpileaks ^callpath %gcc@5.4", (True, False, True)),
],
)
def test_intersects_and_satisfies(mock_packages, factory, lhs_str, rhs_str, results):
lhs = factory(lhs_str)
rhs = factory(rhs_str)
intersects, lhs_satisfies_rhs, rhs_satisfies_lhs = results
assert lhs.intersects(rhs) is intersects
assert rhs.intersects(lhs) is lhs.intersects(rhs)
assert lhs.satisfies(rhs) is lhs_satisfies_rhs
assert rhs.satisfies(lhs) is rhs_satisfies_lhs
@pytest.mark.parametrize(
"factory,lhs_str,rhs_str,result,constrained_str",
[
# Architecture
(ArchSpec, "None-ubuntu20.04-None", "None-None-x86_64", True, "None-ubuntu20.04-x86_64"),
(ArchSpec, "None-None-x86_64", "None-None-x86_64", False, "None-None-x86_64"),
(
ArchSpec,
"None-None-x86_64:icelake",
"None-None-x86_64:icelake",
False,
"None-None-x86_64:icelake",
),
(ArchSpec, "None-ubuntu20.04-None", "linux-None-x86_64", True, "linux-ubuntu20.04-x86_64"),
(
ArchSpec,
"None-ubuntu20.04-nocona:haswell",
"None-None-x86_64:icelake",
False,
"None-ubuntu20.04-nocona:haswell",
),
(
ArchSpec,
"None-ubuntu20.04-nocona,haswell",
"None-None-x86_64:icelake",
False,
"None-ubuntu20.04-nocona,haswell",
),
# Compiler
(Spec, "foo %gcc@5", "foo %gcc@5-tag", True, "foo %gcc@5-tag"),
(Spec, "foo %gcc@5", "foo %gcc@5", False, "foo %gcc@5"),
# Flags
(Spec, "cppflags=-foo", "cppflags=-foo", False, "cppflags=-foo"),
(Spec, "cppflags=-foo", "cflags=-foo", True, "cppflags=-foo cflags=-foo"),
# Target ranges
(Spec, "target=x86_64:", "target=x86_64:", False, "target=x86_64:"),
(Spec, "target=x86_64:", "target=:haswell", True, "target=x86_64:haswell"),
(
Spec,
"target=x86_64:haswell",
"target=x86_64_v2:icelake",
True,
"target=x86_64_v2:haswell",
),
],
)
def test_constrain(factory, lhs_str, rhs_str, result, constrained_str):
lhs = factory(lhs_str)
rhs = factory(rhs_str)
assert lhs.constrain(rhs) is result
assert lhs == factory(constrained_str)
# The intersection must be the same, so check that invariant too
lhs = factory(lhs_str)
rhs = factory(rhs_str)
rhs.constrain(lhs)
assert rhs == factory(constrained_str)
def test_abstract_hash_intersects_and_satisfies(default_mock_concretization):
concrete: Spec = default_mock_concretization("pkg-a")
hash = concrete.dag_hash()
hash_5 = hash[:5]
hash_6 = hash[:6]
# abstract hash that doesn't have a common prefix with the others.
hash_other = f"{'a' if hash_5[0] == 'b' else 'b'}{hash_5[1:]}"
abstract_5 = Spec(f"pkg-a/{hash_5}")
abstract_6 = Spec(f"pkg-a/{hash_6}")
abstract_none = Spec(f"pkg-a/{hash_other}")
abstract = Spec("pkg-a")
def assert_subset(a: Spec, b: Spec):
assert a.intersects(b) and b.intersects(a) and a.satisfies(b) and not b.satisfies(a)
def assert_disjoint(a: Spec, b: Spec):
assert (
not a.intersects(b)
and not b.intersects(a)
and not a.satisfies(b)
and not b.satisfies(a)
)
# left-hand side is more constrained, so its
# concretization space is a subset of the right-hand side's
assert_subset(concrete, abstract_5)
assert_subset(abstract_6, abstract_5)
assert_subset(abstract_5, abstract)
# disjoint concretization space
assert_disjoint(abstract_none, concrete)
assert_disjoint(abstract_none, abstract_5)
def test_edge_equality_does_not_depend_on_virtual_order():
"""Tests that two edges that are constructed with just a different order of the virtuals in
the input parameters are equal to each other.
"""
parent, child = Spec("parent"), Spec("child")
edge1 = DependencySpec(parent, child, depflag=0, virtuals=("mpi", "lapack"))
edge2 = DependencySpec(parent, child, depflag=0, virtuals=("lapack", "mpi"))
assert edge1 == edge2
assert tuple(sorted(edge1.virtuals)) == edge1.virtuals
assert tuple(sorted(edge2.virtuals)) == edge1.virtuals
def test_update_virtuals():
parent, child = Spec("parent"), Spec("child")
edge = DependencySpec(parent, child, depflag=0, virtuals=("mpi", "lapack"))
assert edge.update_virtuals("blas")
assert edge.virtuals == ("blas", "lapack", "mpi")
assert edge.update_virtuals(("c", "fortran", "mpi", "lapack"))
assert edge.virtuals == ("blas", "c", "fortran", "lapack", "mpi")
assert not edge.update_virtuals("mpi")
assert not edge.update_virtuals(("c", "fortran", "mpi", "lapack"))
assert edge.virtuals == ("blas", "c", "fortran", "lapack", "mpi")
def test_virtual_queries_work_for_strings_and_lists():
"""Ensure that ``dependencies()`` works with both virtuals=str and virtuals=[str, ...]."""
parent, child = Spec("parent"), Spec("child")
parent._add_dependency(
child, depflag=dt.BUILD, virtuals=("cxx", "fortran") # multi-char dep names
)
assert not parent.dependencies(virtuals="c") # not in virtuals but shares a char with cxx
for lang in ["cxx", "fortran"]:
assert parent.dependencies(virtuals=lang) # string arg
assert parent.edges_to_dependencies(virtuals=lang) # string arg
assert parent.dependencies(virtuals=[lang]) # list arg
        assert parent.edges_to_dependencies(virtuals=[lang])  # list arg
def test_old_format_strings_trigger_error(default_mock_concretization):
s = spack.concretize.concretize_one("pkg-a")
with pytest.raises(SpecFormatStringError):
s.format("${PACKAGE}-${VERSION}-${HASH}")
@pytest.mark.regression("47362")
@pytest.mark.parametrize(
"lhs,rhs",
[
("hdf5 +mpi", "hdf5++mpi"),
("hdf5 cflags==-g", "hdf5 cflags=-g"),
("hdf5 +mpi ++shared", "hdf5+mpi +shared"),
("hdf5 +mpi cflags==-g", "hdf5++mpi cflag=-g"),
],
)
def test_equality_discriminate_on_propagation(lhs, rhs):
"""Tests that == can discriminate abstract specs based on their 'propagation' status"""
s, t = Spec(lhs), Spec(rhs)
assert s != t
assert len({s, t}) == 2
def test_comparison_multivalued_variants():
assert Spec("x=a") < Spec("x=a,b") < Spec("x==a,b") < Spec("x==a,b,c")
@pytest.mark.parametrize(
"specs_in_expected_order",
[
("a", "b", "c", "d", "e"),
("a@1.0", "a@2.0", "b", "c@3.0", "c@4.0"),
("a^d", "b^c", "c^b", "d^a"),
("e^a", "e^b", "e^c", "e^d"),
("e^a@1.0", "e^a@2.0", "e^a@3.0", "e^a@4.0"),
("e^a@1.0 +a", "e^a@1.0 +b", "e^a@1.0 +c", "e^a@1.0 +c"),
("a^b%c", "a^b%d", "a^b%e", "a^b%f"),
("a^b%c@1.0", "a^b%c@2.0", "a^b%c@3.0", "a^b%c@4.0"),
("a^b%c@1.0 +a", "a^b%c@1.0 +b", "a^b%c@1.0 +c", "a^b%c@1.0 +d"),
("a cflags=-O1", "a cflags=-O2", "a cflags=-O3"),
("a %cmake@1.0 ^b %cmake@2.0", "a %cmake@2.0 ^b %cmake@1.0"),
("a^b^c^d", "a^b^c^e", "a^b^c^f"),
("a^b^c^d", "a^b^c^e", "a^b^c^e", "a^b^c^f"),
("a%b%c%d", "a%b%c%e", "a%b%c%e", "a%b%c%f"),
("d.a", "c.b", "b.c", "a.d"), # names before namespaces
],
)
def test_spec_ordering(specs_in_expected_order):
specs_in_expected_order = [Spec(s) for s in specs_in_expected_order]
assert sorted(specs_in_expected_order) == specs_in_expected_order
assert sorted(reversed(specs_in_expected_order)) == specs_in_expected_order
for i in range(len(specs_in_expected_order) - 1):
lhs, rhs = specs_in_expected_order[i : i + 2]
assert lhs <= rhs
assert (lhs < rhs and lhs != rhs) or lhs == rhs
assert rhs >= lhs
assert (rhs > lhs and rhs != lhs) or rhs == lhs
EMPTY_VER = vn.VersionList(":")
EMPTY_VAR = Spec().variants
EMPTY_FLG = Spec().compiler_flags
@pytest.mark.parametrize(
"spec,expected_tuplified",
[
# simple, no dependencies
[("a"), ((("a", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),), ())],
# with some node attributes
[
("a@1.0 +foo cflags='-O3 -g'"),
(
(
(
"a",
None,
vn.VersionList(["1.0"]),
Spec("+foo").variants,
Spec("cflags='-O3 -g'").compiler_flags,
None,
None,
None,
),
),
(),
),
],
# single edge case
[
("a^b"),
(
(
("a", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("b", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
),
((0, 1, 0, (), False, Spec()),),
),
],
# root with multiple deps
[
("a^b^c^d"),
(
(
("a", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("b", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("c", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("d", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
),
(
(0, 1, 0, (), False, Spec()),
(0, 2, 0, (), False, Spec()),
(0, 3, 0, (), False, Spec()),
),
),
],
# root with multiple build deps
[
("a%b%c%d"),
(
(
("a", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("b", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("c", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("d", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
),
(
(0, 1, 0, (), True, Spec()),
(0, 2, 0, (), True, Spec()),
(0, 3, 0, (), True, Spec()),
),
),
],
# dependencies with dependencies
[
("a ^b %c %d ^e %f %g"),
(
(
("a", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("b", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("e", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("c", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("d", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("f", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
("g", None, EMPTY_VER, EMPTY_VAR, EMPTY_FLG, None, None, None),
),
(
(0, 1, 0, (), False, Spec()),
(0, 2, 0, (), False, Spec()),
(1, 3, 0, (), True, Spec()),
(1, 4, 0, (), True, Spec()),
(2, 5, 0, (), True, Spec()),
(2, 6, 0, (), True, Spec()),
),
),
],
],
)
def test_spec_canonical_comparison_form(spec, expected_tuplified):
"""Tests a few expected canonical comparison form of specs"""
assert spack.llnl.util.lang.tuplify(Spec(spec)._cmp_iter) == expected_tuplified
def test_comparison_after_breaking_hash_change():
# We simulate a breaking change in DAG hash computation in Spack. We have two specs that are
# entirely equal modulo DAG hash. When deserializing these specs, we don't want them to compare
# as equal, because DAG hash is used throughout in Spack to distinguish between specs
# (e.g. database, build caches, install dir).
s = Spec("example@=1.0")
s._mark_concrete(True)
# compute the dag hash and a change to it
dag_hash = s.dag_hash()
new_dag_hash = f"{'b' if dag_hash[0] == 'a' else 'a'}{dag_hash[1:]}"
before_breakage = s.to_dict()
after_breakage = s.to_dict()
after_breakage["spec"]["nodes"][0]["hash"] = new_dag_hash
assert before_breakage != after_breakage
x = Spec.from_dict(before_breakage)
y = Spec.from_dict(after_breakage)
assert x != y
assert len({x, y}) == 2
def test_satisfies_and_subscript_with_compilers(default_mock_concretization):
"""Tests the semantic of "satisfies" and __getitem__ for the following spec:
[ ] multivalue-variant@2.3
[bl ] ^callpath@1.0
[bl ] ^dyninst@8.2
[bl ] ^libdwarf@20130729
[bl ] ^libelf@0.8.13
[b ] ^gcc@10.2.1
[ l ] ^gcc-runtime@10.2.1
[bl ] ^mpich@3.0.4
[bl ] ^pkg-a@2.0
[b ] ^gmake@4.4
[bl ] ^pkg-b@1.0
"""
s = default_mock_concretization("multivalue-variant")
# Check a direct build/link dependency
assert s.satisfies("^pkg-a")
assert s.dependencies(name="pkg-a")[0] == s["pkg-a"]
# Transitive build/link dependency
assert s.satisfies("^libelf")
assert s["libdwarf"].dependencies(name="libelf")[0] == s["libelf"]
# Direct build dependencies
assert s.satisfies("^[virtuals=c] gcc")
assert s.satisfies("%[virtuals=c] gcc")
assert s.dependencies(name="gcc")[0] == s["gcc"]
assert s.dependencies(name="gcc")[0] == s["c"]
# Transitive build dependencies
assert not s.satisfies("^gmake")
# "gmake" is not in the link/run subdag + direct build deps
with pytest.raises(KeyError):
_ = s["gmake"]
# We need to pass through "pkg-a" to get "gmake" with [] notation
assert s["pkg-a"].dependencies(name="gmake")[0] == s["pkg-a"]["gmake"]
@pytest.mark.parametrize(
"spec_str,spec_fmt,expected",
[
# Depends on C
("mpileaks", "{name}-{compiler.name}", "mpileaks-gcc"),
("mpileaks", "{name}-{compiler.name}-{compiler.version}", "mpileaks-gcc-10.2.1"),
# No compiler
("pkg-c", "{name}-{compiler.name}", "pkg-c-none"),
("pkg-c", "{name}-{compiler.name}-{compiler.version}", "pkg-c-none-none"),
],
)
def test_spec_format_with_compiler_adaptors(
spec_str, spec_fmt, expected, default_mock_concretization
):
"""Tests the output of spec format, when involving `Spec.compiler` adaptors"""
s = default_mock_concretization(spec_str)
assert s.format(spec_fmt) == expected
@pytest.mark.parametrize(
"lhs,rhs,expected",
[
("mpich %gcc", "mpich %gcc", True),
("mpich %gcc", "mpich ^gcc", False),
("mpich ^callpath %gcc", "mpich %gcc ^callpath", False),
],
)
def test_specs_equality(lhs, rhs, expected):
"""Tests the semantic of == for abstract specs"""
lhs, rhs = Spec(lhs), Spec(rhs)
assert (lhs == rhs) is expected
def test_edge_equality_accounts_for_when_condition():
"""Tests that edges can be distinguished by their 'when' condition."""
parent, child = Spec("parent"), Spec("child")
edge1 = DependencySpec(parent, child, depflag=0, virtuals=(), when=Spec("%c"))
edge2 = DependencySpec(parent, child, depflag=0, virtuals=())
assert edge1 != edge2
def test_long_spec():
"""Test that long_spec preserves dependency types and has correct ordering."""
assert Spec("foo %m %l ^k %n %j").long_spec == "foo %l %m ^k %j %n"
@pytest.mark.parametrize(
"constraints,expected",
[
# Anonymous specs without dependencies
(["+baz", "+bar"], "+baz+bar"),
(["@2.0:", "@:5.1", "+bar"], "@2.0:5.1 +bar"),
# Anonymous specs with dependencies
(["^mpich@3.2", "^mpich@:4.0+foo"], "^mpich@3.2 +foo"),
# Mix a real package with a virtual one. This test
# should fail if we start using the repository
(["^mpich@3.2", "^mpi+foo"], "^mpich@3.2 ^mpi+foo"),
# Non direct dependencies + direct dependencies
(["^mpich", "%mpich"], "%mpich"),
(["^foo", "^bar %foo"], "^foo ^bar%foo"),
(["^foo", "%bar %foo"], "%bar%foo"),
],
)
def test_constrain_symbolically(constraints, expected):
"""Tests the semantics of constraining a spec when we don't resolve virtuals."""
merged = Spec()
for c in constraints:
merged._constrain_symbolically(c)
assert merged == Spec(expected)
reverse_order = Spec()
for c in reversed(constraints):
reverse_order._constrain_symbolically(c)
assert reverse_order == Spec(expected)
@pytest.mark.parametrize(
"parent_str,child_str,kwargs,expected_str,expected_repr",
[
(
"mpileaks",
"callpath",
{"virtuals": ()},
"mpileaks ^callpath",
"DependencySpec('mpileaks', 'callpath', depflag=0, virtuals=())",
),
(
"mpileaks",
"callpath",
{"virtuals": ("mpi", "lapack")},
"mpileaks ^[virtuals=lapack,mpi] callpath",
"DependencySpec('mpileaks', 'callpath', depflag=0, virtuals=('lapack', 'mpi'))",
),
(
"",
"callpath",
{"virtuals": ("mpi", "lapack"), "direct": True},
" %[virtuals=lapack,mpi] callpath",
"DependencySpec('', 'callpath', depflag=0, virtuals=('lapack', 'mpi'), direct=True)",
),
(
"",
"callpath",
{
"virtuals": ("mpi", "lapack"),
"direct": True,
"propagation": PropagationPolicy.PREFERENCE,
},
" %%[virtuals=lapack,mpi] callpath",
"DependencySpec('', 'callpath', depflag=0, virtuals=('lapack', 'mpi'), direct=True,"
" propagation=PropagationPolicy.PREFERENCE)",
),
(
"",
"callpath",
{"virtuals": (), "direct": True, "propagation": PropagationPolicy.PREFERENCE},
" %%callpath",
"DependencySpec('', 'callpath', depflag=0, virtuals=(), direct=True,"
" propagation=PropagationPolicy.PREFERENCE)",
),
(
"mpileaks+foo",
"callpath+bar",
{"virtuals": (), "direct": True, "propagation": PropagationPolicy.PREFERENCE},
"mpileaks+foo %%callpath+bar",
"DependencySpec('mpileaks+foo', 'callpath+bar', depflag=0, virtuals=(), direct=True,"
" propagation=PropagationPolicy.PREFERENCE)",
),
],
)
def test_edge_representation(parent_str, child_str, kwargs, expected_str, expected_repr):
"""Tests the string representations of edges."""
parent = Spec(parent_str) or Spec()
child = Spec(child_str) or Spec()
edge = DependencySpec(parent, child, depflag=0, **kwargs)
assert str(edge) == expected_str
assert repr(edge) == expected_repr
@pytest.mark.parametrize(
"spec_str,assertions",
[
# Check <key>=* semantics for a "regular" variant
("mpileaks foo=abc", [("foo=*", True), ("bar=*", False)]),
# Check the semantics for architecture related key value pairs
(
"mpileaks",
[
("target=*", False),
("os=*", False),
("platform=*", False),
("target=* platform=*", False),
],
),
(
"mpileaks target=x86_64",
[
("target=*", True),
("os=*", False),
("platform=*", False),
("target=* platform=*", False),
],
),
("mpileaks os=debian6", [("target=*", False), ("os=*", True), ("platform=*", False)]),
("mpileaks platform=linux", [("target=*", False), ("os=*", False), ("platform=*", True)]),
("mpileaks platform=linux", [("target=*", False), ("os=*", False), ("platform=*", True)]),
(
"mpileaks platform=linux target=x86_64",
[
("target=*", True),
("os=*", False),
("platform=*", True),
("target=* platform=*", True),
],
),
],
)
def test_attribute_existence_in_satisfies(spec_str, assertions, mock_packages, config):
"""Tests the semantics of <key>=* when used in Spec.satisfies"""
s = Spec(spec_str)
for test, expected in assertions:
assert s.satisfies(test) is expected
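# Illustrative reading of the parametrization above (annotation, not part of the
# original test module): Spec("mpileaks foo=abc") satisfies "foo=*" because the
# 'foo' variant exists with some value, while "bar=*" is not satisfied since no
# 'bar' variant is set; the architecture cases work the same way for the
# target/os/platform key-value pairs.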
|
TestSpecSemantics
|
python
|
tensorflow__tensorflow
|
tensorflow/python/tpu/tpu_test.py
|
{
"start": 4248,
"end": 6591
}
|
class ____(test.TestCase):
def test_prune_unconnected_ops(self):
with ops.Graph().as_default():
a = array_ops.placeholder(dtype=dtypes.float32, name="a")
b = array_ops.placeholder(dtype=dtypes.float32, name="b")
constant_op.constant(1.0, name="constant")
x = variable_scope.get_variable(
name="x",
dtype=dtypes.float32,
shape=[],
use_resource=True,
initializer=init_ops.constant_initializer(2.0))
y = variable_scope.get_variable(
name="y",
dtype=dtypes.float32,
shape=[],
use_resource=True,
initializer=init_ops.constant_initializer(3.0))
math_ops.add(a, b)
math_ops.add(x, y)
graph_def = ops.get_default_graph().as_graph_def()
for node in graph_def.node:
# Attach a TPU_REPLICATE_ATTR to each node.
node.attr[tpu_replication._TPU_REPLICATE_ATTR].s = b"0"
# Rewire placeholder "a" and variable "y" leaving them unconnected.
for (input_index, node_input) in enumerate(node.input):
if node_input == "b":
node.input[input_index] = "constant"
if node_input == "y":
node.input[input_index] = "x"
with ops.Graph().as_default() as graph:
# Reimport the graph and prune unconnected ops.
importer.import_graph_def(graph_def)
tpu.prune_unconnected_ops_from_xla(ops.get_default_graph())
# Verify that ops "a" and "x" still have TPU_REPLICATE_ATTR.
a = graph.get_operation_by_name("import/a").get_attr(
tpu_replication._TPU_REPLICATE_ATTR)
self.assertEqual(b"0", a)
x = graph.get_operation_by_name("import/x").get_attr(
tpu_replication._TPU_REPLICATE_ATTR)
self.assertEqual(b"0", x)
# Verify that ops "b" and "y" have TPU_REPLICATE_ATTR removed.
with self.assertRaisesRegex(
ValueError,
"Operation \'import/b\' has no attr named \'_tpu_replicate\'"):
graph.get_operation_by_name("import/b").get_attr(
tpu_replication._TPU_REPLICATE_ATTR)
with self.assertRaisesRegex(
ValueError,
"Operation \'import/y\' has no attr named \'_tpu_replicate\'"):
graph.get_operation_by_name("import/y").get_attr(
tpu_replication._TPU_REPLICATE_ATTR)
|
TPUGraphPruneTest
|
python
|
ray-project__ray
|
rllib/env/policy_client.py
|
{
"start": 883,
"end": 6615
}
|
class ____:
"""REST client to interact with an RLlib policy server."""
def __init__(
self,
address: str,
inference_mode: str = "local",
update_interval: float = 10.0,
session: Optional[requests.Session] = None,
):
self.address = address
self.session = session
self.env: ExternalEnv = None
if inference_mode == "local":
self.local = True
self._setup_local_rollout_worker(update_interval)
elif inference_mode == "remote":
self.local = False
else:
raise ValueError("inference_mode must be either 'local' or 'remote'")
def start_episode(
self, episode_id: Optional[str] = None, training_enabled: bool = True
) -> str:
if self.local:
self._update_local_policy()
return self.env.start_episode(episode_id, training_enabled)
return self._send(
{
"episode_id": episode_id,
"command": Commands.START_EPISODE,
"training_enabled": training_enabled,
}
)["episode_id"]
def get_action(
self, episode_id: str, observation: Union[EnvObsType, MultiAgentDict]
) -> Union[EnvActionType, MultiAgentDict]:
if self.local:
self._update_local_policy()
if isinstance(episode_id, (list, tuple)):
actions = {
eid: self.env.get_action(eid, observation[eid])
for eid in episode_id
}
return actions
else:
return self.env.get_action(episode_id, observation)
else:
return self._send(
{
"command": Commands.GET_ACTION,
"observation": observation,
"episode_id": episode_id,
}
)["action"]
def log_action(
self,
episode_id: str,
observation: Union[EnvObsType, MultiAgentDict],
action: Union[EnvActionType, MultiAgentDict],
) -> None:
if self.local:
self._update_local_policy()
return self.env.log_action(episode_id, observation, action)
self._send(
{
"command": Commands.LOG_ACTION,
"observation": observation,
"action": action,
"episode_id": episode_id,
}
)
def log_returns(
self,
episode_id: str,
reward: float,
info: Union[EnvInfoDict, MultiAgentDict] = None,
multiagent_done_dict: Optional[MultiAgentDict] = None,
) -> None:
if self.local:
self._update_local_policy()
if multiagent_done_dict is not None:
assert isinstance(reward, dict)
return self.env.log_returns(
episode_id, reward, info, multiagent_done_dict
)
return self.env.log_returns(episode_id, reward, info)
self._send(
{
"command": Commands.LOG_RETURNS,
"reward": reward,
"info": info,
"episode_id": episode_id,
"done": multiagent_done_dict,
}
)
def end_episode(
self, episode_id: str, observation: Union[EnvObsType, MultiAgentDict]
) -> None:
if self.local:
self._update_local_policy()
return self.env.end_episode(episode_id, observation)
self._send(
{
"command": Commands.END_EPISODE,
"observation": observation,
"episode_id": episode_id,
}
)
def update_policy_weights(self) -> None:
"""Query the server for new policy weights, if local inference is enabled."""
self._update_local_policy(force=True)
def _send(self, data):
payload = pickle.dumps(data)
if self.session is None:
response = requests.post(self.address, data=payload)
else:
response = self.session.post(self.address, data=payload)
if response.status_code != 200:
logger.error("Request failed {}: {}".format(response.text, data))
response.raise_for_status()
parsed = pickle.loads(response.content)
return parsed
def _setup_local_rollout_worker(self, update_interval):
self.update_interval = update_interval
self.last_updated = 0
logger.info("Querying server for rollout worker settings.")
kwargs = self._send(
{
"command": Commands.GET_WORKER_ARGS,
}
)["worker_args"]
(self.rollout_worker, self.inference_thread) = _create_embedded_rollout_worker(
kwargs, self._send
)
self.env = self.rollout_worker.env
def _update_local_policy(self, force=False):
assert self.inference_thread.is_alive()
if (
self.update_interval
and time.time() - self.last_updated > self.update_interval
) or force:
logger.info("Querying server for new policy weights.")
resp = self._send(
{
"command": Commands.GET_WEIGHTS,
}
)
weights = resp["weights"]
global_vars = resp["global_vars"]
logger.info(
"Updating rollout worker weights and global vars {}.".format(
global_vars
)
)
self.rollout_worker.set_weights(weights, global_vars)
self.last_updated = time.time()
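# Hypothetical remote-inference usage of the class above (the server address,
# observation and reward values are placeholders, not from the original module):
#
#     client = PolicyClient("http://localhost:9900", inference_mode="remote")
#     eid = client.start_episode(training_enabled=True)
#     action = client.get_action(eid, obs)
#     client.log_returns(eid, reward=1.0)
#     client.end_episode(eid, obs)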
@OldAPIStack
|
PolicyClient
|
python
|
astropy__astropy
|
astropy/io/fits/verify.py
|
{
"start": 566,
"end": 3809
}
|
class ____:
"""
Shared methods for verification.
"""
def run_option(
self, option="warn", err_text="", fix_text="Fixed.", fix=None, fixable=True
):
"""
Execute the verification with selected option.
"""
text = err_text
if option in ["warn", "exception"]:
fixable = False
# fix the value
elif not fixable:
text = f"Unfixable error: {text}"
else:
if fix:
fix()
text += " " + fix_text
return (fixable, text)
def verify(self, option="warn"):
"""
Verify all values in the instance.
Parameters
----------
option : str
Output verification option. Must be one of ``"fix"``,
``"silentfix"``, ``"ignore"``, ``"warn"``, or
``"exception"``. May also be any combination of ``"fix"`` or
``"silentfix"`` with ``"+ignore"``, ``"+warn"``, or ``"+exception"``
(e.g. ``"fix+warn"``). See :ref:`astropy:verify` for more info.
"""
opt = option.lower()
if opt not in VERIFY_OPTIONS:
raise ValueError(f"Option {option!r} not recognized.")
if opt == "ignore":
return
errs = self._verify(opt)
# Break the verify option into separate options related to reporting of
# errors, and fixing of fixable errors
if "+" in opt:
fix_opt, report_opt = opt.split("+")
elif opt in ["fix", "silentfix"]:
# The original default behavior for 'fix' and 'silentfix' was to
# raise an exception for unfixable errors
fix_opt, report_opt = opt, "exception"
else:
fix_opt, report_opt = None, opt
if fix_opt == "silentfix" and report_opt == "ignore":
# Fixable errors were fixed, but don't report anything
return
if fix_opt == "silentfix":
# Don't print out fixable issues; the first element of each verify
# item is a boolean indicating whether or not the issue was fixable
line_filter = lambda x: not x[0]
elif fix_opt == "fix" and report_opt == "ignore":
# Don't print *unfixable* issues, but do print fixed issues; this
# is probably not very useful but the option exists for
# completeness
line_filter = operator.itemgetter(0)
else:
line_filter = None
unfixable = False
messages = []
for fixable, message in errs.iter_lines(filter=line_filter):
if fixable is not None:
unfixable = not fixable
messages.append(message)
if messages:
messages.insert(0, "Verification reported errors:")
messages.append("Note: astropy.io.fits uses zero-based indexing.\n")
if fix_opt == "silentfix" and not unfixable:
return
elif report_opt == "warn" or (fix_opt == "fix" and not unfixable):
for line in messages:
warnings.warn(line, VerifyWarning)
else:
raise VerifyError("\n" + "\n".join(messages))
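# Note on the option strings handled by verify() above (a summary of the logic,
# not additional behavior): "ignore" returns immediately; "warn" and "exception"
# only report and fix nothing; "fix"/"silentfix" may be combined with a reporting
# option, e.g. "fix+warn" or "silentfix+exception", and default to exception
# reporting for unfixable errors when used alone.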
|
_Verify
|
python
|
walkccc__LeetCode
|
solutions/2451. Odd String Difference/2451.py
|
{
"start": 0,
"end": 465
}
|
class ____:
def oddString(self, words: list[str]) -> str:
def getDiff(s: str) -> list[int]:
return [ord(b) - ord(a) for a, b in zip(s, s[1:])]
wordAndDiffTuples = [(word, tuple(getDiff(word))) for word in words]
diffTupleCount = collections.Counter()
for _, diffTuple in wordAndDiffTuples:
diffTupleCount[diffTuple] += 1
for word, diffTuple in wordAndDiffTuples:
if diffTupleCount[diffTuple] == 1:
return word
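# Worked example (annotation only): for words = ["adc", "wzy", "abc"] the
# letter-difference tuples are (3, -1), (3, -1) and (1, 1), so "abc" is the
# unique one and Solution().oddString(["adc", "wzy", "abc"]) returns "abc".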
|
Solution
|
python
|
apache__avro
|
lang/py/avro/codecs.py
|
{
"start": 2835,
"end": 3133
}
|
class ____(Codec):
@staticmethod
def compress(data: bytes) -> Tuple[bytes, int]:
return data, len(data)
@staticmethod
def decompress(readers_decoder: avro.io.BinaryDecoder) -> avro.io.BinaryDecoder:
readers_decoder.skip_long()
return readers_decoder
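# Behavior summary (annotation only): compress() is the identity, e.g.
# NullCodec.compress(b"abc") == (b"abc", 3); decompress() merely skips one long
# in the stream and returns the same decoder, so reading continues in place.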
|
NullCodec
|
python
|
ansible__ansible
|
lib/ansible/plugins/inventory/advanced_host_list.py
|
{
"start": 905,
"end": 2273
}
|
class ____(BaseInventoryPlugin):
NAME = 'advanced_host_list'
# advanced_host_list does not set vars, so needs no special trust assistance from the inventory API
def verify_file(self, host_list):
valid = False
b_path = to_bytes(host_list, errors='surrogate_or_strict')
if not os.path.exists(b_path) and ',' in host_list:
valid = True
return valid
def parse(self, inventory, loader, host_list, cache=True):
""" parses the inventory file """
super(InventoryModule, self).parse(inventory, loader, host_list)
try:
for h in host_list.split(','):
h = h.strip()
if h:
try:
(hostnames, port) = self._expand_hostpattern(h)
except AnsibleError as e:
self.display.vvv("Unable to parse address from hostname, leaving unchanged: %s" % to_text(e))
hostnames = [h]
port = None
for host in hostnames:
if host not in self.inventory.hosts:
self.inventory.add_host(host, group='ungrouped', port=port)
except Exception as e:
raise AnsibleParserError("Invalid data from string, could not parse: %s" % to_native(e))
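# Illustrative behavior (annotation only): verify_file() accepts values that
# contain a comma and do not exist as a path, so a host_list such as
# "web1, db1:2222" would typically add hosts "web1" and "db1" (port 2222) to
# the "ungrouped" group via parse().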
|
InventoryModule
|
python
|
run-llama__llama_index
|
llama-index-integrations/llms/llama-index-llms-litellm/llama_index/llms/litellm/base.py
|
{
"start": 1854,
"end": 21207
}
|
class ____(FunctionCallingLLM):
"""
LiteLLM.
Examples:
`pip install llama-index-llms-litellm`
```python
import os
from llama_index.core.llms import ChatMessage
from llama_index.llms.litellm import LiteLLM
# Set environment variables
os.environ["OPENAI_API_KEY"] = "your-openai-api-key"
os.environ["COHERE_API_KEY"] = "your-cohere-api-key"
# Define a chat message
message = ChatMessage(role="user", content="Hey! how's it going?")
# Initialize LiteLLM with the desired model
llm = LiteLLM(model="gpt-3.5-turbo")
# Call the chat method with the message
chat_response = llm.chat([message])
# Print the response
print(chat_response)
```
"""
model: str = Field(
default=DEFAULT_LITELLM_MODEL,
description=(
"The LiteLLM model to use. "
"For complete list of providers https://docs.litellm.ai/docs/providers"
),
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use during generation.",
ge=0.0,
le=1.0,
)
max_tokens: Optional[int] = Field(
description="The maximum number of tokens to generate.",
gt=0,
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the LLM API.",
# for all inputs https://docs.litellm.ai/docs/completion/input
)
max_retries: int = Field(
default=10, description="The maximum number of API retries."
)
_custom_llm_provider: Optional[str] = PrivateAttr(default=None)
def __init__(
self,
model: str = DEFAULT_LITELLM_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
api_type: Optional[str] = None,
api_base: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
if "custom_llm_provider" in kwargs:
if (
kwargs["custom_llm_provider"] != "ollama"
and kwargs["custom_llm_provider"] != "vllm"
): # don't check keys for local models
validate_litellm_api_key(api_key, api_type)
else: # by default assume it's a hosted endpoint
validate_litellm_api_key(api_key, api_type)
additional_kwargs = additional_kwargs or {}
if api_key is not None:
additional_kwargs["api_key"] = api_key
if api_type is not None:
additional_kwargs["api_type"] = api_type
if api_base is not None:
additional_kwargs["api_base"] = api_base
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
self._custom_llm_provider = kwargs.get("custom_llm_provider")
def _get_model_name(self) -> str:
model_name = self.model
if "ft-" in model_name: # legacy fine-tuning
model_name = model_name.split(":")[0]
elif model_name.startswith("ft:"):
model_name = model_name.split(":")[1]
return model_name
@classmethod
def class_name(cls) -> str:
return "litellm_llm"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=openai_modelname_to_contextsize(self._get_model_name()),
num_output=self.max_tokens or -1,
is_chat_model=True,
is_function_calling_model=is_function_calling_model(
self._get_model_name(), self._custom_llm_provider
),
model_name=self.model,
)
def _prepare_chat_with_tools(
self,
tools: List[BaseTool],
user_msg: Optional[Union[str, ChatMessage]] = None,
chat_history: Optional[List[ChatMessage]] = None,
verbose: bool = False,
allow_parallel_tool_calls: bool = False,
tool_required: bool = False,
**kwargs: Any,
) -> Dict[str, Any]:
tool_specs = [
tool.metadata.to_openai_tool(skip_length_check=True) for tool in tools
]
if isinstance(user_msg, str):
user_msg = ChatMessage(role=MessageRole.USER, content=user_msg)
messages = chat_history or []
if user_msg:
messages.append(user_msg)
return {
"messages": messages,
"tools": tool_specs or None,
"parallel_tool_calls": allow_parallel_tool_calls,
"tool_choice": "required" if tool_required else "auto",
**kwargs,
}
def _validate_chat_with_tools_response(
self,
response: ChatResponse,
tools: List[BaseTool],
allow_parallel_tool_calls: bool = False,
**kwargs: Any,
) -> ChatResponse:
"""Validate the response from chat_with_tools."""
if not allow_parallel_tool_calls:
force_single_tool_call(response)
return response
def get_tool_calls_from_response(
self,
response: "ChatResponse",
error_on_no_tool_call: bool = True,
) -> List[ToolSelection]:
"""Predict and call the tool."""
tool_calls = response.message.additional_kwargs.get("tool_calls", [])
if len(tool_calls) < 1:
if error_on_no_tool_call:
raise ValueError(
f"Expected at least one tool call, but got {len(tool_calls)} tool calls."
)
else:
return []
tool_selections = []
for tool_call in tool_calls:
if tool_call["type"] != "function" or "function" not in tool_call:
raise ValueError(f"Invalid tool call of type {tool_call['type']}")
function = tool_call.get("function", {})
tool_name = function.get("name")
arguments = function.get("arguments")
# this should handle both complete and partial jsons
try:
if arguments: # If arguments is not empty/None
argument_dict = json.loads(arguments)
else: # If arguments is None or empty string
argument_dict = {}
except (ValueError, TypeError, JSONDecodeError):
argument_dict = {}
if tool_name: # Only require tool_name, not arguments
tool_selections.append(
ToolSelection(
tool_id=tool_call.get("id") or str(uuid.uuid4()),
tool_name=tool_name,
tool_kwargs=argument_dict,
)
)
if len(tool_selections) == 0 and error_on_no_tool_call:
raise ValueError("No valid tool calls found.")
return tool_selections
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if self._is_chat_model:
chat_fn = self._chat
else:
chat_fn = completion_to_chat_decorator(self._complete)
return chat_fn(messages, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if self._is_chat_model:
stream_chat_fn = self._stream_chat
else:
stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete)
return stream_chat_fn(messages, **kwargs)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
# litellm assumes all llms are chat llms
if self._is_chat_model:
complete_fn = chat_to_completion_decorator(self._chat)
else:
complete_fn = self._complete
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
if self._is_chat_model:
stream_complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
else:
stream_complete_fn = self._stream_complete
return stream_complete_fn(prompt, **kwargs)
@property
def _is_chat_model(self) -> bool:
# litellm assumes all llms are chat llms
return True
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None:
all_kwargs.pop(
"max_tokens"
            )  # don't send max_tokens=None; it raises errors for non-OpenAI providers
response = completion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=False,
**all_kwargs,
)
message_dict = response["choices"][0]["message"]
message = from_litellm_message(message_dict)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None:
all_kwargs.pop(
"max_tokens"
            )  # don't send max_tokens=None; it raises errors for non-OpenAI providers
def gen() -> ChatResponseGen:
content = ""
tool_calls: List[dict] = []
for response in completion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=True,
**all_kwargs,
):
delta = response["choices"][0]["delta"]
role = delta.get("role") or MessageRole.ASSISTANT
content_delta = delta.get("content", "") or ""
content += content_delta
# Handle tool_calls delta
tool_call_delta = delta.get("tool_calls", None)
if tool_call_delta is not None and len(tool_call_delta) > 0:
# Pass the entire list of tool call deltas
tool_calls = update_tool_calls(tool_calls, tool_call_delta)
additional_kwargs = {}
if tool_calls:
additional_kwargs["tool_calls"] = tool_calls
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
raise NotImplementedError("litellm assumes all llms are chat llms.")
def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
raise NotImplementedError("litellm assumes all llms are chat llms.")
def _get_max_token_for_prompt(self, prompt: str) -> int:
try:
import tiktoken
except ImportError:
raise ImportError(
"Please install tiktoken to use the max_tokens=None feature."
)
context_window = self.metadata.context_window
try:
encoding = tiktoken.encoding_for_model(self._get_model_name())
except KeyError:
            encoding = tiktoken.get_encoding(
                "cl100k_base"
            )  # default to using cl100k_base
tokens = encoding.encode(prompt)
max_token = context_window - len(tokens)
if max_token <= 0:
raise ValueError(
f"The prompt is too long for the model. "
f"Please use a prompt that is less than {context_window} tokens."
)
return max_token
def _get_response_token_counts(self, raw_response: Any) -> dict:
"""Get the token usage reported by the response."""
if not isinstance(raw_response, dict):
return {}
usage = raw_response.get("usage", {})
return {
"prompt_tokens": usage.get("prompt_tokens", 0),
"completion_tokens": usage.get("completion_tokens", 0),
"total_tokens": usage.get("total_tokens", 0),
}
# ===== Async Endpoints =====
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
achat_fn: Callable[..., Awaitable[ChatResponse]]
if self._is_chat_model:
achat_fn = self._achat
else:
achat_fn = acompletion_to_chat_decorator(self._acomplete)
return await achat_fn(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
astream_chat_fn: Callable[..., Awaitable[ChatResponseAsyncGen]]
if self._is_chat_model:
astream_chat_fn = self._astream_chat
else:
astream_chat_fn = astream_completion_to_chat_decorator(
self._astream_complete
)
return await astream_chat_fn(messages, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if self._is_chat_model:
acomplete_fn = achat_to_completion_decorator(self._achat)
else:
acomplete_fn = self._acomplete
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
if self._is_chat_model:
astream_complete_fn = astream_chat_to_completion_decorator(
self._astream_chat
)
else:
astream_complete_fn = self._astream_complete
return await astream_complete_fn(prompt, **kwargs)
async def _achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
response = await acompletion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=False,
**all_kwargs,
)
message_dict = response["choices"][0]["message"]
message = from_litellm_message(message_dict)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
async def _astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
async def gen() -> ChatResponseAsyncGen:
content = ""
tool_calls: List[dict] = []
async for response in await acompletion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=True,
**all_kwargs,
):
delta = response["choices"][0]["delta"]
role = delta.get("role") or MessageRole.ASSISTANT
content_delta = delta.get("content", "") or ""
content += content_delta
# Handle tool_calls delta
tool_call_delta = delta.get("tool_calls", None)
if tool_call_delta is not None and len(tool_call_delta) > 0:
# Pass the entire list of tool call deltas
tool_calls = update_tool_calls(tool_calls, tool_call_delta)
additional_kwargs = {}
if tool_calls:
additional_kwargs["tool_calls"] = tool_calls
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
raise NotImplementedError("litellm assumes all llms are chat llms.")
async def _astream_complete(
self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError("litellm assumes all llms are chat llms.")
|
LiteLLM
|
python
|
realpython__materials
|
oop-in-java-vs-python/car.py
|
{
"start": 334,
"end": 521
}
|
class ____:
"""The Device class defines objects which have a battery."""
def __init__(self):
"""Define the base voltage for our device."""
self._voltage = 12
|
Device
|
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1023269,
"end": 1024033
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateEnterpriseMembersCanMakePurchasesSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
"""The enterprise with the updated members can make purchases
setting.
"""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the members can make
purchases setting.
"""
|
UpdateEnterpriseMembersCanMakePurchasesSettingPayload
|
python
|
tiangolo__fastapi
|
docs_src/handling_errors/tutorial003.py
|
{
"start": 82,
"end": 626
}
|
class ____(Exception):
def __init__(self, name: str):
self.name = name
app = FastAPI()
@app.exception_handler(UnicornException)
async def unicorn_exception_handler(request: Request, exc: UnicornException):
return JSONResponse(
status_code=418,
content={"message": f"Oops! {exc.name} did something. There goes a rainbow..."},
)
@app.get("/unicorns/{name}")
async def read_unicorn(name: str):
if name == "yolo":
raise UnicornException(name=name)
return {"unicorn_name": name}
|
UnicornException
|
python
|
django__django
|
tests/middleware/tests.py
|
{
"start": 16823,
"end": 21023
}
|
class ____(SimpleTestCase):
rf = RequestFactory()
def setUp(self):
self.req = self.rf.get("/regular_url/that/does/not/exist")
def get_response(self, req):
return self.client.get(req.path)
def test_404_error_reporting(self):
self.req.META["HTTP_REFERER"] = "/another/url/"
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
self.assertIn("Broken", mail.outbox[0].subject)
def test_404_error_reporting_no_referer(self):
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
def test_404_error_reporting_ignored_url(self):
self.req.path = self.req.path_info = "foo_url/that/does/not/exist"
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
def test_custom_request_checker(self):
class SubclassedMiddleware(BrokenLinkEmailsMiddleware):
ignored_user_agent_patterns = (
re.compile(r"Spider.*"),
re.compile(r"Robot.*"),
)
def is_ignorable_request(self, request, uri, domain, referer):
"""Check user-agent in addition to normal checks."""
if super().is_ignorable_request(request, uri, domain, referer):
return True
user_agent = request.META["HTTP_USER_AGENT"]
return any(
pattern.search(user_agent)
for pattern in self.ignored_user_agent_patterns
)
self.req.META["HTTP_REFERER"] = "/another/url/"
self.req.META["HTTP_USER_AGENT"] = "Spider machine 3.4"
SubclassedMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
self.req.META["HTTP_USER_AGENT"] = "My user agent"
SubclassedMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
def test_referer_equal_to_requested_url(self):
"""
Some bots set the referer to the current URL to avoid being blocked by
a referer check (#25302).
"""
self.req.META["HTTP_REFERER"] = self.req.path
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
# URL with scheme and domain should also be ignored
self.req.META["HTTP_REFERER"] = "http://testserver%s" % self.req.path
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
# URL with a different scheme should be ignored as well because bots
# tend to use http:// in referers even when browsing HTTPS websites.
self.req.META["HTTP_X_PROTO"] = "https"
self.req.META["SERVER_PORT"] = 443
with self.settings(SECURE_PROXY_SSL_HEADER=("HTTP_X_PROTO", "https")):
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
def test_referer_equal_to_requested_url_on_another_domain(self):
self.req.META["HTTP_REFERER"] = "http://anotherserver%s" % self.req.path
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
@override_settings(APPEND_SLASH=True)
def test_referer_equal_to_requested_url_without_trailing_slash_with_append_slash(
self,
):
self.req.path = self.req.path_info = "/regular_url/that/does/not/exist/"
self.req.META["HTTP_REFERER"] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 0)
@override_settings(APPEND_SLASH=False)
def test_referer_equal_to_requested_url_without_trailing_slash_with_no_append_slash(
self,
):
self.req.path = self.req.path_info = "/regular_url/that/does/not/exist/"
self.req.META["HTTP_REFERER"] = self.req.path_info[:-1]
BrokenLinkEmailsMiddleware(self.get_response)(self.req)
self.assertEqual(len(mail.outbox), 1)
@override_settings(ROOT_URLCONF="middleware.cond_get_urls")
|
BrokenLinkEmailsMiddlewareTest
|
python
|
getsentry__sentry
|
src/sentry/management/commands/generate_controlsilo_urls.py
|
{
"start": 604,
"end": 1495
}
|
class ____:
callable: Callable
pattern: str
name: str | None
@property
def url_name(self) -> str:
return self.name or ""
def describe_pattern(pattern):
return str(pattern.pattern)
# Matches (?P<name>[pattern]) style expressions.
named_group_matcher = re.compile(r"\(\?P(<\w+>)([^\)]+)\)")
def simplify_regex(pattern: str) -> str:
"""
Convert python regex named capture groups into
simple patterns that will work with our javascript
code.
"""
pattern = pattern.replace("/^", "/")
named_groups = [(m.start(0), m.end(0)) for m in named_group_matcher.finditer(pattern)]
updated = pattern
# Reverse the list so we don't corrupt byte offsets when replacing values
for start, end in reversed(named_groups):
updated = updated[0:start] + "[^/]+" + updated[end:]
return updated.replace("\\", "\\\\")
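As a hedged illustration of what simplify_regex produces (the input pattern below is made up; only the simplify_regex function above is assumed), a named capture group collapses to a wildcard path segment:
# Illustrative sketch: simplify_regex applied to a hypothetical named-group pattern.
pattern = r"^api/0/users/(?P<user_id>[0-9]+)/$"
print(simplify_regex(pattern))
# -> ^api/0/users/[^/]+/$
# The named capture group is replaced with "[^/]+"; any backslashes in the
# pattern would be doubled so the result can be embedded in a JavaScript string.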
|
PatternInfo
|
python
|
fluentpython__example-code
|
16-coroutine/taxi_sim0.py
|
{
"start": 2297,
"end": 10112
}
|
class ____:
def __init__(self, procs_map):
self.events = queue.PriorityQueue()
self.procs = dict(procs_map)
def run(self, end_time): # <1>
"""Schedule and display events until time is up"""
# schedule the first event for each cab
for _, proc in sorted(self.procs.items()): # <2>
first_event = next(proc) # <3>
self.events.put(first_event) # <4>
# main loop of the simulation
time = 0
while time < end_time: # <5>
if self.events.empty(): # <6>
print('*** end of events ***')
break
# get and display current event
current_event = self.events.get() # <7>
print('taxi:', current_event.proc, # <8>
current_event.proc * ' ', current_event)
# schedule next action for current proc
time = current_event.time # <9>
proc = self.procs[current_event.proc] # <10>
try:
next_event = proc.send(time) # <11>
except StopIteration:
del self.procs[current_event.proc] # <12>
else:
self.events.put(next_event) # <13>
else: # <14>
msg = '*** end of simulation time: {} events pending ***'
print(msg.format(self.events.qsize()))
# END TAXI_SIMULATOR
def main(end_time=DEFAULT_END_TIME, num_taxis=DEFAULT_NUMBER_OF_TAXIS,
seed=None):
"""Initialize random generator, build procs and run simulation"""
if seed is not None:
random.seed(seed) # get reproducible results
taxis = {i: taxi_process(i, (i+1)*2, i*DEPARTURE_INTERVAL)
for i in range(num_taxis)}
sim = Simulator(taxis)
sim.run(end_time)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Taxi fleet simulator.')
parser.add_argument('-e', '--end-time', type=int,
default=DEFAULT_END_TIME,
help='simulation end time; default = %s'
% DEFAULT_END_TIME)
parser.add_argument('-t', '--taxis', type=int,
default=DEFAULT_NUMBER_OF_TAXIS,
help='number of taxis running; default = %s'
% DEFAULT_NUMBER_OF_TAXIS)
parser.add_argument('-s', '--seed', type=int, default=None,
help='random generator seed (for testing)')
args = parser.parse_args()
main(args.end_time, args.taxis, args.seed)
"""
Notes for the ``taxi_process`` coroutine::
<1> `taxi_process` will be called once per taxi, creating a generator
object to represent its operations. `ident` is the number of the taxi
(eg. 0, 1, 2 in the sample run); `trips` is the number of trips this
taxi will make before going home; `start_time` is when the taxi
leaves the garage.
<2> The first `Event` yielded is `'leave garage'`. This suspends the
coroutine, and lets the simulation main loop proceed to the next
scheduled event. When it's time to reactivate this process, the main
loop will `send` the current simulation time, which is assigned to
`time`.
<3> This block will be repeated once for each trip.
<4> The ending time of the search for a passenger is computed.
<5> An `Event` signaling passenger pick up is yielded. The coroutine
pauses here. When the time comes to reactivate this coroutine,
the main loop will again `send` the current time.
<6> The ending time of the trip is computed, taking into account the
current `time`.
<7> An `Event` signaling passenger drop off is yielded. Coroutine
suspended again, waiting for the main loop to send the time of when
it's time to continue.
<8> The `for` loop ends after the given number of trips, and a final
`'going home'` event is yielded, to happen 1 minute after the current
time. The coroutine will suspend for the last time. When reactivated,
it will be sent the time from the simulation main loop, but here I
don't assign it to any variable because it will not be useful.
<9> When the coroutine falls off the end, the coroutine object raises
`StopIteration`.
Notes for the ``Simulator.run`` method::
<1> The simulation `end_time` is the only required argument for `run`.
<2> Use `sorted` to retrieve the `self.procs` items ordered by the
integer key; we don't care about the key, so assign it to `_`.
<3> `next(proc)` primes each coroutine by advancing it to the first
yield, so it's ready to be sent data. An `Event` is yielded.
<4> Add each event to the `self.events` `PriorityQueue`. The first
event for each taxi is `'leave garage'`, as seen in the sample run
(ex_taxi_process>>).
<5> Main loop of the simulation: run until the current `time` equals
or exceeds the `end_time`.
<6> The main loop may also exit if there are no pending events in the
queue.
<7> Get `Event` with the smallest `time` in the queue; this is the
`current_event`.
<8> Display the `Event`, identifying the taxi and adding indentation
according to the taxi id.
<9> Update the simulation time with the time of the `current_event`.
<10> Retrieve the coroutine for this taxi from the `self.procs`
dictionary.
<11> Send the `time` to the coroutine. The coroutine will yield the
`next_event` or raise `StopIteration` when it's finished.
<12> If `StopIteration` was raised, delete the coroutine from the
`self.procs` dictionary.
<13> Otherwise, put the `next_event` in the queue.
<14> If the loop exits because the simulation time passed, display the
number of events pending (which may be zero by coincidence,
sometimes).
Sample run from the command line, seed=24, total elapsed time=160::
# BEGIN TAXI_SAMPLE_RUN
$ python3 taxi_sim.py -s 24 -e 160
taxi: 0 Event(time=0, proc=0, action='leave garage')
taxi: 0 Event(time=5, proc=0, action='pick up passenger')
taxi: 1 Event(time=5, proc=1, action='leave garage')
taxi: 1 Event(time=6, proc=1, action='pick up passenger')
taxi: 2 Event(time=10, proc=2, action='leave garage')
taxi: 2 Event(time=11, proc=2, action='pick up passenger')
taxi: 2 Event(time=23, proc=2, action='drop off passenger')
taxi: 0 Event(time=24, proc=0, action='drop off passenger')
taxi: 2 Event(time=24, proc=2, action='pick up passenger')
taxi: 2 Event(time=26, proc=2, action='drop off passenger')
taxi: 0 Event(time=30, proc=0, action='pick up passenger')
taxi: 2 Event(time=31, proc=2, action='pick up passenger')
taxi: 0 Event(time=43, proc=0, action='drop off passenger')
taxi: 0 Event(time=44, proc=0, action='going home')
taxi: 2 Event(time=46, proc=2, action='drop off passenger')
taxi: 2 Event(time=49, proc=2, action='pick up passenger')
taxi: 1 Event(time=70, proc=1, action='drop off passenger')
taxi: 2 Event(time=70, proc=2, action='drop off passenger')
taxi: 2 Event(time=71, proc=2, action='pick up passenger')
taxi: 2 Event(time=79, proc=2, action='drop off passenger')
taxi: 1 Event(time=88, proc=1, action='pick up passenger')
taxi: 2 Event(time=92, proc=2, action='pick up passenger')
taxi: 2 Event(time=98, proc=2, action='drop off passenger')
taxi: 2 Event(time=99, proc=2, action='going home')
taxi: 1 Event(time=102, proc=1, action='drop off passenger')
taxi: 1 Event(time=104, proc=1, action='pick up passenger')
taxi: 1 Event(time=135, proc=1, action='drop off passenger')
taxi: 1 Event(time=136, proc=1, action='pick up passenger')
taxi: 1 Event(time=151, proc=1, action='drop off passenger')
taxi: 1 Event(time=152, proc=1, action='going home')
*** end of events ***
# END TAXI_SAMPLE_RUN
"""
|
Simulator
|
python
|
econchick__interrogate
|
src/interrogate/visit.py
|
{
"start": 1441,
"end": 9301
}
|
class ____(ast.NodeVisitor):
"""NodeVisitor for a Python file to find docstrings.
:param str filename: filename to parse coverage.
:param config.InterrogateConfig config: configuration.
"""
def __init__(self, filename: str, config: InterrogateConfig):
self.filename = filename
self.config = config
self.stack: list[CovNode] = []
self.nodes: list[CovNode] = []
@staticmethod
def _has_doc(node: DocumentableNode) -> bool:
"""Return if node has docstrings."""
return (
ast.get_docstring(node) is not None
and ast.get_docstring(node).strip() != "" # type: ignore
)
def _visit_helper(self, node: DocumentableNode) -> None:
"""Recursively visit AST node for docstrings."""
if not hasattr(node, "name"):
node_name = os.path.basename(self.filename)
else:
node_name = node.name
parent = None
path = node_name
if self.stack:
parent = self.stack[-1]
parent_path = parent.path
if parent_path.endswith(".py"):
path = parent_path + ":" + node_name
else:
path = parent_path + "." + node_name
lineno = None
if hasattr(node, "lineno"):
lineno = node.lineno
node_type = type(node).__name__
cov_node = CovNode(
name=node_name,
path=path,
covered=self._has_doc(node),
level=len(self.stack),
node_type=node_type,
lineno=lineno,
is_nested_func=self._is_nested_func(parent, node_type),
is_nested_cls=self._is_nested_cls(parent, node_type),
parent=parent,
)
self.stack.append(cov_node)
self.nodes.append(cov_node)
self.generic_visit(node)
self.stack.pop()
def _is_nested_func(self, parent: CovNode | None, node_type: str) -> bool:
"""Is node a nested func/method of another func/method."""
if parent is None:
return False
# is it a nested function?
if parent.node_type == "FunctionDef" and node_type == "FunctionDef":
return True
return False
def _is_nested_cls(self, parent: CovNode | None, node_type: str) -> bool:
"""Is node a nested func/method of another func/method."""
if parent is None:
return False
# is it a nested class?
if (
parent.node_type in ("ClassDef", "FunctionDef")
and node_type == "ClassDef"
):
return True
return False
def _is_private(self, node: DocumentableFuncOrClass) -> bool:
"""Is node private (i.e. __MyClass, __my_func)."""
if node.name.endswith("__"):
return False
if not node.name.startswith("__"):
return False
return True
def _is_semiprivate(self, node: DocumentableFuncOrClass) -> bool:
"""Is node semiprivate (i.e. _MyClass, _my_func)."""
if node.name.endswith("__"):
return False
if node.name.startswith("__"):
return False
if not node.name.startswith("_"):
return False
return True
def _is_ignored_common(self, node: DocumentableFuncOrClass) -> bool:
"""Commonly-shared ignore checkers."""
is_private = self._is_private(node)
is_semiprivate = self._is_semiprivate(node)
if self.config.ignore_private and is_private:
return True
if self.config.ignore_semiprivate and is_semiprivate:
return True
if self.config.ignore_regex:
for regexp in self.config.ignore_regex:
regex_result = regexp.match(node.name)
if regex_result:
return True
return False
def _has_property_decorators(self, node: DocumentableFuncOrClass) -> bool:
"""Detect if node has property get/setter/deleter decorators."""
if not hasattr(node, "decorator_list"):
return False
for dec in node.decorator_list:
if hasattr(dec, "id"):
if dec.id == "property":
return True
if hasattr(dec, "attr"):
if dec.attr == "setter":
return True
if dec.attr == "deleter":
return True
return False
def _has_setters(self, node: DocumentableFuncOrClass) -> bool:
"""Detect if node has property get/setter decorators."""
if not hasattr(node, "decorator_list"):
return False
for dec in node.decorator_list:
if hasattr(dec, "attr"):
if dec.attr == "setter":
return True
return False
def _has_overload_decorator(self, node: DocumentableFuncOrClass) -> bool:
"""Detect if node has a typing.overload decorator."""
if not hasattr(node, "decorator_list"):
return False
for dec in node.decorator_list:
if (
hasattr(dec, "attr")
and hasattr(dec, "value")
and hasattr(dec.value, "id")
and dec.value.id == "typing"
and dec.attr == "overload"
):
# @typing.overload decorator
return True
if hasattr(dec, "id") and dec.id == "overload":
# @overload decorator
return True
return False
def _is_func_ignored(self, node: DocumentableFuncOrClass) -> bool:
"""Should the AST visitor ignore this func/method node."""
is_init = node.name == "__init__"
is_magic = all(
[
node.name.startswith("__"),
node.name.endswith("__"),
node.name != "__init__",
]
)
has_property_decorators = self._has_property_decorators(node)
has_setters = self._has_setters(node)
has_overload = self._has_overload_decorator(node)
if self.config.ignore_init_method and is_init:
return True
if self.config.ignore_magic and is_magic:
return True
if self.config.ignore_property_decorators and has_property_decorators:
return True
if self.config.ignore_property_setters and has_setters:
return True
if self.config.ignore_overloaded_functions and has_overload:
return True
return self._is_ignored_common(node)
def _is_class_ignored(self, node: DocumentableFuncOrClass) -> bool:
"""Should the AST visitor ignore this class node."""
return self._is_ignored_common(node)
def visit_Module(self, node: DocumentableNode) -> None:
"""Visit module for docstrings.
:param ast.Module node: a module AST node.
"""
self._visit_helper(node)
def visit_ClassDef(self, node: DocumentableFuncOrClass) -> None:
"""Visit class for docstrings.
:param ast.ClassDef node: a class AST node.
"""
if self._is_class_ignored(node):
return
self._visit_helper(node)
def visit_FunctionDef(self, node: DocumentableFuncOrClass) -> None:
"""Visit function or method for docstrings.
:param ast.FunctionDef node: a function/method AST node.
"""
if self._is_func_ignored(node):
return
self._visit_helper(node)
def visit_AsyncFunctionDef(self, node: DocumentableFuncOrClass) -> None:
"""Visit async function or method for docstrings.
:param ast.AsyncFunctionDef node: a async function/method AST node.
"""
if self._is_func_ignored(node):
return
self._visit_helper(node)
|
CoverageVisitor
|
python
|
facebook__pyre-check
|
client/language_server/protocol.py
|
{
"start": 1086,
"end": 1217
}
|
class ____(json_rpc.JSONRPCException):
@override
def error_code(self) -> int:
return -32002
|
ServerNotInitializedError
|
python
|
huggingface__transformers
|
src/transformers/models/wav2vec2_conformer/modular_wav2vec2_conformer.py
|
{
"start": 27658,
"end": 28501
}
|
class ____(Wav2Vec2ConformerPreTrainedModel, Wav2Vec2Model):
def __init__(self, config: Wav2Vec2ConformerConfig):
Wav2Vec2ConformerPreTrainedModel.__init__(self, config)
self.config = config
self.feature_extractor = Wav2Vec2ConformerFeatureEncoder(config)
self.feature_projection = Wav2Vec2ConformerFeatureProjection(config)
# model only needs masking vector if mask prob is > 0.0
if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0:
self.masked_spec_embed = nn.Parameter(torch.Tensor(config.hidden_size).uniform_())
self.encoder = Wav2Vec2ConformerEncoder(config)
self.adapter = Wav2Vec2ConformerAdapter(config) if config.add_adapter else None
# Initialize weights and apply final processing
self.post_init()
|
Wav2Vec2ConformerModel
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/runtime/websocket_session_manager_test.py
|
{
"start": 924,
"end": 1833
}
|
class ____(SessionStorage):
"""A simple SessionStorage implementation used for testing.
Essentially just a thin wrapper around a dict. This class exists so that we don't
accidentally have our WebsocketSessionManager tests rely on a real SessionStorage
implementation.
"""
def __init__(self):
self._cache = {}
def get(self, session_id):
return self._cache.get(session_id, None)
def save(self, session_info):
self._cache[session_info.session.id] = session_info
def delete(self, session_id):
del self._cache[session_id]
def list(self):
return list(self._cache.values())
@patch(
"streamlit.runtime.app_session.asyncio.get_running_loop",
new=MagicMock(),
)
@patch("streamlit.runtime.app_session.LocalSourcesWatcher", new=MagicMock())
@patch("streamlit.runtime.app_session.ScriptRunner", new=MagicMock())
|
MockSessionStorage
|
python
|
getsentry__sentry
|
src/sentry_plugins/sessionstack/plugin.py
|
{
"start": 6575,
"end": 6645
}
|
class ____(ContextType):
type = "sessionstack"
|
SessionStackContextType
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-sqlalchemy/prefect_sqlalchemy/credentials.py
|
{
"start": 4970,
"end": 7359
}
|
class ____(BaseModel):
"""
Parameters to use to create a SQLAlchemy engine URL.
Attributes:
driver: The driver name to use.
database: The name of the database to use.
username: The user name used to authenticate.
password: The password used to authenticate.
host: The host address of the database.
port: The port to connect to the database.
query: A dictionary of string keys to string values to be passed to the dialect
and/or the DBAPI upon connect.
"""
driver: Union[AsyncDriver, SyncDriver, str] = Field(
default=..., description="The driver name to use."
)
database: Optional[str] = Field(
default=None, description="The name of the database to use."
)
username: Optional[str] = Field(
default=None, description="The user name used to authenticate."
)
password: Optional[SecretStr] = Field(
default=None, description="The password used to authenticate."
)
host: Optional[str] = Field(
default=None, description="The host address of the database."
)
port: Optional[int] = Field(
default=None, description="The port to connect to the database."
)
query: Optional[Dict[str, str]] = Field(
default=None,
description=(
"A dictionary of string keys to string values to be passed to the dialect "
"and/or the DBAPI upon connect. To specify non-string parameters to a "
"Python DBAPI directly, use connect_args."
),
)
def create_url(self) -> URL:
"""
Create a fully formed connection URL.
Returns:
The SQLAlchemy engine URL.
"""
driver = self.driver
drivername = driver.value if isinstance(driver, Enum) else driver
password = self.password.get_secret_value() if self.password else None
url_params = dict(
drivername=drivername,
username=self.username,
password=password,
database=self.database,
host=self.host,
port=self.port,
query=self.query,
)
return URL.create(
**{
url_key: url_param
for url_key, url_param in url_params.items()
if url_param is not None
}
)
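A hedged usage sketch follows; the driver and credential values are made up, and only the ConnectionComponents class above is assumed.
# Illustrative sketch: composing a SQLAlchemy URL from made-up component values.
components = ConnectionComponents(
    driver="postgresql+asyncpg",   # could also be an AsyncDriver/SyncDriver enum member
    username="prefect",
    password="hunter2",            # coerced to SecretStr by pydantic
    host="localhost",
    port=5432,
    database="prefect_db",
)
url = components.create_url()      # sqlalchemy.engine.URL; None-valued fields are omitted
print(url.drivername, url.host, url.database)
# postgresql+asyncpg localhost prefect_db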
|
ConnectionComponents
|
python
|
readthedocs__readthedocs.org
|
readthedocs/integrations/migrations/0013_set_timestamp_fields_as_no_null.py
|
{
"start": 184,
"end": 1071
}
|
class ____(migrations.Migration):
safe = Safe.always()
dependencies = [
("integrations", "0012_migrate_timestamp_fields"),
]
operations = [
migrations.AlterField(
model_name="integration",
name="created",
field=django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True,
default=django.utils.timezone.now,
verbose_name="created",
),
preserve_default=False,
),
migrations.AlterField(
model_name="integration",
name="modified",
field=django_extensions.db.fields.ModificationDateTimeField(
auto_now=True,
default=django.utils.timezone.now,
verbose_name="modified",
),
preserve_default=False,
),
]
|
Migration
|
python
|
sympy__sympy
|
sympy/integrals/manualintegrate.py
|
{
"start": 15523,
"end": 15875
}
|
class ____(OrthogonalPolyRule):
a: Expr
b: Expr
def eval(self) -> Expr:
n, a, b, x = self.n, self.a, self.b, self.variable
return Piecewise(
(2*jacobi(n + 1, a - 1, b - 1, x)/(n + a + b), Ne(n + a + b, 0)),
(x, Eq(n, 0)),
((a + b + 2)*x**2/4 + (a - b)*x/2, Eq(n, 1)))
@dataclass
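In standard notation, the rule above encodes the antiderivative of the Jacobi polynomial, read directly off the Piecewise (the n = 0 and n = 1 cases are listed separately because the general branch is undefined when n + a + b = 0; the integration constant is omitted, as manualintegrate does):
\int P_n^{(a,b)}(x)\,dx =
\begin{cases}
  \dfrac{2\,P_{n+1}^{(a-1,\,b-1)}(x)}{n+a+b}, & n+a+b \neq 0,\\[1ex]
  x, & n = 0,\\[0.5ex]
  \dfrac{(a+b+2)\,x^2}{4} + \dfrac{(a-b)\,x}{2}, & n = 1.
\end{cases}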
|
JacobiRule
|
python
|
pytorch__pytorch
|
torch/distributed/pipelining/schedules.py
|
{
"start": 55636,
"end": 73793
}
|
class ____(_PipelineSchedule):
"""
Base class for multi-stage schedules.
Implements the `step` method.
Gradients are scaled by num_microbatches depending on the `scale_grads` argument, defaulting to True. This setting
should match the configuration of your loss_fn, which may either average losses (scale_grads=True)
or sum losses (scale_grads=False).
"""
def __init__(
self,
stages: list[_PipelineStageBase],
n_microbatches: int,
loss_fn: Callable | None = None,
args_chunk_spec: tuple[TensorChunkSpec, ...] | None = None,
kwargs_chunk_spec: dict[str, TensorChunkSpec] | None = None,
output_merge_spec: dict[str, Any] | tuple[Any] | None = None,
use_full_backward: bool | None = None,
scale_grads: bool = True,
backward_requires_autograd: bool = True,
):
# Init parent
super().__init__(
n_microbatches=n_microbatches,
loss_fn=loss_fn,
args_chunk_spec=args_chunk_spec,
kwargs_chunk_spec=kwargs_chunk_spec,
output_merge_spec=output_merge_spec,
scale_grads=scale_grads,
)
# Self attributes
self._stages = stages
self._num_stages = stages[0].num_stages
self.pp_group_size = stages[0].group_size
self.rank = stages[0].group_rank
# Set the pipeline stage states
self.stage_index_to_group_rank = generate_stage_to_rank_mapping(
self.pp_group_size, self._num_stages
)
for stage in self._stages:
stage.stage_index_to_group_rank = self.stage_index_to_group_rank
self._stages_forward_initialized = False
self._stages_backward_initialized = False
# avoid putting a reference to 'self' inside the lambda, it creates a ref cycle
has_loss: bool = self._loss_fn is not None
self._should_compute_loss = lambda stage: stage.is_last and has_loss
# This will be set during init of derived schedules
self.pipeline_order: dict[int, list[_Action | None]] = {}
# When using a custom backward function, we may or may not need autograd to be used
# for the backward pass. This flag is used to determine whether or torch.is_grad_enabled()
# check should be performed before the step function.
self._backward_requires_autograd = backward_requires_autograd
if use_full_backward is not None:
logger.warning(
"Deprecation warning: 'use_full_backward' is no longer supported. "
"Simply stop passing it, and everything should still work fine."
)
def _initialize_stages(self, args: tuple[Any, ...], kwargs):
if not self._stages_forward_initialized:
# Prepare the communication needed for the pipeline schedule execution
# This is needed because during execution we always perform a series of batch P2P ops
# The first call of the batched P2P needs to involve the global group
all_ops: list[dist.P2POp] = []
for stage in self._stages:
all_ops.extend(stage._get_init_p2p_neighbors_ops())
_wait_batch_p2p(_batch_p2p(all_ops))
# may be 'none' value (if this stage sends its output shapes to the next stage via P2P)
# or real value (if this stage and next stage are on the same device)
next_stage_args: tuple[Any, ...] = tuple()
for stage in self._stages:
if stage.is_first:
next_stage_args = stage._prepare_forward_infra(
self._n_microbatches, args, kwargs
)
else:
next_stage_args = stage._prepare_forward_infra(
self._n_microbatches, next_stage_args, kwargs
)
self._stages_forward_initialized = True
if self._has_backward and not self._stages_backward_initialized:
for stage in self._stages:
stage._prepare_backward_infra(self._n_microbatches)
self._stages_backward_initialized = True
def _validate_and_set_stage_mapping(
self, actions: dict[int, list[_Action | None]]
) -> None:
"""
Allocates the stage index to rank mapping which is needed for communication
"""
self.stage_index_to_group_rank = _validate_schedule(
actions,
self.pp_group_size,
self._num_stages,
self._n_microbatches,
)
for stage in self._stages:
stage.stage_index_to_group_rank = self.stage_index_to_group_rank
def _dump_csv(self, filename):
"""Dump a CSV representation of the schedule into a file with the provided filename."""
with open(filename, "w", newline="") as csvfile:
writer = csv.writer(csvfile)
for rank in self.pipeline_order:
writer.writerow(self.pipeline_order[rank])
def _load_csv(self, filename, format="compute_only"):
"""Load a CSV representation of the schedule from a file with the provided filename.
This API will most likely get renamed/refactored so is marked as internal for now.
format must be "compute_only" for PipelineScheduleMulti.
"""
assert format == "compute_only"
with open(filename, newline="") as csvfile:
reader = csv.reader(csvfile)
for rank, row in enumerate(reader):
self.pipeline_order[rank] = [_Action.from_str(s) for s in row]
# Validates the order of the pipeline actions and infers the stage_to_rank_mapping.
# This will overwrite the default stage_to_rank_mapping created in the constructor
self._validate_and_set_stage_mapping(self.pipeline_order)
def step(
self,
*args,
target=None,
losses: list | None = None,
return_outputs: bool = True,
**kwargs,
):
"""
Run one iteration of the pipeline schedule with *whole-batch* input.
Will chunk the input into microbatches automatically, and go through the
microbatches according to the schedule implementation.
args: positional arguments to the model (as in non-pipeline case).
kwargs: keyword arguments to the model (as in non-pipeline case).
target: target for the loss function.
losses: a list to store the losses for each microbatch.
return_outputs: whether to return the outputs from the last stage.
"""
if (
self._has_backward
and self._backward_requires_autograd
and not torch.is_grad_enabled()
):
raise RuntimeError(
"step() requires gradients to be enabled for backward computation; "
"it should not be used under torch.no_grad() context. "
"Please call eval() instead."
)
# Set the same has_backward flag for stage object
for stage in self._stages:
stage.has_backward = self._has_backward
# Clean per iteration
for stage in self._stages:
stage.clear_runtime_states()
# Split inputs into microbatches
args_split, kwargs_split = self._split_inputs(args, kwargs)
# Split target into microbatches
if target is not None:
targets_split = list(torch.tensor_split(target, self._n_microbatches))
else:
targets_split = None
# Run microbatches
self._step_microbatches(
args_split, kwargs_split, targets_split, losses, return_outputs
)
# Return merged results per original format
for stage in self._stages:
if stage.is_last and return_outputs:
return self._merge_outputs(stage.output_chunks)
# Does not contain the last stage or we do not return output chunks
return None
def _step_microbatches(
self,
arg_mbs: list | None = None,
kwarg_mbs: list | None = None,
target_mbs: list | None = None,
losses: list | None = None,
return_outputs: bool = True,
):
"""
Operate on the microbatches for looped schedules (multiple stages on each rank).
TODO: Does not use sorted_batch_isend_irecv(). As a result, this schedule does
not support models with skip connections.
"""
arg_mbs, kwarg_mbs = self._check_inputs(arg_mbs, kwarg_mbs, target_mbs, losses)
self._initialize_stages(arg_mbs[0], kwarg_mbs[0])
# Based on the plan in Step 1 created in __init__:
# 2. Perform communication based on the pipeline_order
stage_index_to_stage: dict[int, _PipelineStageBase] = {
stage.stage_index: stage for stage in self._stages
}
# determine prev_rank and next_rank based on which ranks are next to
# the stages in the pipeline_order
all_prev_ranks: set[int] = set()
all_next_ranks: set[int] = set()
for stage_index in stage_index_to_stage:
# TODO: assumption that stages only communicate from distances of +1/-1 (no skip connections)
if stage_index > 0:
all_prev_ranks.add(self.stage_index_to_group_rank[stage_index - 1])
if stage_index < self._num_stages - 1:
all_next_ranks.add(self.stage_index_to_group_rank[stage_index + 1])
# count either full_backward or backward_weight together, to determine when to sync DP grads
backward_counter: Counter[int] = Counter()
for time_step, action in enumerate(self.pipeline_order[self.rank]):
try:
ops: list[dist.P2POp] = []
if action is not None:
computation_type = action.computation_type
mb_index = action.microbatch_index
stage_index = action.stage_index
assert mb_index is not None, (
"All currently supported action types require valid microbatch_index"
)
if computation_type == _ComputationType.FORWARD:
# perform forward computation
stage = stage_index_to_stage[stage_index]
output = stage.forward_one_chunk(
mb_index,
arg_mbs[mb_index],
kwarg_mbs[mb_index],
save_forward_output=return_outputs,
)
self._maybe_compute_loss(stage, output, target_mbs, mb_index)
ops.extend(stage.get_fwd_send_ops(mb_index))
elif computation_type == _ComputationType.FULL_BACKWARD:
# perform backward computation
stage = stage_index_to_stage[stage_index]
loss = self._maybe_get_loss(stage, mb_index)
backward_counter[stage_index] += 1
last_backward = (
backward_counter[stage_index] == self._n_microbatches
)
grad_scale_factor = (
self._n_microbatches if self.scale_grads else 1
)
stage.backward_one_chunk(
mb_index,
loss=loss,
full_backward=True,
last_backward=last_backward,
)
if last_backward:
stage.scale_grads(grad_scale_factor)
ops.extend(stage.get_bwd_send_ops(mb_index))
elif computation_type == _ComputationType.BACKWARD_INPUT:
# perform backward computation
stage = stage_index_to_stage[stage_index]
loss = self._maybe_get_loss(stage, mb_index)
stage.backward_one_chunk(
mb_index,
loss=loss,
full_backward=False,
last_backward=False,
)
ops.extend(stage.get_bwd_send_ops(mb_index))
elif computation_type == _ComputationType.BACKWARD_WEIGHT:
# perform weight update
stage = stage_index_to_stage[stage_index]
backward_counter[stage_index] += 1
last_backward = (
backward_counter[stage_index] == self._n_microbatches
)
grad_scale_factor = (
self._n_microbatches if self.scale_grads else 1
)
stage.backward_weight_one_chunk(
mb_index,
last_backward=last_backward,
)
if last_backward:
stage.scale_grads(grad_scale_factor)
else:
raise ValueError(f"Unknown computation type {computation_type}")
# Look at the neighboring ranks for this current timestep and determine whether
# this current rank needs to do any recv communication
for prev_rank in all_prev_ranks:
prev_rank_ops = self.pipeline_order[prev_rank]
prev_rank_action = None
if time_step < len(prev_rank_ops):
prev_rank_action = prev_rank_ops[time_step]
if prev_rank_action is not None:
computation_type = prev_rank_action.computation_type
mb_index = prev_rank_action.microbatch_index
stage_index = prev_rank_action.stage_index
assert mb_index is not None, (
"All currently supported action types require valid microbatch_index"
)
# Only handle sends for the forward from a previous rank
if computation_type == _ComputationType.FORWARD:
# If not the last stage, then receive fwd activations
if stage_index + 1 in stage_index_to_stage:
# TODO: We are assuming that stage will always receive from stage-1
# however that is not necessarily true of get_fwd_recv_ops
stage = stage_index_to_stage[stage_index + 1]
ops.extend(stage.get_fwd_recv_ops(mb_index))
elif computation_type in (
FULL_BACKWARD,
BACKWARD_INPUT,
BACKWARD_WEIGHT,
):
# Previous rank doing backward has no influence for the current rank forward recv
pass
else:
raise ValueError(
f"Unknown computation type {computation_type}"
)
for next_rank in all_next_ranks:
next_rank_ops = self.pipeline_order[next_rank]
next_rank_action = None
if time_step < len(next_rank_ops):
next_rank_action = next_rank_ops[time_step]
if next_rank_action is not None:
computation_type = next_rank_action.computation_type
mb_index = next_rank_action.microbatch_index
stage_index = next_rank_action.stage_index
assert mb_index is not None, (
"All currently supported action types require valid microbatch_index"
)
# Only handle receives for the backwards from a next rank
if computation_type in (FORWARD, BACKWARD_WEIGHT):
# Next rank doing forward or weight update has no influence for the current rank backward recv
pass
elif computation_type in (BACKWARD_INPUT, FULL_BACKWARD):
# If not the first stage, then receive bwd gradients
if stage_index - 1 in stage_index_to_stage:
# TODO: We are assuming that stage will always receive from stage+1
# however that is not necessarily true of get_bwd_recv_ops
stage = stage_index_to_stage[stage_index - 1]
ops.extend(stage.get_bwd_recv_ops(mb_index))
else:
raise ValueError(
f"Unknown computation type {computation_type}"
)
# do the communication
_wait_batch_p2p(_batch_p2p(ops))
except Exception as e:
logger.error( # noqa: G200
"[Rank %s] pipeline schedule %s caught the following exception '%s' \
at time_step %s when running action %s",
self.rank,
self.__class__.__name__,
str(e),
time_step,
action,
)
logger.error(
"%s",
_format_pipeline_order(
self.pipeline_order, error_step_number=time_step
),
)
raise e
# Return losses if there is a container passed in
self._update_losses(self._stages, losses)
|
PipelineScheduleMulti
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/file_manager.py
|
{
"start": 748,
"end": 1507
}
|
class ____(ABC):
"""A reference to a file as manipulated by a FileManager.
Subclasses may handle files that are resident on the local file system, in an object store, or
in any arbitrary place where a file can be stored.
This exists to handle the very common case where you wish to write a computation that reads,
transforms, and writes files, but where you also want the same code to work in local development
as well as on a cluster where the files will be stored in a globally available object store
such as S3.
"""
@public
@property
@abstractmethod
def path_desc(self) -> str:
"""A representation of the file path for display purposes only."""
raise NotImplementedError()
@public
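To make the contract above concrete, here is a minimal hedged sketch of a local-filesystem handle. Dagster ships its own concrete handle classes; this one is purely illustrative and assumes only the FileHandle ABC above.
# Purely illustrative subclass: a handle backed by a local path.
class SketchLocalFileHandle(FileHandle):
    def __init__(self, path: str):
        self._path = path

    @property
    def path_desc(self) -> str:
        # A display-only description of where the file lives.
        return self._path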
|
FileHandle
|
python
|
django__django
|
tests/migrations/test_migrations_squashed_partially_applied/0001_initial.py
|
{
"start": 43,
"end": 967
}
|
class ____(migrations.Migration):
operations = [
migrations.CreateModel(
name="MyModel1",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
],
),
migrations.CreateModel(
name="MyModel2",
fields=[
(
"id",
models.BigAutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("field_1", models.IntegerField()),
],
),
]
|
Migration
|
python
|
pypa__warehouse
|
warehouse/organizations/services.py
|
{
"start": 1230,
"end": 28708
}
|
class ____:
def __init__(self, db_session):
self.db = db_session
def get_organization(self, organization_id):
"""
Return the organization object that represents the given organization_id,
or None if there is no organization for that ID.
"""
return self.db.get(Organization, organization_id)
def get_organization_application(self, organization_application_id):
"""
Return the organization application object that represents the given
organization_application_id, or None if there is no application for that ID.
"""
return self.db.get(OrganizationApplication, organization_application_id)
def get_organization_by_name(self, name):
"""
Return the organization object corresponding with the given organization name,
or None if there is no organization with that name.
"""
organization_id = self.find_organizationid(name)
return (
None if organization_id is None else self.get_organization(organization_id)
)
def get_organization_applications_by_name(
self, name, submitted_by=None, undecided=False
):
"""
Return a list of organization application objects matching the given name,
optionally filtered by submitter and/or undecided status.
"""
normalized_name = func.normalize_pep426_name(name)
query = self.db.query(OrganizationApplication).filter(
OrganizationApplication.normalized_name == normalized_name
)
if submitted_by is not None:
query = query.filter(OrganizationApplication.submitted_by == submitted_by)
if undecided is True:
query = query.filter(
OrganizationApplication.status
== (OrganizationApplicationStatus.Submitted)
)
return query.order_by(OrganizationApplication.normalized_name).all()
def find_organizationid(self, name):
"""
Find the unique organization identifier for the given normalized name or None
if there is no organization with the given name.
"""
normalized_name = func.normalize_pep426_name(name)
try:
(organization_id,) = (
self.db.query(OrganizationNameCatalog.organization_id)
.filter(OrganizationNameCatalog.normalized_name == normalized_name)
.one()
)
except NoResultFound:
return
return organization_id
def get_organizations(self):
"""
Return a list of all organization objects (an empty list if there are none).
"""
return self.db.scalars(select(Organization).order_by(Organization.name)).all()
def get_organizations_by_user(self, user_id):
"""
Return a list of all organization objects associated with a given user id.
"""
return (
self.db.query(Organization)
.join(OrganizationRole, OrganizationRole.organization_id == Organization.id)
.filter(OrganizationRole.user_id == user_id)
.order_by(Organization.name)
.all()
)
def add_organization_application(
self,
name,
display_name,
orgtype,
link_url,
description,
usage,
membership_size,
submitted_by,
):
"""
Accepts organization application details, creates an OrganizationApplication
with those attributes.
"""
organization_application = OrganizationApplication(
name=name,
display_name=display_name,
orgtype=orgtype,
link_url=link_url,
description=description,
usage=usage,
membership_size=membership_size,
submitted_by=submitted_by,
)
self.db.add(organization_application)
return organization_application
def approve_organization_application(self, organization_application_id, request):
"""
Performs operations necessary to approve an OrganizationApplication
"""
organization_application = self.get_organization_application(
organization_application_id
)
organization = Organization(
name=organization_application.name,
display_name=organization_application.display_name,
orgtype=organization_application.orgtype,
link_url=organization_application.link_url,
description=organization_application.description,
is_active=True,
)
self.db.add(organization)
organization.record_event(
tag=EventTag.Organization.OrganizationCreate,
request=request,
additional={
"created_by_user_id": str(organization_application.submitted_by.id),
"redact_ip": True,
},
)
self.db.flush() # flush the db now so organization.id is available
organization_application.status = OrganizationApplicationStatus.Approved
organization_application.organization = organization
self.add_catalog_entry(organization.id)
organization.record_event(
tag=EventTag.Organization.CatalogEntryAdd,
request=request,
additional={
"submitted_by_user_id": str(organization_application.submitted_by.id),
"redact_ip": True,
},
)
self.add_organization_role(
organization.id,
organization_application.submitted_by.id,
OrganizationRoleType.Owner,
)
organization.record_event(
tag=EventTag.Organization.OrganizationRoleAdd,
request=request,
additional={
"submitted_by_user_id": str(organization_application.submitted_by.id),
"role_name": "Owner",
"target_user_id": str(organization_application.submitted_by.id),
"redact_ip": True,
},
)
organization_application.submitted_by.record_event(
tag=EventTag.Account.OrganizationRoleAdd,
request=request,
additional={
"submitted_by_user_id": str(organization_application.submitted_by.id),
"organization_name": organization.name,
"role_name": "Owner",
"redact_ip": True,
},
)
organization.record_event(
tag=EventTag.Organization.OrganizationApprove,
request=request,
additional={"approved_by_user_id": str(request.user.id)},
)
message = request.params.get("message", "")
send_new_organization_approved_email(
request,
organization_application.submitted_by,
organization_name=organization.name,
message=message,
)
for competing_application in self.get_organization_applications_by_name(
organization_application.name, undecided=True
):
self.decline_organization_application(competing_application.id, request)
return organization
def defer_organization_application(self, organization_application_id, request):
"""
Performs operations necessary to defer an OrganizationApplication
"""
organization_application = self.get_organization_application(
organization_application_id
)
organization_application.status = OrganizationApplicationStatus.Deferred
return organization_application
def request_more_information(self, organization_application_id, request):
"""
Performs operations necessary to request more information of an
OrganizationApplication
"""
organization_application = self.get_organization_application(
organization_application_id
)
organization_application.status = (
OrganizationApplicationStatus.MoreInformationNeeded
)
message = request.params.get("message", "")
if not message:
raise ValueError
organization_application.record_observation(
request=request,
actor=request.user,
summary="Organization request needs more information",
kind=ObservationKind.InformationRequest,
payload={"message": message},
)
send_new_organization_moreinformationneeded_email(
request,
organization_application.submitted_by,
organization_name=organization_application.name,
organization_application_id=organization_application.id,
message=message,
)
return organization_application
def decline_organization_application(self, organization_application_id, request):
"""
Performs operations necessary to decline an OrganizationApplication
"""
organization_application = self.get_organization_application(
organization_application_id
)
organization_application.status = OrganizationApplicationStatus.Declined
message = request.params.get("message", "")
send_new_organization_declined_email(
request,
organization_application.submitted_by,
organization_name=organization_application.name,
message=message,
)
return organization_application
def add_catalog_entry(self, organization_id):
"""
Adds the organization name to the organization name catalog
"""
organization = self.get_organization(organization_id)
catalog_entry = OrganizationNameCatalog(
normalized_name=organization.normalized_name,
organization_id=organization.id,
)
try:
# Check if this organization name has already been used
catalog_entry = (
self.db.query(OrganizationNameCatalog)
.filter(
OrganizationNameCatalog.normalized_name
== organization.normalized_name,
)
.one()
)
if catalog_entry.organization_id != organization.id:
raise ValueError(
f'Organization name "{organization.normalized_name}" has been used'
)
except NoResultFound:
self.db.add(catalog_entry)
return catalog_entry
def get_organization_role(self, organization_role_id):
"""
Return the org role object that represents the given org role id,
or None if there is no organization role for that ID.
"""
return self.db.get(OrganizationRole, organization_role_id)
def get_organization_role_by_user(self, organization_id, user_id):
"""
Gets an organization role for a specified org and user
"""
try:
organization_role = (
self.db.query(OrganizationRole)
.filter(
OrganizationRole.organization_id == organization_id,
OrganizationRole.user_id == user_id,
)
.one()
)
except NoResultFound:
return
return organization_role
def get_organization_roles(self, organization_id):
"""
Gets a list of organization roles for a specified org
"""
return (
self.db.query(OrganizationRole)
.join(User)
.filter(OrganizationRole.organization_id == organization_id)
.all()
)
def add_organization_role(self, organization_id, user_id, role_name):
"""
Adds an organization role for the specified org and user
"""
role = OrganizationRole(
organization_id=organization_id,
user_id=user_id,
role_name=role_name,
)
self.db.add(role)
return role
def delete_organization_role(self, organization_role_id):
"""
Delete an organization role for a specified organization role id
"""
role = self.get_organization_role(organization_role_id)
for team_role in self.get_organization_team_roles_by_user(
role.organization.id, role.user_id
):
self.db.delete(team_role)
self.db.delete(role)
def get_organization_invite(self, organization_invite_id):
"""
Return the org invite object that represents the given org invite id,
or None if there is no organization invite for that ID.
"""
return self.db.get(OrganizationInvitation, organization_invite_id)
def get_organization_invite_by_user(self, organization_id, user_id):
"""
Gets an organization invite for a specified org and user
"""
try:
organization_invite = (
self.db.query(OrganizationInvitation)
.filter(
OrganizationInvitation.organization_id == organization_id,
OrganizationInvitation.user_id == user_id,
)
.one()
)
except NoResultFound:
return
return organization_invite
def get_organization_invites(self, organization_id):
"""
Gets a list of organization invites for a specified org
"""
return (
self.db.query(OrganizationInvitation)
.join(User)
.filter(OrganizationInvitation.organization_id == organization_id)
.all()
)
def get_organization_invites_by_user(self, user_id):
"""
Gets a list of organization invites for a specified user
"""
return (
self.db.query(OrganizationInvitation)
.filter(
OrganizationInvitation.invite_status
== OrganizationInvitationStatus.Pending,
OrganizationInvitation.user_id == user_id,
)
.all()
)
def add_organization_invite(self, organization_id, user_id, invite_token):
"""
Adds an organization invitation for the specified user and org
"""
# organization = self.get_organization(organization_id)
organization_invite = OrganizationInvitation(
organization_id=organization_id,
user_id=user_id,
token=invite_token,
invite_status=OrganizationInvitationStatus.Pending,
)
self.db.add(organization_invite)
return organization_invite
def delete_organization_invite(self, organization_invite_id):
"""
Delete an organization invite for the specified org invite id
"""
organization_invite = self.get_organization_invite(organization_invite_id)
self.db.delete(organization_invite)
def delete_organization(self, organization_id):
"""
Delete an organization for the specified organization id
"""
organization = self.get_organization(organization_id)
# Delete invitations
self.db.query(OrganizationInvitation).filter_by(
organization=organization
).delete()
# Null out organization id for all name catalog entries
self.db.query(OrganizationNameCatalog).filter(
OrganizationNameCatalog.organization_id == organization_id
).update({OrganizationNameCatalog.organization_id: None})
# Delete projects
self.db.query(OrganizationProject).filter_by(organization=organization).delete()
# Delete roles
self.db.query(OrganizationRole).filter_by(organization=organization).delete()
# Delete billing data if it exists
if organization.subscriptions:
for subscription in organization.subscriptions:
# Delete subscription items
self.db.query(StripeSubscriptionItem).filter_by(
subscription=subscription
).delete()
# Delete link to organization
self.db.query(OrganizationStripeSubscription).filter_by(
subscription=subscription
).delete()
# Delete customer link to organization
self.db.query(OrganizationStripeCustomer).filter_by(
organization=organization
).delete()
# Delete subscription object
self.db.query(StripeSubscription).filter(
StripeSubscription.id == subscription.id
).delete()
# Delete teams (and related data)
self.delete_teams_by_organization(organization_id)
# Delete organization
self.db.delete(organization)
def rename_organization(self, organization_id, name):
"""
Performs operations necessary to rename an Organization
"""
organization = self.get_organization(organization_id)
organization.name = name
try:
self.db.flush() # flush db now so organization.normalized_name available
self.add_catalog_entry(organization_id)
except UniqueViolation:
raise ValueError(f'Organization name "{name}" has been used')
return organization
def update_organization(self, organization_id, **changes):
"""
Accepts a organization object and attempts to update an organization with those
attributes
"""
organization = self.get_organization(organization_id)
for attr, value in changes.items():
if attr == NAME_FIELD:
# Call rename function to ensure a name catalog entry is added
self.rename_organization(organization_id, value)
setattr(organization, attr, value)
return organization
def get_organization_project(self, organization_id, project_id):
"""
Return the organization project object that represents the given
organization project id or None
"""
return (
self.db.query(OrganizationProject)
.filter(
OrganizationProject.organization_id == organization_id,
OrganizationProject.project_id == project_id,
)
.first()
)
def add_organization_project(self, organization_id, project_id):
"""
Adds an association between the specified organization and project
"""
organization_project = OrganizationProject(
organization_id=organization_id,
project_id=project_id,
)
self.db.add(organization_project)
self.db.flush() # Flush db so we can address the organization related object
# Mark Organization as dirty, so purges will happen
orm.attributes.flag_dirty(organization_project.organization)
return organization_project
def delete_organization_project(self, organization_id, project_id):
"""
Delete association between specified organization and project
"""
organization_project = self.get_organization_project(
organization_id, project_id
)
self.db.delete(organization_project)
def record_tos_engagement(
self,
organization_id,
revision: str,
engagement: TermsOfServiceEngagement,
) -> None:
"""
Add a record of an end user being flashed about, notified of, viewing, or agreeing
to a terms of service change on behalf of an organization.
"""
if not isinstance(engagement, TermsOfServiceEngagement):
raise ValueError(f"{engagement} is not a TermsOfServiceEngagement")
self.db.add(
OrganizationTermsOfServiceEngagement(
organization_id=organization_id,
revision=revision,
created=datetime.datetime.now(datetime.UTC),
engagement=engagement,
)
)
def get_organization_subscription(self, organization_id, subscription_id):
"""
Return the organization subscription object that represents the given
organization subscription id or None
"""
return (
self.db.query(OrganizationStripeSubscription)
.filter(
OrganizationStripeSubscription.organization_id == organization_id,
OrganizationStripeSubscription.subscription_id == subscription_id,
)
.first()
)
def add_organization_subscription(self, organization_id, subscription_id):
"""
Adds an association between the specified organization and subscription
"""
organization_subscription = OrganizationStripeSubscription(
organization_id=organization_id,
subscription_id=subscription_id,
)
self.db.add(organization_subscription)
return organization_subscription
def delete_organization_subscription(self, organization_id, subscription_id):
"""
Delete association between specified organization and subscription
"""
organization_subscription = self.get_organization_subscription(
organization_id, subscription_id
)
self.db.delete(organization_subscription)
def get_organization_stripe_customer(self, organization_id):
"""
Return the organization stripe customer object that is
associated to the given organization id or None
"""
return (
self.db.query(OrganizationStripeCustomer)
.filter(
OrganizationStripeCustomer.organization_id == organization_id,
)
.first()
)
def add_organization_stripe_customer(self, organization_id, stripe_customer_id):
"""
Adds an association between the specified organization and customer
"""
organization_stripe_customer = OrganizationStripeCustomer(
organization_id=organization_id,
stripe_customer_id=stripe_customer_id,
)
self.db.add(organization_stripe_customer)
return organization_stripe_customer
def get_teams_by_organization(self, organization_id):
"""
Return a list of all team objects for the specified organization
(an empty list if there are none).
"""
return (
self.db.execute(select(Team).where(Team.organization_id == organization_id))
.scalars()
.all()
)
def get_team(self, team_id):
"""
Return a team object for the specified identifier.
"""
return self.db.get(Team, team_id)
def find_teamid(self, organization_id, team_name):
"""
Find the unique team identifier for the given organization and
team name or None if there is no such team.
"""
normalized_name = func.normalize_team_name(team_name)
try:
(team_id,) = (
self.db.query(Team.id)
.filter(
Team.organization_id == organization_id,
Team.normalized_name == normalized_name,
)
.one()
)
except NoResultFound:
return
return team_id
def get_teams_by_user(self, user_id):
"""
Return a list of all team objects associated with a given user id.
"""
return (
self.db.query(Team)
.join(TeamRole, TeamRole.team_id == Team.id)
.filter(TeamRole.user_id == user_id)
.order_by(Team.name)
.all()
)
def get_organization_team_roles_by_user(self, organization_id, user_id):
return (
self.db.query(TeamRole)
.join(Team, Team.id == TeamRole.team_id)
.filter(
TeamRole.user_id == user_id, Team.organization_id == organization_id
)
.all()
)
def add_team(self, organization_id, name):
"""
Attempts to create a team with the specified name in an organization
"""
team = Team(
name=name,
organization_id=organization_id,
)
self.db.add(team)
return team
def rename_team(self, team_id, name):
"""
Performs operations necessary to rename a Team
"""
team = self.get_team(team_id)
team.name = name
return team
def delete_team(self, team_id):
"""
Delete team for the specified team id and all associated objects
"""
team = self.get_team(team_id)
# Delete team members
self.db.execute(delete(TeamRole).filter_by(team=team))
# Delete projects
self.db.execute(delete(TeamProjectRole).filter_by(team=team))
# Delete team
self.db.execute(delete(Team).where(Team.id == team_id))
def delete_teams_by_organization(self, organization_id):
"""
Delete all teams for the specified organization id
"""
teams = self.get_teams_by_organization(organization_id)
for team in teams:
self.delete_team(team.id)
def get_team_role(self, team_role_id):
"""
Return the team role object that represents the given team role id.
"""
return self.db.get(TeamRole, team_role_id)
def get_team_roles(self, team_id):
"""
Return a list of team role objects for the specified team.
"""
return (
self.db.query(TeamRole).join(User).filter(TeamRole.team_id == team_id).all()
)
def add_team_role(self, team_id, user_id, role_name):
"""
Add the team role object to a team for a specified team id and user id
"""
member = TeamRole(
team_id=team_id,
user_id=user_id,
role_name=role_name,
)
self.db.add(member)
return member
def delete_team_role(self, team_role_id):
"""
Remove the team role for the specified team role id
"""
member = self.get_team_role(team_role_id)
self.db.delete(member)
def get_team_project_role(self, team_project_role_id):
"""
Return the team project role object that
represents the given team project role id.
"""
return self.db.get(TeamProjectRole, team_project_role_id)
def add_team_project_role(self, team_id, project_id, role_name):
"""
Adds a team project role for the specified team and project
"""
team_project_role = TeamProjectRole(
team_id=team_id,
project_id=project_id,
role_name=role_name,
)
self.db.add(team_project_role)
return team_project_role
def delete_team_project_role(self, team_project_role_id):
"""
Remove a team project role for a specified team project role id
"""
team_project_role = self.get_team_project_role(team_project_role_id)
self.db.delete(team_project_role)
def database_organization_factory(context, request):
return DatabaseOrganizationService(request.db)
|
DatabaseOrganizationService
|
python
|
justquick__django-activity-stream
|
actstream/tests/test_drf.py
|
{
"start": 5166,
"end": 6515
}
|
class ____(BaseDRFTestCase):
def test_follow(self):
body = {
'content_type_id': self.site_ct.id,
'object_id': self.comment.id
}
post = self.auth_client.post(reverse('follow-follow'), body)
assert post.status_code == 201
follow = Follow.objects.order_by('-id').first()
assert follow.follow_object == self.comment
assert follow.user == self.user1
def test_is_following(self):
url = reverse('follow-is-following', args=[self.site_ct.id, self.comment.id])
resp = self.auth_client.get(url)
data = loads(resp.data)
assert not data['is_following']
url = reverse('follow-is-following', args=[self.user_ct.id, self.user2.id])
resp = self.auth_client.get(url)
data = loads(resp.data)
assert data['is_following']
def test_followers(self):
followers = self.auth_client.get(reverse('follow-followers')).data
assert len(followers) == 1
assert followers[0]['username'] == 'Four'
def test_following(self):
following = self.auth_client.get(reverse('follow-following')).data
assert len(following) == 1
assert following[0]['follow_object']['username'] == 'Two'
|
DRFFollowTestCase
|
python
|
crytic__slither
|
slither/detectors/shadowing/state.py
|
{
"start": 1052,
"end": 2870
}
|
class ____(AbstractDetector):
"""
Shadowing of state variable
"""
ARGUMENT = "shadowing-state"
HELP = "State variables shadowing"
IMPACT = DetectorClassification.HIGH
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#state-variable-shadowing"
WIKI_TITLE = "State variable shadowing"
WIKI_DESCRIPTION = "Detection of state variables shadowed."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract BaseContract{
address owner;
modifier isOwner(){
require(owner == msg.sender);
_;
}
}
contract DerivedContract is BaseContract{
address owner;
constructor(){
owner = msg.sender;
}
function withdraw() isOwner() external{
msg.sender.transfer(this.balance);
}
}
```
`owner` of `BaseContract` is never assigned and the modifier `isOwner` does not work."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Remove the state variable shadowing."
def _detect(self) -> List[Output]:
"""Detect shadowing
Recursively visit the calls
Returns:
list: {'vuln', 'filename', 'contract', 'func', 'shadow'}
"""
results = []
for c in self.contracts:
shadowing = detect_shadowing(c)
if shadowing:
for all_variables in shadowing:
shadow = all_variables[0]
variables = all_variables[1:]
info: DETECTOR_INFO = [shadow, " shadows:\n"]
for var in variables:
info += ["\t- ", var, "\n"]
res = self.generate_result(info)
results.append(res)
return results
|
StateShadowing
|
python
|
joke2k__faker
|
tests/providers/test_ssn.py
|
{
"start": 44781,
"end": 45101
}
|
class ____(unittest.TestCase):
num_sample_runs = 10
def setUp(self):
self.fake = Faker("az_AZ")
self.samples = [self.fake.ssn() for _ in range(self.num_sample_runs)]
Faker.seed(0)
def check_length(self):
for sample in self.samples:
assert len(sample) == 7
|
TestAzAz
|
python
|
PyCQA__pylint
|
tests/functional/a/attribute_defined_outside_init.py
|
{
"start": 661,
"end": 774
}
|
class ____:
def __init__(self):
i = self._init
i()
def _init(self):
self.z = 44
|
E
|
python
|
django__django
|
django/contrib/gis/db/backends/spatialite/models.py
|
{
"start": 1349,
"end": 1930
}
|
class ____(models.Model, SpatialRefSysMixin):
"""
The 'spatial_ref_sys' table from SpatiaLite.
"""
srid = models.IntegerField(primary_key=True)
auth_name = models.CharField(max_length=256)
auth_srid = models.IntegerField()
ref_sys_name = models.CharField(max_length=256)
proj4text = models.CharField(max_length=2048)
srtext = models.CharField(max_length=2048)
class Meta:
app_label = "gis"
db_table = "spatial_ref_sys"
managed = False
@property
def wkt(self):
return self.srtext
|
SpatialiteSpatialRefSys
|
python
|
python-attrs__attrs
|
tests/test_make.py
|
{
"start": 42186,
"end": 43174
}
|
class ____:
"""
Tests for `fields_dict`.
"""
@given(simple_classes())
def test_instance(self, C):
"""
Raises `TypeError` on non-classes.
"""
with pytest.raises(TypeError) as e:
fields_dict(C())
assert "Passed object must be a class." == e.value.args[0]
def test_handler_non_attrs_class(self):
"""
Raises `ValueError` if passed a non-*attrs* instance.
"""
with pytest.raises(NotAnAttrsClassError) as e:
fields_dict(object)
assert (
f"{object!r} is not an attrs-decorated class."
) == e.value.args[0]
@given(simple_classes())
def test_fields_dict(self, C):
"""
Returns an ordered dict of ``{attribute_name: Attribute}``.
"""
d = fields_dict(C)
assert isinstance(d, dict)
assert list(fields(C)) == list(d.values())
assert [a.name for a in fields(C)] == list(d)
|
TestFieldsDict
|
python
|
apache__airflow
|
providers/teradata/src/airflow/providers/teradata/operators/teradata_compute_cluster.py
|
{
"start": 1719,
"end": 2319
}
|
class ____(Enum):
SETUP = 1
STATE = 2
# Handler to handle single result set of a SQL query
def _single_result_row_handler(cursor):
records = cursor.fetchone()
if isinstance(records, list):
return records[0]
if records is None:
return records
raise TypeError(f"Unexpected results: {cursor.fetchone()!r}")
# Determine whether the given operation is a setup or a state operation
def _determine_operation_context(operation):
if operation == Constants.CC_CREATE_OPR or operation == Constants.CC_DROP_OPR:
return _Operation.SETUP
return _Operation.STATE
|
_Operation
|
python
|
python-attrs__attrs
|
tests/test_converters.py
|
{
"start": 4365,
"end": 5873
}
|
class ____:
def test_missing_default(self):
"""
Raises TypeError if neither default nor factory have been passed.
"""
with pytest.raises(TypeError, match="Must pass either"):
default_if_none()
def test_too_many_defaults(self):
"""
Raises TypeError if both default and factory are passed.
"""
with pytest.raises(TypeError, match="but not both"):
default_if_none(True, lambda: 42)
def test_factory_takes_self(self):
"""
Raises ValueError if passed Factory has takes_self=True.
"""
with pytest.raises(ValueError, match="takes_self"):
default_if_none(Factory(list, takes_self=True))
@pytest.mark.parametrize("val", [1, 0, True, False, "foo", "", object()])
def test_not_none(self, val):
"""
If a non-None value is passed, it's handed down.
"""
c = default_if_none("nope")
assert val == c(val)
c = default_if_none(factory=list)
assert val == c(val)
def test_none_value(self):
"""
Default values are returned when a None is passed.
"""
c = default_if_none(42)
assert 42 == c(None)
def test_none_factory(self):
"""
Factories are used if None is passed.
"""
c = default_if_none(factory=list)
assert [] == c(None)
c = default_if_none(default=Factory(list))
assert [] == c(None)
|
TestDefaultIfNone
|
python
|
xlwings__xlwings
|
xlwings/constants.py
|
{
"start": 45622,
"end": 46153
}
|
class ____:
xlDMYFormat = 4 # from enum XlColumnDataType
xlDYMFormat = 7 # from enum XlColumnDataType
xlEMDFormat = 10 # from enum XlColumnDataType
xlGeneralFormat = 1 # from enum XlColumnDataType
xlMDYFormat = 3 # from enum XlColumnDataType
xlMYDFormat = 6 # from enum XlColumnDataType
xlSkipColumn = 9 # from enum XlColumnDataType
xlTextFormat = 2 # from enum XlColumnDataType
xlYDMFormat = 8 # from enum XlColumnDataType
xlYMDFormat = 5 # from enum XlColumnDataType
|
ColumnDataType
|
python
|
huggingface__transformers
|
src/transformers/models/lxmert/modeling_lxmert.py
|
{
"start": 17671,
"end": 18089
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[config.hidden_act]
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
|
LxmertIntermediate
|
python
|
redis__redis-py
|
tests/test_command_policies.py
|
{
"start": 3115,
"end": 6711
}
|
class ____:
def test_resolves_correctly_policies(self, r, monkeypatch):
# original nodes selection method
determine_nodes = r._determine_nodes
determined_nodes = []
primary_nodes = r.get_primaries()
calls = iter(list(range(len(primary_nodes))))
def wrapper(*args, request_policy: RequestPolicy, **kwargs):
nonlocal determined_nodes
determined_nodes = determine_nodes(
*args, request_policy=request_policy, **kwargs
)
return determined_nodes
# Mock random.choice to always return a pre-defined sequence of nodes
monkeypatch.setattr(random, "choice", lambda seq: seq[next(calls)])
with patch.object(r, "_determine_nodes", side_effect=wrapper, autospec=True):
# Routed to a random primary node
r.ft().create_index(
(
NumericField("random_num"),
TextField("title"),
TextField("body"),
TextField("parent"),
)
)
assert determined_nodes[0] == primary_nodes[0]
# Routed to another random primary node
info = r.ft().info()
if is_resp2_connection(r):
assert info["index_name"] == "idx"
else:
assert info[b"index_name"] == b"idx"
assert determined_nodes[0] == primary_nodes[1]
expected_node = r.get_nodes_from_slot("ft.suglen", *["FT.SUGLEN", "foo"])
r.ft().suglen("foo")
assert determined_nodes[0] == expected_node[0]
# Indexing a document
r.hset(
"search",
mapping={
"title": "RediSearch",
"body": "Redisearch impements a search engine on top of redis",
"parent": "redis",
"random_num": 10,
},
)
r.hset(
"ai",
mapping={
"title": "RedisAI",
"body": "RedisAI executes Deep Learning/Machine Learning models and managing their data.", # noqa
"parent": "redis",
"random_num": 3,
},
)
r.hset(
"json",
mapping={
"title": "RedisJson",
"body": "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.", # noqa
"parent": "redis",
"random_num": 8,
},
)
req = AggregateRequest("redis").group_by("@parent").cursor(1)
if is_resp2_connection(r):
cursor = r.ft().aggregate(req).cursor
else:
cursor = Cursor(r.ft().aggregate(req)[1])
# Ensure that aggregate node was cached.
assert determined_nodes[0] == r._aggregate_nodes[0]
r.ft().aggregate(cursor)
# Verify that FT.CURSOR dispatched to the same node.
assert determined_nodes[0] == r._aggregate_nodes[0]
# Error propagates to a user
with pytest.raises(ResponseError, match="Cursor not found, id:"):
r.ft().aggregate(cursor)
assert determined_nodes[0] == primary_nodes[2]
# Core commands also randomly distributed across masters
r.randomkey()
assert determined_nodes[0] == primary_nodes[0]
|
TestClusterWithPolicies
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/elements/camera_input_test.py
|
{
"start": 4032,
"end": 6985
}
|
class ____(DeltaGeneratorTestCase):
def test_camera_input_with_width_pixels(self):
"""Test that camera_input can be displayed with a specific width in pixels."""
st.camera_input("Label", width=500)
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.PIXEL_WIDTH.value
)
assert c.width_config.pixel_width == 500
def test_camera_input_with_width_stretch(self):
"""Test that camera_input can be displayed with a width of 'stretch'."""
st.camera_input("Label", width="stretch")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
def test_camera_input_with_default_width(self):
"""Test that the default width is used when not specified."""
st.camera_input("Label")
c = self.get_delta_from_queue().new_element
assert (
c.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert c.width_config.use_stretch
@parameterized.expand(
[
"invalid",
-1,
0,
100.5,
]
)
def test_width_config_invalid(self, invalid_width):
"""Test width config with various invalid values."""
with pytest.raises(StreamlitInvalidWidthError):
st.camera_input("the label", width=invalid_width)
def test_stable_id_with_key(self):
"""Test that the widget ID is stable when a stable key is provided."""
with patch(
"streamlit.elements.lib.utils._register_element_id",
return_value=MagicMock(),
):
# First render with certain params
st.camera_input(
label="Label 1",
key="camera_input_key",
help="Help 1",
disabled=False,
width="stretch",
on_change=lambda: None,
args=("arg1", "arg2"),
kwargs={"kwarg1": "kwarg1"},
label_visibility="visible",
)
c1 = self.get_delta_from_queue().new_element.camera_input
id1 = c1.id
# Second render with different params but same key
st.camera_input(
label="Label 2",
key="camera_input_key",
help="Help 2",
disabled=True,
width=200,
on_change=lambda: None,
args=("arg_1", "arg_2"),
kwargs={"kwarg_1": "kwarg_1"},
label_visibility="hidden",
)
c2 = self.get_delta_from_queue().new_element.camera_input
id2 = c2.id
assert id1 == id2
|
CameraInputWidthTest
|
python
|
scikit-learn__scikit-learn
|
sklearn/externals/_arff.py
|
{
"start": 12564,
"end": 12945
}
|
class ____(ArffException):
'''Error raised when an attribute name is provided twice in the
attribute declaration.'''
def __init__(self, value, value2):
super().__init__()
self.message = (
('Bad @ATTRIBUTE name %s at line' % value) +
' %d, this name is already in use in line' +
(' %d.' % value2)
)
|
BadAttributeName
|
python
|
sphinx-doc__sphinx
|
tests/roots/test-ext-autodoc/target/inheritance.py
|
{
"start": 541,
"end": 622
}
|
class ____(list): # NoQA: FURB189
def meth(self):
"""docstring"""
|
MyList
|
python
|
mlflow__mlflow
|
mlflow/server/graphql/graphql_no_batching.py
|
{
"start": 343,
"end": 2959
}
|
class ____(NamedTuple):
root_fields: int
max_aliases: int
def scan_query(ast_node: DocumentNode) -> QueryInfo:
"""
Scan a GraphQL query and return its information.
"""
root_fields = 0
max_aliases = 0
total_selections = 0
for definition in ast_node.definitions:
if selection_set := getattr(definition, "selection_set", None):
stack = [(selection_set, 1)]
while stack:
selection_set, depth = stack.pop()
# check current level depth
if depth > _MAX_DEPTH:
raise GraphQLError(f"Query exceeds maximum depth of {_MAX_DEPTH}")
selections = getattr(selection_set, "selections", [])
# check current level aliases
current_aliases = 0
for selection in selections:
if isinstance(selection, FieldNode):
if depth == 1:
root_fields += 1
if selection.alias:
current_aliases += 1
if selection.selection_set:
stack.append((selection.selection_set, depth + 1))
total_selections += 1
if total_selections > _MAX_SELECTIONS:
raise GraphQLError(
f"Query exceeds maximum total selections of {_MAX_SELECTIONS}"
)
max_aliases = max(max_aliases, current_aliases)
return QueryInfo(root_fields, max_aliases)
def check_query_safety(ast_node: DocumentNode) -> ExecutionResult | None:
try:
query_info = scan_query(ast_node)
except GraphQLError as e:
return ExecutionResult(
data=None,
errors=[e],
)
if query_info.root_fields > MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS.get():
msg = "root fields"
env_var = MLFLOW_SERVER_GRAPHQL_MAX_ROOT_FIELDS
value = query_info.root_fields
elif query_info.max_aliases > MLFLOW_SERVER_GRAPHQL_MAX_ALIASES.get():
msg = "aliases"
env_var = MLFLOW_SERVER_GRAPHQL_MAX_ALIASES
value = query_info.max_aliases
else:
return None
return ExecutionResult(
data=None,
errors=[
GraphQLError(
f"GraphQL queries should have at most {env_var.get()} {msg}, "
f"got {value} {msg}. To increase the limit, set the "
f"{env_var.name} environment variable."
)
],
)
|
QueryInfo
|
python
|
huggingface__transformers
|
tests/models/rwkv/test_modeling_rwkv.py
|
{
"start": 7592,
"end": 14205
}
|
class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (RwkvModel, RwkvForCausalLM) if is_torch_available() else ()
pipeline_model_mapping = (
{"feature-extraction": RwkvModel, "text-generation": RwkvForCausalLM} if is_torch_available() else {}
)
test_missing_keys = False
def setUp(self):
self.model_tester = RwkvModelTester(self)
self.config_tester = ConfigTester(
self, config_class=RwkvConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
)
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, (list, tuple)):
max_value, min_value = max(member), min(member)
if not isinstance(container, list):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
def test_config(self):
self.config_tester.run_common_tests()
def test_rwkv_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_rwkv_model(*config_and_inputs)
def test_rwkv_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causl_lm(*config_and_inputs)
def test_state_equivalency(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_state_equivalency(*config_and_inputs)
def test_attention_outputs(self):
r"""
Overriding the test_attention_outputs test as the attention outputs of Rwkv are different from other models:
they have shape `(batch_size, seq_len, hidden_size)`.
"""
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
seq_len = getattr(self.model_tester, "seq_length", None)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
attentions = outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[batch_size, seq_len, config.hidden_size],
)
out_len = len(outputs)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
batch_size = inputs["input_ids"].shape[0]
with torch.no_grad():
outputs = model(**inputs)
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[batch_size, seq_len, config.hidden_size],
)
@slow
def test_model_from_pretrained(self):
model_name = "RWKV/rwkv-4-169m-pile"
model = RwkvModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def test_beam_sample_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_beam_sample_generate_dict_output()
self.has_attentions = old_has_attentions
def test_beam_search_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_beam_search_generate_dict_output()
self.has_attentions = old_has_attentions
def test_greedy_generate_dict_outputs(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_greedy_generate_dict_outputs()
self.has_attentions = old_has_attentions
def test_sample_generate_dict_output(self):
# This model has a custom attention output shape AND config flags, let's skip those checks
old_has_attentions = self.has_attentions
self.has_attentions = False
super().test_sample_generate_dict_output()
self.has_attentions = old_has_attentions
@unittest.skip("This model doesn't support padding")
def test_left_padding_compatibility(self):
pass
@slow
|
RwkvModelTest
|
python
|
huggingface__transformers
|
src/transformers/models/big_bird/modeling_big_bird.py
|
{
"start": 95732,
"end": 99697
}
|
class ____(BigBirdPreTrainedModel, GenerationMixin):
_tied_weights_keys = {
"cls.predictions.decoder.bias": "cls.predictions.bias",
"cls.predictions.decoder.weight": "bert.embeddings.word_embeddings.weight",
}
def __init__(self, config):
super().__init__(config)
if not config.is_decoder:
logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`")
self.bert = BigBirdModel(config)
self.cls = BigBirdOnlyMLMHead(config)
# Initialize weights and apply final processing
self.post_init()
def get_output_embeddings(self):
return self.cls.predictions.decoder
def set_output_embeddings(self, new_embeddings):
self.cls.predictions.decoder = new_embeddings
self.cls.predictions.bias = new_embeddings.bias
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[CausalLMOutputWithCrossAttentions, tuple[torch.FloatTensor]]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
`[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.cls(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return CausalLMOutputWithCrossAttentions(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
|
BigBirdForCausalLM
|
python
|
tensorflow__tensorflow
|
tensorflow/python/autograph/pyct/anno.py
|
{
"start": 2357,
"end": 5919
}
|
class ____(NoValue):
"""Container for static analysis annotation keys.
The enum values are used strictly for documentation purposes.
"""
# Symbols
# These flags are boolean.
IS_PARAM = 'Symbol is a parameter to the function being analyzed.'
# Scopes
# Scopes are represented by objects of type activity.Scope.
SCOPE = 'The scope for the annotated node. See activity.py.'
# TODO(mdan): Drop these in favor of accessing the child's SCOPE.
ARGS_SCOPE = 'The scope for the argument list of a function call.'
COND_SCOPE = 'The scope for the test node of a conditional statement.'
BODY_SCOPE = (
'The scope for the main body of a statement (True branch for if '
'statements, main body for loops).')
ORELSE_SCOPE = (
'The scope for the orelse body of a statement (False branch for if '
'statements, orelse body for loops).')
# Static analysis annotations.
DEFINITIONS = (
'Reaching definition information. See reaching_definitions.py.')
ORIG_DEFINITIONS = (
'The value of DEFINITIONS that applied to the original code before any'
' conversion.')
DEFINED_FNS_IN = (
'Local function definitions that may exist when exiting the node. See'
' reaching_fndefs.py')
DEFINED_VARS_IN = (
'Symbols defined when entering the node. See reaching_definitions.py.')
LIVE_VARS_OUT = ('Symbols live when exiting the node. See liveness.py.')
LIVE_VARS_IN = ('Symbols live when entering the node. See liveness.py.')
TYPES = 'Static type information. See type_inference.py.'
CLOSURE_TYPES = 'Types of closure symbols at each detected call site.'
VALUE = 'Static value information. See type_inference.py.'
FAIL = object()
def keys(node, field_name='___pyct_anno'):
if not hasattr(node, field_name):
return frozenset()
return frozenset(getattr(node, field_name).keys())
def getanno(node, key, default=FAIL, field_name='___pyct_anno'):
if (default is FAIL or (hasattr(node, field_name) and
(key in getattr(node, field_name)))):
return getattr(node, field_name)[key]
return default
def hasanno(node, key, field_name='___pyct_anno'):
return hasattr(node, field_name) and key in getattr(node, field_name)
def setanno(node, key, value, field_name='___pyct_anno'):
annotations = getattr(node, field_name, {})
setattr(node, field_name, annotations)
annotations[key] = value
# So that the annotations survive gast_to_ast() and ast_to_gast()
if field_name not in node._fields:
node._fields += (field_name,)
def delanno(node, key, field_name='___pyct_anno'):
annotations = getattr(node, field_name)
del annotations[key]
if not annotations:
delattr(node, field_name)
node._fields = tuple(f for f in node._fields if f != field_name)
def copyanno(from_node, to_node, key, field_name='___pyct_anno'):
if hasanno(from_node, key, field_name=field_name):
setanno(
to_node,
key,
getanno(from_node, key, field_name=field_name),
field_name=field_name)
def dup(node, copy_map, field_name='___pyct_anno'):
"""Recursively copies annotations in an AST tree.
Args:
node: ast.AST
copy_map: Dict[Hashable, Hashable], maps a source anno key to a destination
key. All annotations with the source key will be copied to identical
annotations with the destination key.
field_name: str
"""
for n in gast.walk(node):
for k in copy_map:
if hasanno(n, k, field_name):
setanno(n, copy_map[k], getanno(n, k, field_name), field_name)
|
Static
|
python
|
pytorch__pytorch
|
torch/_functorch/_aot_autograd/autograd_cache.py
|
{
"start": 14335,
"end": 20541
}
|
class ____(FxGraphCachePickler):
def __init__(self, gm: torch.fx.GraphModule):
super().__init__(gm)
# pyrefly: ignore [bad-override]
self.dispatch_table: dict
self.dispatch_table.update(
{
AOTConfig: functools.partial(self._reduce_aot_config),
torch.Tensor: functools.partial(self._reduce_tensor),
}
)
def _reduce_aot_config(self, aot_config: AOTConfig):
"""
Reduce the config to a stable key for caching.
"""
return (
_ident,
(
aot_config.num_params_buffers,
aot_config.keep_inference_input_mutations,
aot_config.is_export,
aot_config.no_tangents,
aot_config.dynamic_shapes,
aot_config.aot_autograd_arg_pos_to_source,
aot_config.enable_log,
aot_config.pre_dispatch,
),
)
def _reduce_tensor(self, tensor):
"""
Reduce the tensor to a stable key for caching.
"""
metadata = extract_tensor_metadata_for_cache_key(tensor)
return (_ident, (metadata,))
@contextlib.contextmanager
def normalize_placeholder_names(gm: torch.fx.GraphModule):
"""
Context manager that normalizes the placeholder names in the graph module.
This is used while generating a cache key for AOTAutogradCache, so that two graphs
that are isomorphic when normalizing names can hit the same cache entry.
This is safe because nothing underneath AOTAutograd uses the node names on the
original dynamo graph: AOTAutograd re-traces with its own nodes, and guards are
in terms of original sources rather than placeholder names.
"""
# Standalone inductor: we're bypassing AOTAutogradCache anyway, so return the graph
# as-is
if not config.autograd_cache_normalize_inputs or not hasattr(gm, "graph"):
yield
return
# Track all the old state of placeholders
old_placeholder_names = []
old_used_names = copy(gm.graph._graph_namespace._used_names)
i = 0
for n in gm.graph.find_nodes(op="placeholder", sort=True):
if n.type != torch.SymInt:
# _rename renames the node in the body of the function,
# but it doesn't change the raw name from node.target
# So we also set the raw_name of node.target to a new placeholder name
new_placeholder_name = f"p_{i}"
old_placeholder_names.append((n.name, n.target))
n.target = new_placeholder_name
n._rename(new_placeholder_name)
i += 1
gm.recompile()
try:
yield
finally:
# Used_names contains all our old placeholder names,
# so we clear it temporarily when we put them back
gm.graph._graph_namespace._used_names = set()
# Restore the placeholder names
i = 0
for n in gm.graph.find_nodes(op="placeholder", sort=True):
if n.type != torch.SymInt:
(name, target) = old_placeholder_names[i]
n.target = target
n._rename(name)
i += 1
assert i == len(old_placeholder_names)
# Now restore the old namespace's used names
gm.graph._graph_namespace._used_names = old_used_names
gm.recompile()
def autograd_cache_key(
gm: torch.fx.GraphModule,
example_inputs,
config: AOTConfig,
fx_config: _CompileFxKwargs,
# TODO: add args and parameters
) -> tuple[str, list[str]]:
"""
Generate a unique hash of the FX graph for caching.
"""
check_cacheable(gm)
if has_triton_package():
# Due to https://github.com/triton-lang/triton/issues/3729,
# if triton is < 3.2.0, AOTAutogradCache may cause us to
# attempt to load a cache entry without initializing
# the CUDA context on the autograd thread.
# Without caching, we naturally do this initialization when
# tracing through the graph with the autograd engine.
import triton
if triton.__version__ < "3.2.0":
raise BypassAOTAutogradCache("AOTAutogradCache requires triton 3.2.0")
details = AOTAutogradCacheDetails(gm, example_inputs, config, fx_config)
pickler = AOTAutogradCachePickler(gm)
# The prefix distinguishes among the other kinds of objects we cache
key = "a" + pickler.get_hash(details)
debug_lines = pickler.debug_lines(details)
log.debug(
"Autograd graph cache hash details for key %s:\n%s",
key,
LazyString(lambda: "\n".join(debug_lines)),
)
return key, debug_lines
@contextlib.contextmanager
def sanitize_gm_for_cache(gm: torch.fx.GraphModule):
"""
Clears a few fields in a dynamo supplied Graph Module that are not stable between graph inputs, but don't
affect inductor or aotdispatch correctness.
These fields **can** be used by code calling into aotdispatch (namely, dynamo), so we can't null them out completely.
To ensure that these fields are not accessed by inductor or aotdispatch, we clear them during AOTAutogradCache.load,
and then put them back before returning. This way, we generate a cache key based off of a canonical graph
without these fields, and also guarantee they aren't used to affect the cache's output.
"""
# Mapping from each field to a default value
IGNORED_FIELDS: dict[str, Any] = {
"meta": {}, # metadata used by export
"compile_subgraph_reason": None, # Used by dynamo only for logging, no change in inductor/autograd behavior
"_param_name_to_source": None, # Encapsulated by aot_config.aot_autograd_arg_pos_to_source
"_backend_id": None,
}
saved_fields = {}
for field, default_value in IGNORED_FIELDS.items():
saved_fields[field] = getattr(gm, field, None)
# Clear the field
setattr(gm, field, default_value)
try:
with normalize_placeholder_names(gm):
yield
finally:
for field, value in saved_fields.items():
setattr(gm, field, value)
@CacheArtifactFactory.register
|
AOTAutogradCachePickler
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/multiindex_object.py
|
{
"start": 5030,
"end": 5382
}
|
class ____:
params = ["int64", "Int64"]
param_names = ["dtype"]
def setup(self, dtype):
a = array(np.tile(np.arange(100), 1000), dtype=dtype)
b = array(np.tile(np.arange(1000), 100), dtype=dtype)
self.mi = MultiIndex.from_arrays([a, b])
def time_sort_values(self, dtype):
self.mi.sort_values()
|
SortValues
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/operators/test_datacatalog.py
|
{
"start": 36883,
"end": 38596
}
|
class ____:
@mock.patch("airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook")
def test_assert_valid_hook_call(self, mock_hook) -> None:
mock_hook.return_value.update_tag_template.return_value.name = TEST_TAG_TEMPLATE_LINK.format(
project_id=TEST_PROJECT_ID,
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
)
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogUpdateTagTemplateOperator(
task_id="task_id",
tag_template=TagTemplate(name=TEST_TAG_TEMPLATE_ID),
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
task.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.update_tag_template.assert_called_once_with(
tag_template=TagTemplate(name=TEST_TAG_TEMPLATE_ID),
update_mask=TEST_UPDATE_MASK,
location=TEST_LOCATION,
tag_template_id=TEST_TAG_TEMPLATE_ID,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
|
TestCloudDataCatalogUpdateTagTemplateOperator
|
python
|
walkccc__LeetCode
|
solutions/2416. Sum of Prefix Scores of Strings/2416.py
|
{
"start": 103,
"end": 636
}
|
class ____:
def sumPrefixScores(self, words: list[str]) -> list[int]:
root = TrieNode()
def insert(word: str) -> None:
node: TrieNode = root
for c in word:
node = node.children.setdefault(c, TrieNode())
node.count += 1
for word in words:
insert(word)
def getScore(word: str) -> int:
node: TrieNode = root
score = 0
for c in word:
node = node.children[c]
score += node.count
return score
return [getScore(word) for word in words]
|
Solution
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/constructor33.py
|
{
"start": 325,
"end": 498
}
|
class ____:
def __new__(cls, *args: Any, **kwargs: Any) -> Self: ...
def __init__(self, base: list[str], joined: str) -> None: ...
A(temp := ["x"], " ".join(temp))
|
A
|
python
|
numba__numba
|
numba/core/typeinfer.py
|
{
"start": 34811,
"end": 36307
}
|
class ____(dict):
def set_context(self, context):
self.context = context
def __getitem__(self, name):
if name not in self:
self[name] = TypeVar(self.context, name)
return super(TypeVarMap, self).__getitem__(name)
def __setitem__(self, name, value):
assert isinstance(name, str)
if name in self:
raise KeyError("Cannot redefine typevar %s" % name)
else:
super(TypeVarMap, self).__setitem__(name, value)
# A temporary mapping of {function name: dispatcher object}
_temporary_dispatcher_map = {}
# A temporary mapping of {function name: dispatcher object reference count}
# Reference: https://github.com/numba/numba/issues/3658
_temporary_dispatcher_map_ref_count = defaultdict(int)
@contextlib.contextmanager
def register_dispatcher(disp):
"""
Register a Dispatcher for inference while it is not yet stored
as global or closure variable (e.g. during execution of the @jit()
call). This allows resolution of recursive calls with eager
compilation.
"""
assert callable(disp)
assert callable(disp.py_func)
name = disp.py_func.__name__
_temporary_dispatcher_map[name] = disp
_temporary_dispatcher_map_ref_count[name] += 1
try:
yield
finally:
_temporary_dispatcher_map_ref_count[name] -= 1
if not _temporary_dispatcher_map_ref_count[name]:
del _temporary_dispatcher_map[name]
typeinfer_extensions = {}
|
TypeVarMap
|
python
|
astropy__astropy
|
astropy/utils/masked/tests/test_masked.py
|
{
"start": 20263,
"end": 22817
}
|
class ____(MaskedArraySetup):
def test_reshape(self):
ma_reshape = self.ma.reshape((6,))
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting(self):
ma_reshape = self.ma.copy()
ma_reshape.shape = (6,)
expected_data = self.a.reshape((6,))
expected_mask = self.mask_a.reshape((6,))
assert ma_reshape.shape == expected_data.shape
assert_array_equal(ma_reshape.unmasked, expected_data)
assert_array_equal(ma_reshape.mask, expected_mask)
def test_shape_setting_failure(self):
ma = self.ma.copy()
with pytest.raises(ValueError, match="cannot reshape"):
ma.shape = (5,)
assert ma.shape == self.ma.shape
assert ma.mask.shape == self.ma.shape
# Here, mask can be reshaped but array cannot.
ma2 = Masked(np.broadcast_to([[1.0], [2.0]], self.a.shape), mask=self.mask_a)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma2.shape = (6,)
assert ma2.shape == self.ma.shape
assert ma2.mask.shape == self.ma.shape
# Here, array can be reshaped but mask cannot.
ma3 = Masked(
self.a.copy(), mask=np.broadcast_to([[True], [False]], self.mask_a.shape)
)
with pytest.raises(AttributeError, match="ncompatible shape"):
ma3.shape = (6,)
assert ma3.shape == self.ma.shape
assert ma3.mask.shape == self.ma.shape
def test_ravel(self):
ma_ravel = self.ma.ravel()
expected_data = self.a.ravel()
expected_mask = self.mask_a.ravel()
assert ma_ravel.shape == expected_data.shape
assert_array_equal(ma_ravel.unmasked, expected_data)
assert_array_equal(ma_ravel.mask, expected_mask)
def test_transpose(self):
ma_transpose = self.ma.transpose()
expected_data = self.a.transpose()
expected_mask = self.mask_a.transpose()
assert ma_transpose.shape == expected_data.shape
assert_array_equal(ma_transpose.unmasked, expected_data)
assert_array_equal(ma_transpose.mask, expected_mask)
def test_iter(self):
for ma, d, m in zip(self.ma, self.a, self.mask_a):
assert_array_equal(ma.unmasked, d)
assert_array_equal(ma.mask, m)
|
TestMaskedArrayShaping
|
python
|
pandas-dev__pandas
|
pandas/tests/groupby/test_libgroupby.py
|
{
"start": 3377,
"end": 3999
}
|
class ____(GroupVarTestMixin):
__test__ = True
algo = staticmethod(group_var)
dtype = np.float64
rtol = 1e-5
def test_group_var_large_inputs(self):
prng = np.random.default_rng(2)
out = np.array([[np.nan]], dtype=self.dtype)
counts = np.array([0], dtype="int64")
values = (prng.random(10**6) + 10**12).astype(self.dtype)
values.shape = (10**6, 1)
labels = np.zeros(10**6, dtype="intp")
self.algo(out, counts, values, labels)
assert counts[0] == 10**6
tm.assert_almost_equal(out[0, 0], 1.0 / 12, rtol=0.5e-3)
|
TestGroupVarFloat64
|
python
|
falconry__falcon
|
tests/test_httperror.py
|
{
"start": 4698,
"end": 4855
}
|
class ____:
def on_get(self, req, resp):
raise falcon.HTTPMethodNotAllowed(['PUT'], headers={'x-ping': 'pong'})
|
MethodNotAllowedResourceWithHeaders
|