| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths 6-201) | class_span (dict) | source (stringlengths 21-2.38M) | target (stringlengths 1-96) |
|---|---|---|---|---|---|
| python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_tombstone.py | {"start": 184, "end": 1463} |
class ____(APITestCase):
def test_simple(self) -> None:
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
group = self.create_group(project=self.project)
tombstone = GroupTombstone.objects.create(
project_id=group.project_id,
level=group.level,
message=group.message,
culprit=group.culprit,
data=group.data,
previous_group_id=group.id,
)
GroupHash.objects.create(
project=group.project, hash="x" * 32, group=group, group_tombstone_id=tombstone.id
)
path = reverse(
"sentry-api-0-group-tombstones",
kwargs={
"organization_id_or_slug": self.org.slug,
"project_id_or_slug": self.project.slug,
},
)
response = self.client.get(path)
assert response.status_code == 200, response
assert response.data[0]["message"] == group.message
| GroupTombstoneTest |
| python | getsentry__sentry | tests/sentry/api/endpoints/test_oauth_userinfo.py | {"start": 256, "end": 3997} |
class ____(APITestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.path = reverse(
"sentry-api-0-oauth-userinfo",
)
self.client = APIClient()
def test_requires_access_token(self) -> None:
response = self.client.get(self.path)
assert response.status_code == 400
assert response.data["detail"]["code"] == "parameter-validation-error"
assert (
response.data["detail"]["message"] == "Bearer token not found in authorization header"
)
def test_declines_invalid_token(self) -> None:
self.client.credentials(HTTP_AUTHORIZATION="Bearer abcd")
response = self.client.get(self.path)
assert response.status_code == 404
assert response.data["detail"] == "Access token not found"
def test_declines_if_no_openid_scope(self) -> None:
token_without_openid_scope = ApiToken.objects.create(user=self.user, scope_list=[])
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + token_without_openid_scope.token)
response = self.client.get(self.path)
assert response.status_code == 403
assert response.data["detail"]["code"] == "insufficient-scope"
assert response.data["detail"]["message"] == "openid scope is required for userinfo access"
def test_gets_sub_with_openid_scope(self) -> None:
"""
Ensures we get `sub`, and only `sub`, if the only scope is openid.
"""
openid_only_token = ApiToken.objects.create(user=self.user, scope_list=["openid"])
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + openid_only_token.token)
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data == {"sub": self.user.id}
def test_gets_email_information(self) -> None:
email_token = ApiToken.objects.create(user=self.user, scope_list=["openid", "email"])
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + email_token.token)
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data == {
"sub": self.user.id,
"email": self.user.email,
"email_verified": True,
}
def test_gets_profile_information(self) -> None:
profile_token = ApiToken.objects.create(user=self.user, scope_list=["openid", "profile"])
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + profile_token.token)
response = self.client.get(self.path)
assert response.status_code == 200
assert response.data["avatar_type"] == 0
assert response.data["avatar_url"] is None
assert isinstance(response.data["date_joined"], datetime.datetime)
assert response.data["name"] == ""
assert response.data["sub"] == self.user.id
def test_gets_multiple_scopes(self) -> None:
all_access_token = ApiToken.objects.create(
user=self.user, scope_list=["openid", "profile", "email"]
)
self.client.credentials(HTTP_AUTHORIZATION="Bearer " + all_access_token.token)
response = self.client.get(self.path)
assert response.status_code == 200
# profile information
assert response.data["avatar_type"] == 0
assert response.data["avatar_url"] is None
assert isinstance(response.data["date_joined"], datetime.datetime)
assert response.data["name"] == ""
# email information
assert response.data["email"] == self.user.email
assert response.data["email_verified"]
# openid information
assert response.data["sub"] == self.user.id
| OAuthUserInfoTest |
| python | tensorflow__tensorflow | tensorflow/python/data/util/options_test.py | {"start": 978, "end": 1268} |
class ____(options.OptionsBase):
x = options.create_option(
name="x",
ty=int,
docstring="the answer to everything",
default_factory=lambda: 42)
y = options.create_option(
name="y", ty=float, docstring="a tasty pie", default_factory=lambda: 3.14)
| _TestOptions |
| python | run-llama__llama_index | llama-index-core/llama_index/core/postprocessor/structured_llm_rerank.py | {"start": 2368, "end": 8282} |
class ____(BaseNodePostprocessor):
"""Structured LLM-based reranker."""
top_n: int = Field(description="Top N nodes to return.")
choice_select_prompt: SerializeAsAny[BasePromptTemplate] = Field(
description="Choice select prompt."
)
choice_batch_size: int = Field(description="Batch size for choice select.")
llm: LLM = Field(description="The LLM to rerank with.")
_document_relevance_list_cls: type = PrivateAttr()
_format_node_batch_fn: Callable = PrivateAttr()
_parse_choice_select_answer_fn: Callable = PrivateAttr()
_raise_on_prediction_failure: bool = PrivateAttr()
def __init__(
self,
llm: Optional[LLM] = None,
choice_select_prompt: Optional[BasePromptTemplate] = None,
choice_batch_size: int = 10,
format_node_batch_fn: Optional[Callable] = None,
parse_choice_select_answer_fn: Optional[Callable] = None,
document_relevance_list_cls: Optional[type] = None,
raise_on_structured_prediction_failure: bool = True,
top_n: int = 10,
) -> None:
choice_select_prompt = choice_select_prompt or STRUCTURED_CHOICE_SELECT_PROMPT
llm = llm or Settings.llm
if not llm.metadata.is_function_calling_model:
logger.warning(
"StructuredLLMRerank constructed with a non-function-calling LLM. This may not work as expected."
)
super().__init__(
llm=llm,
choice_select_prompt=choice_select_prompt,
choice_batch_size=choice_batch_size,
top_n=top_n,
)
self._format_node_batch_fn = (
format_node_batch_fn or default_format_node_batch_fn
)
self._parse_choice_select_answer_fn = (
parse_choice_select_answer_fn
or default_parse_structured_choice_select_answer
)
self._document_relevance_list_cls = (
document_relevance_list_cls or DocumentRelevanceList
)
self._raise_on_structured_prediction_failure = (
raise_on_structured_prediction_failure
)
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
return {"choice_select_prompt": self.choice_select_prompt}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
if "choice_select_prompt" in prompts:
self.choice_select_prompt = prompts["choice_select_prompt"]
@classmethod
def class_name(cls) -> str:
return "StructuredLLMRerank"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
dispatcher.event(
ReRankStartEvent(
query=query_bundle,
nodes=nodes,
top_n=self.top_n,
model_name=self.llm.metadata.model_name,
)
)
if query_bundle is None:
raise ValueError("Query bundle must be provided.")
if len(nodes) == 0:
return []
initial_results: List[NodeWithScore] = []
with self.callback_manager.event(
CBEventType.RERANKING,
payload={
EventPayload.NODES: nodes,
EventPayload.MODEL_NAME: self.llm.metadata.model_name,
EventPayload.QUERY_STR: query_bundle.query_str,
EventPayload.TOP_K: self.top_n,
},
) as event:
for idx in range(0, len(nodes), self.choice_batch_size):
nodes_batch = [
node.node for node in nodes[idx : idx + self.choice_batch_size]
]
query_str = query_bundle.query_str
fmt_batch_str = self._format_node_batch_fn(nodes_batch)
# call each batch independently
result: Union[BaseModel, str] = self.llm.structured_predict(
output_cls=self._document_relevance_list_cls,
prompt=self.choice_select_prompt,
context_str=fmt_batch_str,
query_str=query_str,
)
# in case structured prediction fails, a str of the raised exception is returned
if isinstance(result, str):
if self._raise_on_structured_prediction_failure:
raise ValueError(
f"Structured prediction failed for nodes {idx} - {idx + self.choice_batch_size}: {result}"
)
logger.warning(
f"Structured prediction failed for nodes {idx} - {idx + self.choice_batch_size}: {result}"
)
# add all nodes with score 0
initial_results.extend(
[NodeWithScore(node=node, score=0.0) for node in nodes_batch]
)
continue
raw_choices, relevances = self._parse_choice_select_answer_fn(
result, len(nodes_batch)
)
choice_idxs = [int(choice) - 1 for choice in raw_choices]
choice_nodes = [nodes_batch[idx] for idx in choice_idxs]
relevances = relevances or [1.0 for _ in choice_nodes]
initial_results.extend(
[
NodeWithScore(node=node, score=relevance)
for node, relevance in zip(choice_nodes, relevances)
]
)
reranked_nodes = sorted(
initial_results, key=lambda x: x.score or 0.0, reverse=True
)[: self.top_n]
event.on_end(payload={EventPayload.NODES: reranked_nodes})
dispatcher.event(ReRankEndEvent(nodes=reranked_nodes))
return reranked_nodes
| StructuredLLMRerank |
| python | apache__airflow | providers/common/compat/tests/unit/common/compat/lineage/test_hook.py | {"start": 11871, "end": 22637} |
class ____:
def test_add_asset_basic_functionality(self, collector):
"""Test basic add_input_asset and add_output_asset functionality."""
mock_context = mock.MagicMock()
collector.add_input_asset(mock_context, uri="s3://bucket/input-file")
collector.add_output_asset(mock_context, uri="s3://bucket/output-file")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "s3://bucket/input-file"
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.uri == "s3://bucket/output-file"
assert lineage.outputs[0].count == 1
assert lineage.outputs[0].context == mock_context
def test_add_asset_count_tracking(self, collector):
"""Test that duplicate assets are counted correctly."""
mock_context = mock.MagicMock()
# Add same input multiple times
collector.add_input_asset(mock_context, uri="s3://bucket/input")
collector.add_input_asset(mock_context, uri="s3://bucket/input")
collector.add_input_asset(mock_context, uri="s3://bucket/input")
# Add same output multiple times
collector.add_output_asset(mock_context, uri="s3://bucket/output")
collector.add_output_asset(mock_context, uri="s3://bucket/output")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "s3://bucket/input"
assert lineage.inputs[0].count == 3
assert lineage.inputs[0].context == mock_context
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.uri == "s3://bucket/output"
assert lineage.outputs[0].count == 2
assert lineage.outputs[0].context == mock_context
def test_add_asset_different_uris(self, collector):
"""Test that different URIs are tracked separately."""
mock_context = mock.MagicMock()
# Add different input URIs
collector.add_input_asset(mock_context, uri="s3://bucket/file1")
collector.add_input_asset(mock_context, uri="s3://bucket/file2")
collector.add_input_asset(mock_context, uri="postgres://example.com:5432/database/default/table")
# Add different output URIs
collector.add_output_asset(mock_context, uri="s3://output/file1")
collector.add_output_asset(mock_context, uri="s3://output/file2")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 3
assert lineage.inputs[0].asset.uri == "s3://bucket/file1"
assert lineage.inputs[1].asset.uri == "s3://bucket/file2"
assert lineage.inputs[2].asset.uri == "postgres://example.com:5432/database/default/table"
assert len(lineage.outputs) == 2
assert lineage.outputs[0].asset.uri == "s3://output/file1"
assert lineage.outputs[1].asset.uri == "s3://output/file2"
def test_add_asset_different_contexts(self, collector):
"""Test that different contexts are tracked separately."""
mock_context1 = mock.MagicMock()
mock_context2 = mock.MagicMock()
# Add same URI with different contexts
collector.add_input_asset(mock_context1, uri="s3://bucket/file")
collector.add_input_asset(mock_context2, uri="s3://bucket/file")
collector.add_output_asset(mock_context1, uri="s3://output/file")
collector.add_output_asset(mock_context2, uri="s3://output/file")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 2
assert lineage.inputs[0].asset.uri == "s3://bucket/file"
assert lineage.inputs[0].context == mock_context1
assert lineage.inputs[0].count == 1
assert lineage.inputs[1].asset.uri == "s3://bucket/file"
assert lineage.inputs[1].context == mock_context2
assert lineage.inputs[1].count == 1
assert len(lineage.outputs) == 2
assert lineage.outputs[0].asset.uri == "s3://output/file"
assert lineage.outputs[0].context == mock_context1
assert lineage.outputs[0].count == 1
assert lineage.outputs[1].asset.uri == "s3://output/file"
assert lineage.outputs[1].context == mock_context2
assert lineage.outputs[1].count == 1
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0+")
def test_add_asset_with_name_and_group(self, collector):
"""Test adding assets with name and group parameters."""
mock_context = mock.MagicMock()
collector.add_input_asset(mock_context, uri="s3://bucket/file", name="my-input", group="input-group")
collector.add_output_asset(
mock_context, uri="s3://output/file", name="my-output", group="output-group"
)
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "s3://bucket/file"
assert lineage.inputs[0].asset.name == "my-input"
assert lineage.inputs[0].asset.group == "input-group"
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.uri == "s3://output/file"
assert lineage.outputs[0].asset.name == "my-output"
assert lineage.outputs[0].asset.group == "output-group"
assert lineage.outputs[0].count == 1
assert lineage.outputs[0].context == mock_context
def test_add_asset_with_extra_metadata(self, collector):
"""Test adding assets with extra metadata."""
mock_context = mock.MagicMock()
collector.add_input_asset(
mock_context,
uri="postgres://example.com:5432/database/default/table",
asset_extra={"schema": "public", "table": "users"},
)
collector.add_output_asset(
mock_context,
uri="postgres://example.com:5432/database/default/table",
asset_extra={"schema": "public", "table": "results"},
)
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 1
assert lineage.inputs[0].asset.uri == "postgres://example.com:5432/database/default/table"
assert lineage.inputs[0].asset.extra == {"schema": "public", "table": "users"}
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert len(lineage.outputs) == 1
assert lineage.outputs[0].asset.uri == "postgres://example.com:5432/database/default/table"
assert lineage.outputs[0].asset.extra == {"schema": "public", "table": "results"}
assert lineage.outputs[0].count == 1
assert lineage.outputs[0].context == mock_context
def test_add_asset_different_extra_values(self, collector):
"""Test that assets with different extra values are tracked separately."""
mock_context = mock.MagicMock()
# Same URI but different extra metadata
collector.add_input_asset(mock_context, uri="s3://bucket/file", asset_extra={"version": "1"})
collector.add_input_asset(mock_context, uri="s3://bucket/file", asset_extra={"version": "2"})
collector.add_output_asset(mock_context, uri="s3://output/file", asset_extra={"format": "parquet"})
collector.add_output_asset(mock_context, uri="s3://output/file", asset_extra={"format": "csv"})
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == 2
assert lineage.inputs[0].asset.uri == "s3://bucket/file"
assert lineage.inputs[0].asset.extra == {"version": "1"}
assert lineage.inputs[0].count == 1
assert lineage.inputs[0].context == mock_context
assert lineage.inputs[1].asset.uri == "s3://bucket/file"
assert lineage.inputs[1].asset.extra == {"version": "2"}
assert lineage.inputs[1].count == 1
assert lineage.inputs[1].context == mock_context
assert len(lineage.outputs) == 2
assert lineage.outputs[0].asset.uri == "s3://output/file"
assert lineage.outputs[0].asset.extra == {"format": "parquet"}
assert lineage.outputs[0].count == 1
assert lineage.outputs[0].context == mock_context
assert lineage.outputs[1].asset.uri == "s3://output/file"
assert lineage.outputs[1].asset.extra == {"format": "csv"}
assert lineage.outputs[1].count == 1
assert lineage.outputs[1].context == mock_context
@pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3.0+")
def test_add_asset_max_limit_af3(self, collector):
"""Test that asset operations respect maximum limit."""
mock_context = mock.MagicMock()
max_limit = 100
added_assets = max_limit + 50
# Limitation on collected assets was added in AF3 #45798
expected_number = max_limit
# Add more than max allowed inputs
for i in range(added_assets):
collector.add_input_asset(mock_context, uri=f"s3://bucket/input-{i}")
# Add more than max allowed outputs
for i in range(added_assets):
collector.add_output_asset(mock_context, uri=f"s3://bucket/output-{i}")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == expected_number
assert len(lineage.outputs) == expected_number
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Test requires < Airflow 3.0")
def test_add_asset_max_limit_af2(self, collector):
"""Test that asset operations do not respect maximum limit."""
mock_context = mock.MagicMock()
max_limit = 100
added_assets = max_limit + 50
# Limitation on collected assets was added in AF3 #45798
expected_number = added_assets
# Add more than max allowed inputs
for i in range(added_assets):
collector.add_input_asset(mock_context, uri=f"s3://bucket/input-{i}")
# Add more than max allowed outputs
for i in range(added_assets):
collector.add_output_asset(mock_context, uri=f"s3://bucket/output-{i}")
assert collector.has_collected
lineage = collector.collected_assets
assert len(lineage.inputs) == expected_number
assert len(lineage.outputs) == expected_number
| TestCollectorAddAssets |
| python | tensorflow__tensorflow | tensorflow/python/ops/sobol_ops_test.py | {"start": 1186, "end": 6877} |
class ____(test_util.TensorFlowTestCase):
def test_basic(self):
for dtype in [np.float64, np.float32]:
expected = np.array([[.5, .5], [.75, .25], [.25, .75], [.375, .375]])
sample = self.evaluate(math_ops.sobol_sample(2, 4, dtype=dtype))
self.assertAllClose(expected, sample, 0.001)
def test_more_known_values(self):
for dtype in [np.float64, np.float32]:
sample = math_ops.sobol_sample(5, 31, dtype=dtype)
expected = [[0.50, 0.50, 0.50, 0.50, 0.50],
[0.75, 0.25, 0.25, 0.25, 0.75],
[0.25, 0.75, 0.75, 0.75, 0.25],
[0.375, 0.375, 0.625, 0.875, 0.375],
[0.875, 0.875, 0.125, 0.375, 0.875],
[0.625, 0.125, 0.875, 0.625, 0.625],
[0.125, 0.625, 0.375, 0.125, 0.125],
[0.1875, 0.3125, 0.9375, 0.4375, 0.5625],
[0.6875, 0.8125, 0.4375, 0.9375, 0.0625],
[0.9375, 0.0625, 0.6875, 0.1875, 0.3125],
[0.4375, 0.5625, 0.1875, 0.6875, 0.8125],
[0.3125, 0.1875, 0.3125, 0.5625, 0.9375],
[0.8125, 0.6875, 0.8125, 0.0625, 0.4375],
[0.5625, 0.4375, 0.0625, 0.8125, 0.1875],
[0.0625, 0.9375, 0.5625, 0.3125, 0.6875],
[0.09375, 0.46875, 0.46875, 0.65625, 0.28125],
[0.59375, 0.96875, 0.96875, 0.15625, 0.78125],
[0.84375, 0.21875, 0.21875, 0.90625, 0.53125],
[0.34375, 0.71875, 0.71875, 0.40625, 0.03125],
[0.46875, 0.09375, 0.84375, 0.28125, 0.15625],
[0.96875, 0.59375, 0.34375, 0.78125, 0.65625],
[0.71875, 0.34375, 0.59375, 0.03125, 0.90625],
[0.21875, 0.84375, 0.09375, 0.53125, 0.40625],
[0.15625, 0.15625, 0.53125, 0.84375, 0.84375],
[0.65625, 0.65625, 0.03125, 0.34375, 0.34375],
[0.90625, 0.40625, 0.78125, 0.59375, 0.09375],
[0.40625, 0.90625, 0.28125, 0.09375, 0.59375],
[0.28125, 0.28125, 0.15625, 0.21875, 0.71875],
[0.78125, 0.78125, 0.65625, 0.71875, 0.21875],
[0.53125, 0.03125, 0.40625, 0.46875, 0.46875],
[0.03125, 0.53125, 0.90625, 0.96875, 0.96875]]
self.assertAllClose(expected, self.evaluate(sample), .001)
def test_skip(self):
dim = 10
n = 50
skip = 17
for dtype in [np.float64, np.float32]:
sample_noskip = math_ops.sobol_sample(dim, n + skip, dtype=dtype)
sample_skip = math_ops.sobol_sample(dim, n, skip, dtype=dtype)
self.assertAllClose(
self.evaluate(sample_noskip)[skip:, :], self.evaluate(sample_skip))
def test_static_shape(self):
s = math_ops.sobol_sample(10, 100, dtype=np.float32)
self.assertAllEqual([100, 10], s.shape.as_list())
def test_static_shape_using_placeholder_for_dim(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)])
def f(dim):
s = math_ops.sobol_sample(dim, 100, dtype=dtypes.float32)
assert s.shape.as_list() == [100, None]
return s
self.assertAllEqual([100, 10], self.evaluate(f(10)).shape)
def test_static_shape_using_placeholder_for_num_results(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)])
def f(num_results):
s = math_ops.sobol_sample(10, num_results, dtype=dtypes.float32)
assert s.shape.as_list() == [None, 10]
return s
self.assertAllEqual([100, 10], self.evaluate(f(100)).shape)
def test_static_shape_using_only_placeholders(self):
@def_function.function(
input_signature=[tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32)] *
2)
def f(dim, num_results):
s = math_ops.sobol_sample(dim, num_results, dtype=dtypes.float32)
assert s.shape.as_list() == [None, None]
return s
self.assertAllEqual([100, 10], self.evaluate(f(10, 100)).shape)
def test_dynamic_shape(self):
s = math_ops.sobol_sample(10, 100, dtype=dtypes.float32)
self.assertAllEqual([100, 10], self.evaluate(s).shape)
def test_default_dtype(self):
# Create an op without specifying the dtype. Dtype should be float32 in
# this case.
s = math_ops.sobol_sample(10, 100)
self.assertEqual(dtypes.float32, s.dtype)
@test_util.run_in_graph_and_eager_modes
def test_non_scalar_input(self):
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
r'Shape must be rank 0 but is rank 1|'
r'\w+ must be a scalar'):
self.evaluate(gen_math_ops.sobol_sample(
dim=7,
num_results=constant_op.constant([1, 0]),
skip=constant_op.constant([1])))
@test_util.run_in_graph_and_eager_modes
def test_dim_num_results_overflow(self):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
r'num_results\*dim must be less than 2147483647'):
self.evaluate(
gen_math_ops.sobol_sample(
dim=2560, num_results=16384000, skip=0, dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes
def test_num_results_skip_overflow(self):
with self.assertRaisesRegex(
(ValueError, errors.InvalidArgumentError),
r'num_results\+skip must be less than 2147483647',
):
self.evaluate(
gen_math_ops.sobol_sample(
dim=1, num_results=1, skip=2147483647, dtype=dtypes.float32
)
)
if __name__ == '__main__':
googletest.main()
| SobolSampleOpTest |
| python | google__jax | jax/_src/numpy/einsum.py | {"start": 1190, "end": 24373} |
class ____(opt_einsum.paths.PathOptimizer):
"""Unoptimized path for einsum."""
def __call__(self, inputs, *args, **kwargs):
return [(0, 1)] * (len(inputs) - 1)
@overload
def einsum(
subscript: str, /,
*operands: ArrayLike,
out: None = None,
optimize: str | bool | list[tuple[int, ...]] = "auto",
precision: lax.PrecisionLike = None,
preferred_element_type: DTypeLike | None = None,
_dot_general: Callable[..., Array] = lax.dot_general,
out_sharding=None,
) -> Array: ...
@overload
def einsum(
arr: ArrayLike,
axes: Sequence[Any], /,
*operands: ArrayLike | Sequence[Any],
out: None = None,
optimize: str | bool | list[tuple[int, ...]] = "auto",
precision: lax.PrecisionLike = None,
preferred_element_type: DTypeLike | None = None,
_dot_general: Callable[..., Array] = lax.dot_general,
out_sharding=None,
) -> Array: ...
@export
def einsum(
subscripts, /,
*operands,
out: None = None,
optimize: str | bool | list[tuple[int, ...]] = "auto",
precision: lax.PrecisionLike = None,
preferred_element_type: DTypeLike | None = None,
_dot_general: Callable[..., Array] = lax.dot_general,
out_sharding=None,
) -> Array:
"""Einstein summation
JAX implementation of :func:`numpy.einsum`.
``einsum`` is a powerful and generic API for computing various reductions,
inner products, outer products, axis reorderings, and combinations thereof
across one or more input arrays. It has a somewhat complicated overloaded API;
the arguments below reflect the most common calling convention. The Examples
section below demonstrates some of the alternative calling conventions.
Args:
subscripts: string containing axes names separated by commas.
*operands: sequence of one or more arrays corresponding to the subscripts.
optimize: specify how to optimize the order of computation. In JAX this defaults
to ``"auto"`` which produces optimized expressions via the opt_einsum_
package. Other options are ``True`` (same as ``"optimal"``), ``False``
(unoptimized), or any string supported by ``opt_einsum``, which
includes ``"optimal"``, ``"greedy"``, ``"eager"``, and others. It may also
be a pre-computed path (see :func:`~jax.numpy.einsum_path`).
precision: either ``None`` (default), which means the default precision for
the backend, a :class:`~jax.lax.Precision` enum value (``Precision.DEFAULT``,
``Precision.HIGH`` or ``Precision.HIGHEST``).
preferred_element_type: either ``None`` (default), which means the default
accumulation type for the input types, or a datatype, indicating to
accumulate results to and return a result with that datatype.
out: unsupported by JAX
_dot_general: optionally override the ``dot_general`` callable used by ``einsum``.
This parameter is experimental, and may be removed without warning at any time.
Returns:
array containing the result of the einstein summation.
See also:
:func:`jax.numpy.einsum_path`
Examples:
The mechanics of ``einsum`` are perhaps best demonstrated by example. Here we
show how to use ``einsum`` to compute a number of quantities from one or more
arrays. For more discussion and examples of ``einsum``, see the documentation
of :func:`numpy.einsum`.
>>> M = jnp.arange(16).reshape(4, 4)
>>> x = jnp.arange(4)
>>> y = jnp.array([5, 4, 3, 2])
**Vector product**
>>> jnp.einsum('i,i', x, y)
Array(16, dtype=int32)
>>> jnp.vecdot(x, y)
Array(16, dtype=int32)
Here are some alternative ``einsum`` calling conventions to compute the same
result:
>>> jnp.einsum('i,i->', x, y) # explicit form
Array(16, dtype=int32)
>>> jnp.einsum(x, (0,), y, (0,)) # implicit form via indices
Array(16, dtype=int32)
>>> jnp.einsum(x, (0,), y, (0,), ()) # explicit form via indices
Array(16, dtype=int32)
**Matrix product**
>>> jnp.einsum('ij,j->i', M, x) # explicit form
Array([14, 38, 62, 86], dtype=int32)
>>> jnp.matmul(M, x)
Array([14, 38, 62, 86], dtype=int32)
Here are some alternative ``einsum`` calling conventions to compute the same
result:
>>> jnp.einsum('ij,j', M, x) # implicit form
Array([14, 38, 62, 86], dtype=int32)
>>> jnp.einsum(M, (0, 1), x, (1,), (0,)) # explicit form via indices
Array([14, 38, 62, 86], dtype=int32)
>>> jnp.einsum(M, (0, 1), x, (1,)) # implicit form via indices
Array([14, 38, 62, 86], dtype=int32)
**Outer product**
>>> jnp.einsum("i,j->ij", x, y)
Array([[ 0, 0, 0, 0],
[ 5, 4, 3, 2],
[10, 8, 6, 4],
[15, 12, 9, 6]], dtype=int32)
>>> jnp.outer(x, y)
Array([[ 0, 0, 0, 0],
[ 5, 4, 3, 2],
[10, 8, 6, 4],
[15, 12, 9, 6]], dtype=int32)
Some other ways of computing outer products:
>>> jnp.einsum("i,j", x, y) # implicit form
Array([[ 0, 0, 0, 0],
[ 5, 4, 3, 2],
[10, 8, 6, 4],
[15, 12, 9, 6]], dtype=int32)
>>> jnp.einsum(x, (0,), y, (1,), (0, 1)) # explicit form via indices
Array([[ 0, 0, 0, 0],
[ 5, 4, 3, 2],
[10, 8, 6, 4],
[15, 12, 9, 6]], dtype=int32)
>>> jnp.einsum(x, (0,), y, (1,)) # implicit form via indices
Array([[ 0, 0, 0, 0],
[ 5, 4, 3, 2],
[10, 8, 6, 4],
[15, 12, 9, 6]], dtype=int32)
**1D array sum**
>>> jnp.einsum("i->", x) # requires explicit form
Array(6, dtype=int32)
>>> jnp.einsum(x, (0,), ()) # explicit form via indices
Array(6, dtype=int32)
>>> jnp.sum(x)
Array(6, dtype=int32)
**Sum along an axis**
>>> jnp.einsum("...j->...", M) # requires explicit form
Array([ 6, 22, 38, 54], dtype=int32)
>>> jnp.einsum(M, (..., 0), (...,)) # explicit form via indices
Array([ 6, 22, 38, 54], dtype=int32)
>>> M.sum(-1)
Array([ 6, 22, 38, 54], dtype=int32)
**Matrix transpose**
>>> y = jnp.array([[1, 2, 3],
... [4, 5, 6]])
>>> jnp.einsum("ij->ji", y) # explicit form
Array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
>>> jnp.einsum("ji", y) # implicit form
Array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
>>> jnp.einsum(y, (1, 0)) # implicit form via indices
Array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
>>> jnp.einsum(y, (0, 1), (1, 0)) # explicit form via indices
Array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
>>> jnp.transpose(y)
Array([[1, 4],
[2, 5],
[3, 6]], dtype=int32)
**Matrix diagonal**
>>> jnp.einsum("ii->i", M)
Array([ 0, 5, 10, 15], dtype=int32)
>>> jnp.diagonal(M)
Array([ 0, 5, 10, 15], dtype=int32)
**Matrix trace**
>>> jnp.einsum("ii", M)
Array(30, dtype=int32)
>>> jnp.trace(M)
Array(30, dtype=int32)
**Tensor products**
>>> x = jnp.arange(30).reshape(2, 3, 5)
>>> y = jnp.arange(60).reshape(3, 4, 5)
>>> jnp.einsum('ijk,jlk->il', x, y) # explicit form
Array([[ 3340, 3865, 4390, 4915],
[ 8290, 9940, 11590, 13240]], dtype=int32)
>>> jnp.tensordot(x, y, axes=[(1, 2), (0, 2)])
Array([[ 3340, 3865, 4390, 4915],
[ 8290, 9940, 11590, 13240]], dtype=int32)
>>> jnp.einsum('ijk,jlk', x, y) # implicit form
Array([[ 3340, 3865, 4390, 4915],
[ 8290, 9940, 11590, 13240]], dtype=int32)
>>> jnp.einsum(x, (0, 1, 2), y, (1, 3, 2), (0, 3)) # explicit form via indices
Array([[ 3340, 3865, 4390, 4915],
[ 8290, 9940, 11590, 13240]], dtype=int32)
>>> jnp.einsum(x, (0, 1, 2), y, (1, 3, 2)) # implicit form via indices
Array([[ 3340, 3865, 4390, 4915],
[ 8290, 9940, 11590, 13240]], dtype=int32)
**Chained dot products**
>>> w = jnp.arange(5, 9).reshape(2, 2)
>>> x = jnp.arange(6).reshape(2, 3)
>>> y = jnp.arange(-2, 4).reshape(3, 2)
>>> z = jnp.array([[2, 4, 6], [3, 5, 7]])
>>> jnp.einsum('ij,jk,kl,lm->im', w, x, y, z)
Array([[ 481, 831, 1181],
[ 651, 1125, 1599]], dtype=int32)
>>> jnp.einsum(w, (0, 1), x, (1, 2), y, (2, 3), z, (3, 4)) # implicit, via indices
Array([[ 481, 831, 1181],
[ 651, 1125, 1599]], dtype=int32)
>>> w @ x @ y @ z # direct chain of matmuls
Array([[ 481, 831, 1181],
[ 651, 1125, 1599]], dtype=int32)
>>> jnp.linalg.multi_dot([w, x, y, z])
Array([[ 481, 831, 1181],
[ 651, 1125, 1599]], dtype=int32)
.. _opt_einsum: https://github.com/dgasmith/opt_einsum
"""
operands = (subscripts, *operands)
if out is not None:
raise NotImplementedError("The 'out' argument to jnp.einsum is not supported.")
spec = operands[0] if isinstance(operands[0], str) else None
path_type = 'optimal' if optimize is True else Unoptimized() if optimize is False else optimize
# Extract __jax_array__ before passing to contract_path()
operands = tuple(op.__jax_array__() if hasattr(op, "__jax_array__") else op
for op in operands)
# Allow handling of shape polymorphism
non_constant_dim_types = {
type(d) for op in operands if not isinstance(op, str)
for d in np.shape(op) if not core.is_constant_dim(d)
}
if not non_constant_dim_types:
contract_path = opt_einsum.contract_path
else:
ty = next(iter(non_constant_dim_types))
contract_path = _poly_einsum_handlers.get(ty, _default_poly_einsum_handler)
# using einsum_call=True here is an internal api for opt_einsum... sorry
operands, contractions = contract_path(
*operands, einsum_call=True, use_blas=True, optimize=path_type)
contractions = tuple((a, frozenset(b), c) for a, b, c, *_ in contractions) # pytype: disable=attribute-error
num_contractions = len(contractions)
out_sharding = canonicalize_sharding(out_sharding, 'einsum')
if out_sharding is not None and not isinstance(out_sharding, NamedSharding):
raise NotImplementedError(
"`out_sharding` argument of `einsum` only supports NamedSharding"
" instances.")
jit_einsum = api.jit(_einsum, static_argnums=(1, 2, 3, 4, 5), inline=True)
if spec is not None:
jit_einsum = api.named_call(jit_einsum, name=spec)
operand_arrays = list(util.ensure_arraylike_tuple("einsum", operands))
if num_contractions > 1 and out_sharding is not None:
# TODO(yashkatariya): If the out_sharding is unreduced, figure out a way to
# run the dot_general unreduced_rule on these einsums because right now we
# drop into Auto mode skipping the checks happening in the rule.
return auto_axes(
jit_einsum,
axes=out_sharding.mesh.explicit_axes,
out_sharding=out_sharding,
)(operand_arrays, contractions=contractions, precision=precision,
preferred_element_type=preferred_element_type, _dot_general=_dot_general,
out_sharding=None)
else:
return jit_einsum(operand_arrays, contractions, precision,
preferred_element_type, _dot_general, out_sharding)
# Enable other modules to override einsum_contact_path.
# Indexed by the type of the non constant dimension
_poly_einsum_handlers = {} # type: ignore
def _default_poly_einsum_handler(*operands, **kwargs):
dummy = collections.namedtuple('dummy', ['shape', 'dtype'])
dummies = [dummy(tuple(d if type(d) is int else 8 for d in x.shape), x.dtype)
if hasattr(x, 'dtype') else x for x in operands]
mapping = {id(d): i for i, d in enumerate(dummies)}
out_dummies, contractions = opt_einsum.contract_path(*dummies, **kwargs)
contract_operands = [operands[mapping[id(d)]] for d in out_dummies]
return contract_operands, contractions
@overload
def einsum_path(
subscripts: str, /,
*operands: ArrayLike,
optimize: bool | str | list[tuple[int, ...]] = ...,
) -> tuple[list[tuple[int, ...]], Any]: ...
@overload
def einsum_path(
arr: ArrayLike,
axes: Sequence[Any], /,
*operands: ArrayLike | Sequence[Any],
optimize: bool | str | list[tuple[int, ...]] = ...,
) -> tuple[list[tuple[int, ...]], Any]: ...
@export
def einsum_path(
subscripts, /,
*operands,
optimize: bool | str | list[tuple[int, ...]] = 'auto'
) -> tuple[list[tuple[int, ...]], Any]:
"""Evaluates the optimal contraction path without evaluating the einsum.
JAX implementation of :func:`numpy.einsum_path`. This function calls into
the opt_einsum_ package, and makes use of its optimization routines.
Args:
subscripts: string containing axes names separated by commas.
*operands: sequence of one or more arrays corresponding to the subscripts.
optimize: specify how to optimize the order of computation. In JAX this defaults
to ``"auto"``. Other options are ``True`` (same as ``"optimize"``), ``False``
(unoptimized), or any string supported by ``opt_einsum``, which
includes ``"optimize"``,, ``"greedy"``, ``"eager"``, and others.
Returns:
A tuple containing the path that may be passed to :func:`~jax.numpy.einsum`, and a
printable object representing this optimal path.
Examples:
>>> key1, key2, key3 = jax.random.split(jax.random.key(0), 3)
>>> x = jax.random.randint(key1, minval=-5, maxval=5, shape=(2, 3))
>>> y = jax.random.randint(key2, minval=-5, maxval=5, shape=(3, 100))
>>> z = jax.random.randint(key3, minval=-5, maxval=5, shape=(100, 5))
>>> path, path_info = jnp.einsum_path("ij,jk,kl", x, y, z, optimize="optimal")
>>> print(path)
[(1, 2), (0, 1)]
>>> print(path_info)
Complete contraction: ij,jk,kl->il
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 9.000e+3
Optimized FLOP count: 3.060e+3
Theoretical speedup: 2.941e+0
Largest intermediate: 1.500e+1 elements
--------------------------------------------------------------------------------
scaling BLAS current remaining
--------------------------------------------------------------------------------
3 GEMM kl,jk->lj ij,lj->il
3 GEMM lj,ij->il il->il
Use the computed path in :func:`~jax.numpy.einsum`:
>>> jnp.einsum("ij,jk,kl", x, y, z, optimize=path)
Array([[-754, 324, -142, 82, 50],
[ 408, -50, 87, -29, 7]], dtype=int32)
.. _opt_einsum: https://github.com/dgasmith/opt_einsum
"""
if isinstance(optimize, bool):
optimize = 'optimal' if optimize else Unoptimized()
return opt_einsum.contract_path(subscripts, *operands, optimize=optimize)
def _removechars(s, chars):
return s.translate(str.maketrans(dict.fromkeys(chars)))
def _einsum(
operands: list[Array],
contractions: Sequence[tuple[tuple[int, ...], frozenset[str], str]],
precision,
preferred_element_type,
_dot_general=lax.dot_general,
out_sharding=None,
):
if preferred_element_type is None:
preferred_element_type, output_weak_type = dtypes.result_type(
*operands, return_weak_type_flag=True)
else:
preferred_element_type = dtypes.check_and_canonicalize_user_dtype(
preferred_element_type, 'einsum'
)
output_weak_type = False
def sum(x, axes):
if dtypes.result_type(x, preferred_element_type) != x.dtype:
x = x.astype(preferred_element_type)
return lax.reduce(
x, np.array(0, x.dtype), lax.add if x.dtype != bool else lax.bitwise_or,
axes, out_sharding)
def sum_uniques(operand, names, uniques):
if uniques:
axes = [names.index(name) for name in uniques]
operand = sum(operand, axes)
names = _removechars(names, uniques)
return operand, names
def sum_repeats(operand, names, counts, keep_names):
for name, count in counts.items():
if count > 1:
axes = [i for i, n in enumerate(names) if n == name]
eye = lax._delta(np.dtype('bool'), operand.shape, axes)
operand = lax.select(eye, operand, lax.full_like(operand, 0))
if name not in keep_names:
operand = sum(operand, axes)
names = names.replace(name, '')
else:
operand = sum(operand, axes[:-1])
names = names.replace(name, '', count - 1)
return operand, names
def filter_singleton_dims(operand, names, other_shape, other_names):
eq = core.definitely_equal
keep = [not eq(operand.shape[i], 1) or j == -1 or eq(other_shape[j], 1)
for i, j in enumerate(map(other_names.find, names))]
sqez_axes, keep_axes = partition_list(keep, list(range(operand.ndim)))
return lax.squeeze(operand, sqez_axes), "".join(names[i] for i in keep_axes)
for operand_indices, contracted_names_set, einstr in contractions:
contracted_names = sorted(contracted_names_set)
input_str, result_names = einstr.split('->')
input_names = input_str.split(',')
# switch on the number of operands to be processed in this loop iteration.
# every case here sets 'operand' and 'names'.
if len(operand_indices) == 1:
operand = operands.pop(operand_indices[0])
names, = input_names
counts = collections.Counter(names)
# sum out unique contracted indices with a single reduce-sum
uniques = [name for name in contracted_names if counts[name] == 1]
operand, names = sum_uniques(operand, names, uniques)
# for every repeated index, do a contraction against an identity matrix
operand, names = sum_repeats(operand, names, counts, result_names)
elif len(operand_indices) == 2:
lhs, rhs = map(operands.pop, operand_indices)
lhs_names, rhs_names = input_names
# handle cases where one side of a contracting or batch dimension is 1
# but its counterpart is not.
lhs, lhs_names = filter_singleton_dims(lhs, lhs_names, np.shape(rhs),
rhs_names)
rhs, rhs_names = filter_singleton_dims(rhs, rhs_names, np.shape(lhs),
lhs_names)
lhs_counts = collections.Counter(lhs_names)
rhs_counts = collections.Counter(rhs_names)
# sum out unique contracted indices in lhs and rhs
lhs_uniques = [name for name in contracted_names
if lhs_counts[name] == 1 and rhs_counts[name] == 0]
lhs, lhs_names = sum_uniques(lhs, lhs_names, lhs_uniques)
rhs_uniques = [name for name in contracted_names
if rhs_counts[name] == 1 and lhs_counts[name] == 0]
rhs, rhs_names = sum_uniques(rhs, rhs_names, rhs_uniques)
# for every repeated index, contract against an identity matrix
lhs, lhs_names = sum_repeats(lhs, lhs_names, lhs_counts,
result_names + rhs_names)
rhs, rhs_names = sum_repeats(rhs, rhs_names, rhs_counts,
result_names + lhs_names)
lhs_or_rhs_names = set(lhs_names) | set(rhs_names)
contracted_names = [x for x in contracted_names if x in lhs_or_rhs_names]
lhs_and_rhs_names = set(lhs_names) & set(rhs_names)
batch_names = [x for x in result_names if x in lhs_and_rhs_names]
lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))
for n in batch_names)
# NOTE(mattjj): this can fail non-deterministically in python3, maybe
# due to opt_einsum
assert config.dynamic_shapes.value or all(
name in lhs_names and name in rhs_names and
lhs.shape[lhs_names.index(name)] == rhs.shape[rhs_names.index(name)]
for name in contracted_names), (
"Incompatible reduction dimensions: "
f"lhs.shape={lhs.shape} lhs_names={lhs_names} "
f"rhs.shape={rhs.shape} rhs_names={rhs_names}")
# contract using dot_general
batch_names_str = ''.join(batch_names)
lhs_cont, rhs_cont = unzip2((lhs_names.index(n), rhs_names.index(n))
for n in contracted_names)
deleted_names = batch_names_str + ''.join(contracted_names)
remaining_lhs_names = _removechars(lhs_names, deleted_names)
remaining_rhs_names = _removechars(rhs_names, deleted_names)
# Try both orders of lhs and rhs, in the hope that one of them means we
# don't need an explicit transpose. opt_einsum likes to contract from
# right to left, so we expect (rhs,lhs) to have the best chance of not
# needing a transpose.
names = batch_names_str + remaining_rhs_names + remaining_lhs_names
if names == result_names:
dimension_numbers = ((rhs_cont, lhs_cont), (rhs_batch, lhs_batch))
dot_out_sharding = ({} if out_sharding is None else
{'out_sharding': out_sharding})
operand = _dot_general(rhs, lhs, dimension_numbers, precision,
preferred_element_type=preferred_element_type,
**dot_out_sharding)
else:
names = batch_names_str + remaining_lhs_names + remaining_rhs_names
dimension_numbers = ((lhs_cont, rhs_cont), (lhs_batch, rhs_batch))
out_sharding = (_get_inverse_sharding(out_sharding, names, result_names)
if out_sharding is not None and names != result_names
else out_sharding)
dot_out_sharding = ({} if out_sharding is None else # type: ignore
{'out_sharding': out_sharding})
operand = _dot_general(lhs, rhs, dimension_numbers, precision,
preferred_element_type=preferred_element_type,
**dot_out_sharding)
else:
raise NotImplementedError(
"jax.numpy.einsum does not support simultaneous contraction of 3 or more"
" operands. Typically this means you've passed an unsupported path to"
" the einsum optimize parameter.")
# the resulting 'operand' with axis labels 'names' should be a permutation
# of the desired result
assert len(names) == len(result_names) == len(set(names))
assert set(names) == set(result_names)
if names != result_names:
perm = tuple(names.index(name) for name in result_names)
operand = lax.transpose(operand, perm)
operands.append(operand) # used in next iteration
return lax._convert_element_type(operands[0], preferred_element_type,
output_weak_type)
def _get_inverse_sharding(out_sharding, names, result_names):
if len(result_names) > len(out_sharding.spec):
out_sharding = out_sharding.update(spec=
out_sharding.spec._normalized_spec_for_aval(len(result_names)))
spec = out_sharding.spec
inverse_spec = tuple(spec[result_names.index(name)] for name in names)
return NamedSharding(out_sharding.mesh, spec.update(partitions=inverse_spec))
_poly_einsum_handlers[shape_poly._DimExpr] = shape_poly._einsum_contract_path
| Unoptimized |
| python | ray-project__ray | python/ray/util/collective/types.py | {"start": 3725, "end": 3886} |
class ____:
timeout_ms = unset_timeout_ms
#
# @dataclass
# class GatherOptions:
# root_rank = 0
# timeout = unset_timeout
@dataclass
| AllGatherOptions |
| python | ray-project__ray | python/ray/llm/_internal/serve/routing_policies/prefix_aware/prefix_tree.py | {"start": 287, "end": 2008} |
class ____:
"""
Node in a prefix tree that represents a segment of text and can belong to multiple tenants.
Each node also tracks the last access time for each tenant.
Simple example of root node connected to two children Nodes:
root = Node(text="", parent=None, edge_label_to_child={"f": fooNode, "b": barNode}, tenant_to_last_access_time={"tenant_1": 2})
fooNode = Node(text="foo", parent=root, edge_label_to_child={}, tenant_to_last_access_time={"tenant_1": 1})
barNode = Node(text="bar", parent=root, edge_label_to_child={}, tenant_to_last_access_time={"tenant_1": 2})
In the above example, "foo" was inserted at time 1, and "bar" was inserted at time 2.
It follows that root was last accessed at time 2.
"""
def __init__(self, text: str = "", parent: Optional[Node] = None) -> None:
"""
Initialize a node in the prefix tree.
Args:
text: The text segment this node represents
parent: The parent node of this node
"""
self.text: str = text
self.parent: Optional[Node] = parent
# Maps first character to child node
self.edge_label_to_child: Dict[str, Node] = {}
# For each tenant that has inserted text matching this node, track its last access timestamp (in seconds)
self.tenant_to_last_access_time: Dict[str, float] = {}
# Doubly linked list pointers for LRU tracking per tenant
# Points to the less recently used node (toward tail)
self.tenant_to_older_node: Dict[str, Optional[Node]] = {}
# Points to the more recently used node (toward head)
self.tenant_to_newer_node: Dict[str, Optional[Node]] = {}
| Node |
| python | langchain-ai__langchain | libs/langchain_v1/langchain/agents/middleware/context_editing.py | {"start": 5368, "end": 8901} |
class ____(AgentMiddleware):
"""Automatically prune tool results to manage context size.
The middleware applies a sequence of edits when the total input token count exceeds
configured thresholds.
Currently the `ClearToolUsesEdit` strategy is supported, aligning with Anthropic's
`clear_tool_uses_20250919` behavior [(read more)](https://platform.claude.com/docs/en/agents-and-tools/tool-use/memory-tool).
"""
edits: list[ContextEdit]
token_count_method: Literal["approximate", "model"]
def __init__(
self,
*,
edits: Iterable[ContextEdit] | None = None,
token_count_method: Literal["approximate", "model"] = "approximate", # noqa: S107
) -> None:
"""Initialize an instance of context editing middleware.
Args:
edits: Sequence of edit strategies to apply.
Defaults to a single `ClearToolUsesEdit` mirroring Anthropic defaults.
token_count_method: Whether to use approximate token counting
(faster, less accurate) or exact counting implemented by the
chat model (potentially slower, more accurate).
"""
super().__init__()
self.edits = list(edits or (ClearToolUsesEdit(),))
self.token_count_method = token_count_method
def wrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], ModelResponse],
) -> ModelCallResult:
"""Apply context edits before invoking the model via handler."""
if not request.messages:
return handler(request)
if self.token_count_method == "approximate": # noqa: S105
def count_tokens(messages: Sequence[BaseMessage]) -> int:
return count_tokens_approximately(messages)
else:
system_msg = [request.system_message] if request.system_message else []
def count_tokens(messages: Sequence[BaseMessage]) -> int:
return request.model.get_num_tokens_from_messages(
system_msg + list(messages), request.tools
)
edited_messages = deepcopy(list(request.messages))
for edit in self.edits:
edit.apply(edited_messages, count_tokens=count_tokens)
return handler(request.override(messages=edited_messages))
async def awrap_model_call(
self,
request: ModelRequest,
handler: Callable[[ModelRequest], Awaitable[ModelResponse]],
) -> ModelCallResult:
"""Apply context edits before invoking the model via handler (async version)."""
if not request.messages:
return await handler(request)
if self.token_count_method == "approximate": # noqa: S105
def count_tokens(messages: Sequence[BaseMessage]) -> int:
return count_tokens_approximately(messages)
else:
system_msg = [request.system_message] if request.system_message else []
def count_tokens(messages: Sequence[BaseMessage]) -> int:
return request.model.get_num_tokens_from_messages(
system_msg + list(messages), request.tools
)
edited_messages = deepcopy(list(request.messages))
for edit in self.edits:
edit.apply(edited_messages, count_tokens=count_tokens)
return await handler(request.override(messages=edited_messages))
__all__ = [
"ClearToolUsesEdit",
"ContextEditingMiddleware",
]
| ContextEditingMiddleware |
| python | pyparsing__pyparsing | examples/bf.py | {"start": 1310, "end": 2411} |
class ____:
"""
Brainf*ck execution environment, with a memory array and pointer.
"""
def __init__(self, memory_size: int = 1024):
self._ptr = 0
self._memory_size = memory_size
self._memory = [0] * self._memory_size
@property
def ptr(self):
return self._ptr
@ptr.setter
def ptr(self, value):
self._ptr = value % self._memory_size
@property
def at_ptr(self):
return self._memory[self._ptr]
@at_ptr.setter
def at_ptr(self, value):
self._memory[self._ptr] = value % 256
def output_value_at_ptr(self):
print(chr(self.at_ptr), end="")
def input_value(self):
input_char = input() or "\0"
self.at_ptr = ord(input_char[0])
def reset(self):
self._ptr = 0
self._memory[:] = [0] * self._memory_size
def dump_state(self):
for i in range(30):
print(f"{self._memory[i]:3d} ", end="")
print()
if self.ptr < 30:
print(f" {' ' * self.ptr}^")
# define executable classes for each instruction
| BFEngine |
| python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/bcast_ops_test.py | {"start": 1016, "end": 4933} |
class ____(test.TestCase):
def _GetBroadcastShape(self, xs, ys):
return self.evaluate(broadcast_args(xs, ys))
def _GetGradientArgs(self, xs, ys):
return self.evaluate(broadcast_gradient_args(xs, ys))
def testBasic(self):
r = self._GetBroadcastShape([2, 3, 5], [1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 5], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 3, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 3, 5])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([2, 1, 5], [3, 1])
self.assertAllEqual(r, [2, 3, 5])
r = self._GetBroadcastShape([3, 1], [2, 1, 5])
self.assertAllEqual(r, [2, 3, 5])
def testBasicGradient(self):
r0, r1 = self._GetGradientArgs([2, 3, 5], [1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
r0, r1 = self._GetGradientArgs([1], [2, 3, 5])
self.assertAllEqual(r0, [0, 1, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([5], [2, 3, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0])
r0, r1 = self._GetGradientArgs([3, 5], [2, 3, 5])
self.assertAllEqual(r0, [0])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 3, 5], [3, 1])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 3, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 1, 5], [3, 1])
self.assertAllEqual(r0, [1])
self.assertAllEqual(r1, [0, 2])
r0, r1 = self._GetGradientArgs([3, 1], [2, 1, 5])
self.assertAllEqual(r0, [0, 2])
self.assertAllEqual(r1, [1])
def testZeroDims(self):
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
r = self._GetBroadcastShape([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r, [2, 0, 3, 0, 5])
def testZeroDimsGradient(self):
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 0, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1])
r0, r1 = self._GetGradientArgs([3, 0, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1])
self.assertAllEqual(r1, [])
r0, r1 = self._GetGradientArgs([2, 0, 3, 0, 5], [3, 1, 5])
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 3])
r0, r1 = self._GetGradientArgs([3, 1, 5], [2, 0, 3, 0, 5])
self.assertAllEqual(r0, [0, 1, 3])
self.assertAllEqual(r1, [])
def testDataTypes(self):
for dtype in [dtypes.int32, dtypes.int64]:
r = self._GetBroadcastShape(
constant_op.constant([2, 3, 5], dtype=dtype),
constant_op.constant([1], dtype=dtype))
self.assertAllEqual(r, [2, 3, 5])
r0, r1 = self._GetGradientArgs(
constant_op.constant([2, 3, 5], dtype=dtype),
constant_op.constant([1], dtype=dtype))
self.assertAllEqual(r0, [])
self.assertAllEqual(r1, [0, 1, 2])
if __name__ == "__main__":
test.main()
| BcastOpsTest |
| python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_mwaa.py | {"start": 1794, "end": 4638} |
class ____:
def test_init(self):
op = MwaaTriggerDagRunOperator(**OP_KWARGS)
assert op.env_name == OP_KWARGS["env_name"]
assert op.trigger_dag_id == OP_KWARGS["trigger_dag_id"]
assert op.trigger_run_id == OP_KWARGS["trigger_run_id"]
assert op.logical_date == OP_KWARGS["logical_date"]
assert op.data_interval_start == OP_KWARGS["data_interval_start"]
assert op.data_interval_end == OP_KWARGS["data_interval_end"]
assert op.conf == OP_KWARGS["conf"]
assert op.note == OP_KWARGS["note"]
assert op.wait_for_completion == OP_KWARGS["wait_for_completion"]
assert op.waiter_delay == OP_KWARGS["waiter_delay"]
assert op.waiter_max_attempts == OP_KWARGS["waiter_max_attempts"]
assert op.deferrable == OP_KWARGS["deferrable"]
@mock.patch.object(MwaaTriggerDagRunOperator, "hook")
def test_execute(self, mock_hook):
mock_hook.invoke_rest_api.return_value = HOOK_RETURN_VALUE
op = MwaaTriggerDagRunOperator(**OP_KWARGS)
op_ret_val = op.execute({})
mock_hook.invoke_rest_api.assert_called_once_with(
env_name=OP_KWARGS["env_name"],
path=f"/dags/{OP_KWARGS['trigger_dag_id']}/dagRuns",
method="POST",
body={
"dag_run_id": OP_KWARGS["trigger_run_id"],
"logical_date": OP_KWARGS["logical_date"],
"data_interval_start": OP_KWARGS["data_interval_start"],
"data_interval_end": OP_KWARGS["data_interval_end"],
"conf": OP_KWARGS["conf"],
"note": OP_KWARGS["note"],
},
airflow_version=None,
)
assert op_ret_val == HOOK_RETURN_VALUE
def test_template_fields(self):
operator = MwaaTriggerDagRunOperator(**OP_KWARGS)
validate_template_fields(operator)
@pytest.mark.parametrize(
("wait_for_completion", "deferrable"),
[
pytest.param(False, False, id="no_wait"),
pytest.param(True, False, id="wait"),
pytest.param(False, True, id="defer"),
],
)
@mock.patch.object(MwaaHook, "get_waiter")
@mock.patch.object(MwaaTriggerDagRunOperator, "hook")
def test_execute_wait_combinations(self, mock_hook, _, wait_for_completion, deferrable):
kwargs = OP_KWARGS
kwargs["wait_for_completion"] = wait_for_completion
kwargs["deferrable"] = deferrable
op = MwaaTriggerDagRunOperator(**OP_KWARGS)
mock_hook.invoke_rest_api.return_value = HOOK_RETURN_VALUE
op.defer = MagicMock()
response = op.execute({})
assert response == HOOK_RETURN_VALUE
assert mock_hook.get_waiter.call_count == wait_for_completion
assert op.defer.call_count == deferrable
| TestMwaaTriggerDagRunOperator |
| python | django__django | tests/proxy_models/models.py | {"start": 415, "end": 541} |
class ____(models.Manager):
def get_queryset(self):
return super().get_queryset().exclude(name="fred")
| PersonManager |
| python | joke2k__faker | faker/providers/job/hu_HU/__init__.py | {"start": 42, "end": 11879} |
class ____(BaseProvider):
# Derived from KSH's FEOR'08
jobs = (
"Titkár(nő)",
"Értékbecslő",
"Közterület-felügyelő",
"Építőmérnök",
"Köszörűs",
"Gépjármű- és motorkarbantartó",
"Mezőgazdasági mérnök",
"Számítógéphálózat- és rendszertechnikus",
"Adósságbehajtó",
"Fémöntőminta-készítő",
"Gyümölcs- és zöldségfeldolgozó",
"Telekommunikációs mérnök",
"Könyv- és lapkiadó szerkesztője",
"Geológus",
"Manikűrös",
"Energetikus",
"Kézbesítő",
"Kontroller",
"Mentőtiszt",
"Háztartási takarító és kisegítő",
"Dekoratőr",
"Tejfeldolgozó",
"Gyógytornász",
"Csomagkihordó",
"Kádár",
"Színész",
"Anyaggazdálkodó",
"Szoftverfejlesztő",
"Adó- és illetékhivatali ügyintéző",
"Utaskísérő",
"Táj- és kertépítészmérnök",
"Muzeológus",
"Koreográfus",
"Tetőfedő",
"Telepőr",
"Pedikűrös",
"Fémfeldolgozó",
"Intézményi takarító és kisegítő",
"Irodai szakmai irányító",
"Recepciós",
"Gépíró, szövegszerkesztő",
"Ifjúságsegítő",
"Pap",
"Adatbázis- és hálózati elemző",
"Szoftver- és alkalmazásfejlesztő",
"Burkoló",
"Történész",
"Intézményi takarító és kisegítő ",
"Kohó- és anyagtechnikus",
"Jogi asszisztens",
"Tőzsde- és pénzügyi ügynök",
"Varró",
"Bolti pénztáros",
"Kémikus",
"Kőműves",
"Szakorvos",
"Elemző közgazdász",
"Kézi mosó, vasaló",
"Irattáros",
"Földmérő és térinformatikus",
"Vendéglős",
"Élelmiszer-ipari mérnök",
"Kisállattartó és -tenyésztő",
"Szociológus",
"Lakatos",
"Pszichológus",
"Utcaseprő",
"Adatbázis-tervező és -üzemeltető",
"Gyermekfelügyelő",
"Metróvezető",
"Háztartási alkalmazott",
"Könyvelő",
"Általános irodai adminisztrátor",
"Épületasztalos",
"Ékszerkészítő",
"Üvegező",
"Könyvtári, levéltári nyilvántartó",
"Általános iskolai tanár, tanító",
"Szemétgyűjtő",
"Rendőr",
"Orvosi laboratóriumi asszisztens",
"Kubikos",
"Adatrögzítő",
"Informatikatanár",
"Fizikus",
"Vegyésztechnikus",
"Hímző",
"Ügynök",
"Kalapos",
"Egyéb művészetek tanára",
"Zöldségtermesztő",
"Dísznövény-, virág- és faiskolai kertész, csemetenevelő",
"Csipkeverő",
"Postai ügyfélkapcsolati foglalkozású",
"Tolmács",
"Kódoló",
"Fa- és könnyűipari mérnök",
"Szarvasmarha-, ló-, sertés-, juhtartó és -tenyésztő ",
"Település- és közlekedéstervező mérnök",
"Rendszergazda",
"Állatorvosi asszisztens",
"Újságíró",
"Piaci, utcai étel- és italárus",
"Néprajzkutató",
"Vám- és pénzügyőr",
"Hordár",
"Webrendszer-technikus",
"Hivatalsegéd",
"Üzletpolitikai elemző",
"Fogorvos",
"Statisztikus",
"Stukkózó",
"Utazásszervező",
"Épületbádogos",
"Szociális gondozó",
"Villamosipari technikus (elektronikai technikus)",
"Iratkezelő",
"Matróz",
"Trolibuszvezető",
"Banki pénztáros",
"Szikvízkészítő",
"Kovács",
"Minőségbiztosítási mérnök",
"Csillagász",
"Író",
"Könyvtáros",
"Fényképész",
"Bányászati technikus",
"Üzletpolitikai elemző, szervező",
"Jelnyelvi tolmács",
"Alkalmazásprogramozó",
"Cipőkészítő",
"Drágakőcsiszoló",
"Botanikus",
"Járműtakarító",
"Biztosítási ügynök",
"Gépészmérnök",
"Légiforgalmi irányító",
"Üveggyártó",
"Gumitermékgyártó",
"Repülőgépmotor-karbantartó",
"Építészmérnök",
"Tűzoltó",
"Könyvkötő",
"Pultos",
"Borász",
"Gyógyszerész",
"Kozmetikus",
"Segédápoló",
"Ápoló",
"Fordító",
"Munkavédelmi és üzembiztonsági foglalkozású",
"Végrehajtó, adósságbehajtó",
"Gyógyszertári asszisztens",
"Szőrmefestő",
"Bőrtermékkészítő",
"Műsorszóró és audiovizuális technikus",
"Kártevőirtó",
"Rakodómunkás",
"Szabásminta-készítő",
"Hulladékosztályozó",
"Erdő- és természetvédelmi mérnök",
"Készlet- és anyagnyilvántartó",
"Fogászati asszisztens",
"Séf",
"Könyvszakértő",
"Bróker",
"Áru- és divatbemutató",
"Kölcsönző",
"Épületgondnok",
"Telekommunikációs technikus",
"Környezetvédelmi technikus",
"Házvezető",
"Famegmunkáló",
"Szállodai recepciós",
"Kézi csomagoló",
"Ötvös",
"Csecsemő- és kisgyermeknevelő",
"Kerékpár-karbantartó",
"Operatőr",
"Ügyvéd",
"Szigetelő",
"Fizioterápiás asszisztens",
"Kereskedő",
"Biológus",
"Ruházati gép kezelője és gyártósor mellett dolgozó",
"Szűcs",
"Ügyféltájékoztató",
"Gyógynövénytermesztő",
"Lelkész",
"Énekes",
"Munka- és termelésszervező ",
"Légiforgalmi irányítástechnikus",
"Számítógép-hálózati elemző",
"Szabó",
"Szakács",
"Növényorvos ",
"Testőr",
"Erdő- és természetvédelmi technikus",
"Kőfaragó",
"Bányászati szakmai irányító",
"Régész",
"Lakossági kérdező",
"Számviteli ügyintéző",
"Természetvédelmi őr",
"Egyetemi, főiskolai oktató",
"Óvodapedagógus",
"Gyomírtó",
"Növényvédelmi szakértő",
"Védőnő",
"Egészségügyi dokumentátor ",
"Finommechanikai műszerész",
"Műszaki rajzoló",
"Demográfus",
"Általános orvos",
"Fedélzeti tiszt",
"Vagyonőr",
"Rendszerelemző",
"Tímár",
"Hajózómérnök",
"Hálózat- és multimédia-fejlesztő",
"Konyhai kisegítő",
"Mozigépész",
"Épületvillamossági szerelő",
"Bionövény-termesztő",
"Fogtechnikus",
"Büntetés-végrehajtási őr",
"Erdész",
"Vízgazdálkodási gépkezelő",
"Szerszámkészítő",
"Vegyészmérnök",
"Festő",
"Iratkezelő, irattáros",
"Légiforgalmi irányítástechnikai berendezések üzemeltetője",
"Masszőr",
"Zenetanár",
"Zálogházi ügyintéző és pénzkölcsönző",
"Jogtanácsos",
"Tehergépkocsi-vezető",
"Bolti eladó",
"Pénzintézeti ügyintéző",
"Növényorvosi asszisztens",
"Fitnesz- és rekreációs program irányítója",
"Zeneszerző",
"Építményszerkezet-szerelő",
"Vegyes profilú gazdálkodó",
"Pultfeltöltő",
"Képzőművész",
"Végrehajtó",
"Szerencsejáték-szervező",
"Jegypénztáros",
"Konyhafőnök",
"Műtőssegéd",
"Adótanácsadó",
"Jogász",
"Orvosi képalkotó diagnosztikai asszisztens",
"Zoológus",
"Látszerész",
"Szállítási, szállítmányozási nyilvántartó",
"Kárpitos",
"Házi gondozó",
"Táncművész",
"Cipész",
"Élelmiszer-ipari technikus",
"Zenész",
"Könyvelő (analitikus)",
"Felvásárló",
"Személyzeti és pályaválasztási szakértő",
"Bányamérnök",
"Pincér",
"Mosodai gép kezelője",
"Dietetikus",
"Rendező",
"Bognár",
"Targoncavezető",
"Hobbiállat-gondozó",
"Segédrendező",
"Marketing- és PR-ügyintéző",
"Bőrdíszműves",
"Darukezelő",
"Hallás- és beszédterapeuta",
"Konduktor",
"Villamosmérnök (energetikai mérnök)",
"Meteorológus",
"Táplálkozási tanácsadó",
"Cirkuszi előadóművész",
"Húsfeldolgozó",
"Vezető eladó",
"Könyvvizsgáló",
"Feldolgozóipari szakmai irányító",
"Pedagógiai szakértő",
"Telefonos értékesítési ügynök",
"Villamosvezető",
"Baromfitartó és -tenyésztő",
"Politológus",
"Mérőóra-leolvasó",
"Egyéb növénytermesztési foglalkozású",
"Méhész",
"Felvonószerelő",
"Személygépkocsi-vezető",
"Textilműves",
"Építő- és építésztechnikus",
"Bőröndös",
"Gipszkartonozó",
"Kalauz",
"Járművezető-oktató",
"Bérelszámoló",
"Bútorasztalos",
"Villanyszerelő",
"Kesztyűs",
"Nyomdai előkészítő",
"Mezőgazdasági technikus",
"Szőlő-, gyümölcstermesztő",
"Oktatási asszisztens",
"Édesiparitermék-gyártó",
"Fodrász",
"Nyomdász",
"Keramikus",
"Általános egészségügyi asszisztens",
"Ács",
"Kereskedelmi ügyintéző",
"Környezetfelmérő",
"Kéményseprő",
"Fotó- és mozgófilmlaboráns",
"Statisztikai ügyintéző",
"Szakképzett edző",
"Fa- és könnyűipari technikus",
"Múzeumi gyűjteménygondnok",
"Árufeltöltő",
"Idegenvezető",
"Mozdonyvezető",
"Kohó- és anyagmérnök",
"Műköves",
"Állatorvos",
"Földmérő és térinformatikai technikus ",
"Nyelvtanár",
"Ügyész",
"Sportoló",
"Címfestő",
"Nyelvész",
"Gyógypedagógus",
"Üzemanyagtöltő állomás kezelője",
"Fémcsiszoló",
"Kulturális szervező",
"Lakberendező",
"Grafikus és multimédia-tervező ",
"Középiskolai tanár",
"Cukrász",
"Légijármű-vezető",
"Sportszervező",
"Parkolóőr",
"Favágó",
"Matematikus",
"Pénzügyi elemző és befektetési tanácsadó",
"Konferencia- és rendezvényszervező",
"Faesztergályos",
"Kályha- és kandallóépítő",
"Közjegyző",
"Festékszóró",
"Statiszta",
"Minőségbiztosítási technikus",
"Épületszerkezet-tisztító",
"Menetjegyellenőr",
"Kereskedelmi tervező ",
"Munkaerő-piaci szolgáltatási ügyintéző",
"Adószakértő",
"Hegesztő",
"Gyorséttermi eladó",
"Iparművész",
"Díszítő",
"Szociálpolitikus",
"Gyártmány- és ruhatervező",
"Ingatlanforgalmazási ügyintéző",
"Kormányos",
"Díszletező",
"Segédszínész",
"Levéltáros",
"Robbantómester",
"Villamosipari technikus (energetikai technikus)",
"Ortopédiai eszközkészítő",
"Gépésztechnikus",
"Szociális segítő",
"Pék",
"Ipari alpinista",
"Villamosmérnök (elektronikai mérnök)",
"Személyi asszisztens",
"Ablaktisztító",
"Portás",
"Filozófus",
"Forgácsoló",
"Bábművész",
"Kárszakértő",
"Humánpolitikai adminisztrátor",
"Hangszerkészítő",
"Társadalombiztosítási és segélyezési hatósági ügyintéző",
"Optometrista",
"Szántóföldinövény-termesztő",
"Ingatlanügynök",
"Nyomozó",
"Egyéb, máshova nem sorolható technikus",
"Vezető takarító",
"Autóbuszvezető",
"Kárbecslő",
"Piaci árus",
"Bíró",
"Általános iskolai tanár",
"Szerszámköszörűs",
"Építőipari szakmai irányító",
)
def job(self) -> str:
return self.random_element(self.jobs)
|
Provider
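A short usage sketch, assuming the standard Faker entry point; the provider above is what backs the `hu_HU` locale's `job()` call.
from faker import Faker

fake = Faker("hu_HU")
print(fake.job())  # picks one element of the tuple above, e.g. "Gépészmérnök"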
|
python
|
google__jax
|
jax/experimental/mosaic/gpu/core.py
|
{
"start": 8571,
"end": 8835
}
|
class ____:
arrival_count: int
num_barriers: int = 1
def __post_init__(self):
if self.arrival_count < 1:
raise ValueError(
f"Arrival count must be at least 1, but got {self.arrival_count}"
)
@dataclasses.dataclass(frozen=True)
|
Barrier
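A self-contained sketch of the `__post_init__` guard shown above; the `@dataclasses.dataclass` decorator on `Barrier` itself sits just outside the excerpt, so it is assumed here.
import dataclasses

@dataclasses.dataclass(frozen=True)
class Barrier:
    arrival_count: int
    num_barriers: int = 1

    def __post_init__(self):
        if self.arrival_count < 1:
            raise ValueError(
                f"Arrival count must be at least 1, but got {self.arrival_count}"
            )

Barrier(arrival_count=2)      # accepted
try:
    Barrier(arrival_count=0)  # rejected by __post_init__
except ValueError as exc:
    print(exc)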
|
python
|
catalyst-team__catalyst
|
catalyst/contrib/layers/pooling.py
|
{
"start": 4238,
"end": 5023
}
|
class ____(nn.Module):
"""@TODO: Docs (add `Example`). Contribution is welcome."""
def __init__(self, in_features, activation_fn="Sigmoid"):
"""@TODO: Docs. Contribution is welcome."""
super().__init__()
self.max = GlobalMaxPool2d()
self.attn = GlobalAttnPool2d(in_features, activation_fn)
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Forward call."""
return torch.cat([self.max(x), self.attn(x)], 1)
@staticmethod
def out_features(in_features):
"""Returns number of channels produced by the pooling.
Args:
in_features: number of channels in the input sample
Returns:
number of output features
"""
return in_features * 2
|
GlobalMaxAttnPool2d
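A hedged shape-check sketch; the import path is assumed from the file shown above, and the concatenation doubles the channel count exactly as `out_features` reports.
import torch
from catalyst.contrib.layers.pooling import GlobalMaxAttnPool2d

pool = GlobalMaxAttnPool2d(in_features=16)
x = torch.randn(4, 16, 8, 8)   # (batch, channels, height, width)
out = pool(x)                  # max-pooled and attention-pooled maps, concatenated
assert out.shape[1] == GlobalMaxAttnPool2d.out_features(16)  # 16 * 2 == 32 channels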
|
python
|
apache__airflow
|
airflow-ctl/src/airflowctl/api/datamodels/auth_generated.py
|
{
"start": 1008,
"end": 1128
}
|
class ____(BaseModel):
detail: Annotated[list[ValidationError] | None, Field(title="Detail")] = None
|
HTTPValidationError
|
python
|
numba__numba
|
numba/tests/test_parfors_passes.py
|
{
"start": 7154,
"end": 10644
}
|
class ____(BaseTest):
sub_pass_class = numba.parfors.parfor.ConvertNumpyPass
def check_numpy_allocators(self, fn):
def test_impl():
n = 10
a = fn(n)
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "numpy_allocator")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl)
def check_numpy_random(self, fn):
def test_impl():
n = 10
a = fn(n)
return a
sub_pass = self.run_parfor_sub_pass(test_impl, ())
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "numpy_allocator")
self.check_records(sub_pass.rewritten)
self.run_parallel_check_output_array(test_impl)
def test_numpy_allocators(self):
fns = [np.ones, np.zeros]
for fn in fns:
with self.subTest(fn.__name__):
self.check_numpy_allocators(fn)
def test_numpy_random(self):
fns = [np.random.random]
for fn in fns:
with self.subTest(fn.__name__):
self.check_numpy_random(fn)
def test_numpy_arrayexpr(self):
def test_impl(a, b):
return a + b
a = b = np.ones(10)
args = (a, b)
argtypes = [typeof(x) for x in args]
sub_pass = self.run_parfor_sub_pass(test_impl, argtypes)
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "arrayexpr")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl, *args)
def test_numpy_arrayexpr_ufunc(self):
def test_impl(a, b):
return np.sin(-a) + np.float64(1) / np.sqrt(b)
a = b = np.ones(10)
args = (a, b)
argtypes = [typeof(x) for x in args]
sub_pass = self.run_parfor_sub_pass(test_impl, argtypes)
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "arrayexpr")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl, *args)
def test_numpy_arrayexpr_boardcast(self):
def test_impl(a, b):
return a + b + np.array(1)
a = np.ones(10)
b = np.ones((3, 10))
args = (a, b)
argtypes = [typeof(x) for x in args]
sub_pass = self.run_parfor_sub_pass(test_impl, argtypes)
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "arrayexpr")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl, *args)
def test_numpy_arrayexpr_reshaped(self):
def test_impl(a, b):
a = a.reshape(1, a.size) # shape[0] is now constant
return a + b
a = np.ones(10)
b = np.ones(10)
args = (a, b)
argtypes = [typeof(x) for x in args]
sub_pass = self.run_parfor_sub_pass(test_impl, argtypes)
self.assertEqual(len(sub_pass.rewritten), 1)
[record] = sub_pass.rewritten
self.assertEqual(record["reason"], "arrayexpr")
self.check_records(sub_pass.rewritten)
self.run_parallel(test_impl, *args)
|
TestConvertNumpyPass
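For context, a tiny user-level sketch of the behaviour these passes target: with `parallel=True`, an array expression like the one in `test_numpy_arrayexpr` becomes a parfor candidate.
import numpy as np
import numba

@numba.njit(parallel=True)
def add(a, b):
    return a + b  # array expression; eligible for parfor conversion

print(add(np.ones(10), np.ones(10)))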
|
python
|
ZoranPandovski__al-go-rithms
|
strings/suffix_array/suffix_array.py
|
{
"start": 1761,
"end": 3844
}
|
class ____:
def __init__(self):
self.index = 0
self.rank = [0, 0]
def buildSuffixArray(s):
s = s + '$'
suffixes = [suffix() for _ in range(len(s))]
for i in range(len(s)):
suffixes[i].index = i
suffixes[i].rank[0] = ord(s[i]) - ord('a')
suffixes[i].rank[1] = ord(s[i+1]) - ord('a') if i+1<len(s) else -1
suffixes = sorted(suffixes, key = lambda x : (x.rank[0], x.rank[1]))
ind = [0]*len(s)
k = 4
while(k < 2*len(s)):
        rank = 0
        prevRank = suffixes[0].rank[0]
        suffixes[0].rank[0] = rank  # restart ranks from 0 each pass (canonical step; omitting it can misorder later passes)
        ind[suffixes[0].index] = 0
for i in range(1, len(s)):
if (suffixes[i].rank[0] == prevRank and suffixes[i].rank[1] == suffixes[i-1].rank[1]):
prevRank = suffixes[i].rank[0]
suffixes[i].rank[0] = rank
else:
prevRank = suffixes[i].rank[0]
rank+=1
suffixes[i].rank[0] = rank
ind[suffixes[i].index] = i
for i in range(len(s)):
nextIndex = suffixes[i].index + k//2
suffixes[i].rank[1] = suffixes[ind[nextIndex]].rank[0] if (nextIndex < len(s)) else -1
suffixes = sorted(suffixes, key = lambda x : (x.rank[0], x.rank[1]))
k*=2
suffixArr = [0]*len(s)
for i in range(len(s)):
suffixArr[i] = suffixes[i].index
return suffixArr
# Tests
def testbuildSuffixArray():
suffixArrayTestData = {
'ababba': [6, 5, 0, 2, 4, 1, 3],
'aaaa': [4, 3, 2, 1, 0],
'ppppplppp': [9, 5, 8, 4, 7, 3, 6, 2, 1, 0],
'nn': [2, 1, 0]
}
for testInput in suffixArrayTestData:
testOutput = suffixArrayTestData[testInput]
assert buildSuffixArray(testInput) == testOutput, f"Test failed: for string {testInput}. Expected output is {' '.join(str(e) for e in testOutput)}, got: {' '.join(str(e) for e in buildSuffixArray(testInput))}"
testbuildSuffixArray() # all tests pass: no exception raised
# Driver code for reading from stdin that outputs constructed suffix array:
# s = input()
# print(buildSuffixArray(s))
|
suffix
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_real_transforms.py
|
{
"start": 9100,
"end": 9232
}
|
class ____(_TestDCTIIBase):
def setup_method(self):
self.rdt = int
self.dec = 5
self.type = 2
|
TestDCTIIInt
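The setup above exercises the type-2 DCT on integer input; a minimal call looks like this, using the legacy `scipy.fftpack` namespace the test targets.
import numpy as np
from scipy.fftpack import dct

x = np.arange(8, dtype=int)
print(dct(x, type=2))  # integer input is upcast; the result is float64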
|
python
|
ansible__ansible
|
test/units/module_utils/facts/test_collectors.py
|
{
"start": 4462,
"end": 6874
}
|
class ____(BaseFactsTest):
__test__ = True
gather_subset = ['!all', 'cmdline']
valid_subsets = ['cmdline']
fact_namespace = 'ansible_cmdline'
collector_class = CmdLineFactCollector
def test_parse_proc_cmdline_uefi(self):
uefi_cmdline = r'initrd=\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd root=UUID=50973b75-4a66-4bf0-9764-2b7614489e64 ro quiet'
expected = {'initrd': r'\70ef65e1a04a47aea04f7b5145ea3537\4.10.0-19-generic\initrd',
'root': 'UUID=50973b75-4a66-4bf0-9764-2b7614489e64',
'quiet': True,
'ro': True}
fact_collector = self.collector_class()
facts_dict = fact_collector._parse_proc_cmdline(uefi_cmdline)
self.assertDictEqual(facts_dict, expected)
def test_parse_proc_cmdline_fedora(self):
cmdline_fedora = r'BOOT_IMAGE=/vmlinuz-4.10.16-200.fc25.x86_64 root=/dev/mapper/fedora-root ro rd.lvm.lv=fedora/root rd.luks.uuid=luks-c80b7537-358b-4a07-b88c-c59ef187479b rd.lvm.lv=fedora/swap rhgb quiet LANG=en_US.UTF-8' # noqa
expected = {'BOOT_IMAGE': '/vmlinuz-4.10.16-200.fc25.x86_64',
'LANG': 'en_US.UTF-8',
'quiet': True,
'rd.luks.uuid': 'luks-c80b7537-358b-4a07-b88c-c59ef187479b',
'rd.lvm.lv': 'fedora/swap',
'rhgb': True,
'ro': True,
'root': '/dev/mapper/fedora-root'}
fact_collector = self.collector_class()
facts_dict = fact_collector._parse_proc_cmdline(cmdline_fedora)
self.assertDictEqual(facts_dict, expected)
def test_parse_proc_cmdline_dup_console(self):
example = r'BOOT_IMAGE=/boot/vmlinuz-4.4.0-72-generic root=UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90 ro console=tty1 console=ttyS0'
# FIXME: Two 'console' keywords? Using a dict for the fact value here loses info. Currently the 'last' one wins
expected = {'BOOT_IMAGE': '/boot/vmlinuz-4.4.0-72-generic',
'root': 'UUID=e12e46d9-06c9-4a64-a7b3-60e24b062d90',
'ro': True,
'console': 'ttyS0'}
fact_collector = self.collector_class()
facts_dict = fact_collector._parse_proc_cmdline(example)
# TODO: fails because we lose a 'console'
self.assertDictEqual(facts_dict, expected)
|
TestCmdLineFacts
|
python
|
huggingface__transformers
|
src/transformers/models/olmo/modular_olmo.py
|
{
"start": 2470,
"end": 4923
}
|
class ____(LlamaRotaryEmbedding):
@torch.no_grad()
@dynamic_rope_update # power user: used with advanced RoPE types (e.g. dynamic rope)
def forward(self, x, position_ids):
inv_freq_expanded = self.inv_freq[None, :, None].float().expand(position_ids.shape[0], -1, 1).to(x.device)
position_ids_expanded = position_ids[:, None, :].float()
device_type = x.device.type if isinstance(x.device.type, str) and x.device.type != "mps" else "cpu"
with torch.autocast(device_type=device_type, enabled=False): # Force float32
freqs = (inv_freq_expanded.float() @ position_ids_expanded.float()).transpose(1, 2)
emb = torch.cat((freqs, freqs), dim=-1)
cos = emb.cos() * self.attention_scaling
sin = emb.sin() * self.attention_scaling
return cos, sin
def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
"""Applies Rotary Position Embedding to the query and key tensors.
Args:
q (`torch.Tensor`): The query tensor.
k (`torch.Tensor`): The key tensor.
cos (`torch.Tensor`): The cosine part of the rotary embedding.
sin (`torch.Tensor`): The sine part of the rotary embedding.
position_ids (`torch.Tensor`, *optional*):
Deprecated and unused.
unsqueeze_dim (`int`, *optional*, defaults to 1):
The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
Returns:
`tuple(torch.Tensor)` comprising of the query and key tensors rotated using the Rotary Position Embedding.
"""
q_type, k_type = q.dtype, k.dtype
cos = cos.unsqueeze(unsqueeze_dim)
sin = sin.unsqueeze(unsqueeze_dim)
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
return q_embed.to(q_type), k_embed.to(k_type)
|
OlmoRotaryEmbedding
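A self-contained sketch of the broadcasting the docstring describes; `rotate_half` is re-declared here purely for illustration (the real module imports it), and trivial angles are used so the result is easy to check.
import torch

def rotate_half(x):
    # Illustrative copy of the helper the module imports: swap and negate halves.
    x1, x2 = x.chunk(2, dim=-1)
    return torch.cat((-x2, x1), dim=-1)

batch, heads, seq, head_dim = 1, 2, 4, 8
q = torch.randn(batch, heads, seq, head_dim)
cos = torch.ones(batch, seq, head_dim)    # cos=1, sin=0 -> the rotation is the identity
sin = torch.zeros(batch, seq, head_dim)
q_embed = (q * cos.unsqueeze(1)) + (rotate_half(q) * sin.unsqueeze(1))  # unsqueeze_dim=1
assert torch.allclose(q_embed, q)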
|
python
|
pandas-dev__pandas
|
pandas/tests/internals/test_internals.py
|
{
"start": 42158,
"end": 47556
}
|
class ____:
@pytest.fixture(
params=[
lambda x: x,
lambda x: x.to_series(),
lambda x: x._data,
lambda x: list(x),
lambda x: x.astype(object),
lambda x: np.asarray(x),
lambda x: x[0],
lambda x: x[:0],
]
)
def element(self, request):
"""
Functions that take an Index and return an element that should have
blk._can_hold_element(element) for a Block with this index's dtype.
"""
return request.param
def test_datetime_block_can_hold_element(self):
block = create_block("datetime", [0])
assert block._can_hold_element([])
# We will check that block._can_hold_element iff arr.__setitem__ works
arr = pd.array(block.values.ravel())
# coerce None
assert block._can_hold_element(None)
arr[0] = None
assert arr[0] is pd.NaT
# coerce different types of datetime objects
vals = [np.datetime64("2010-10-10"), datetime(2010, 10, 10)]
for val in vals:
assert block._can_hold_element(val)
arr[0] = val
val = date(2010, 10, 10)
assert not block._can_hold_element(val)
msg = (
"value should be a 'Timestamp', 'NaT', "
"or array of those. Got 'date' instead."
)
with pytest.raises(TypeError, match=msg):
arr[0] = val
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
def test_interval_can_hold_element_emptylist(self, dtype, element):
arr = np.array([1, 3, 4], dtype=dtype)
ii = IntervalIndex.from_breaks(arr)
blk = new_block(ii._data, BlockPlacement([1]), ndim=2)
assert blk._can_hold_element([])
# TODO: check this holds for all blocks
@pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64])
def test_interval_can_hold_element(self, dtype, element):
arr = np.array([1, 3, 4, 9], dtype=dtype)
ii = IntervalIndex.from_breaks(arr)
blk = new_block(ii._data, BlockPlacement([1]), ndim=2)
elem = element(ii)
self.check_series_setitem(elem, ii, True)
assert blk._can_hold_element(elem)
# Careful: to get the expected Series-inplace behavior we need
# `elem` to not have the same length as `arr`
ii2 = IntervalIndex.from_breaks(arr[:-1], closed="neither")
elem = element(ii2)
with pytest.raises(TypeError, match="Invalid value"):
self.check_series_setitem(elem, ii, False)
assert not blk._can_hold_element(elem)
ii3 = IntervalIndex.from_breaks([Timestamp(1), Timestamp(3), Timestamp(4)])
elem = element(ii3)
with pytest.raises(TypeError, match="Invalid value"):
self.check_series_setitem(elem, ii, False)
assert not blk._can_hold_element(elem)
ii4 = IntervalIndex.from_breaks([Timedelta(1), Timedelta(3), Timedelta(4)])
elem = element(ii4)
with pytest.raises(TypeError, match="Invalid value"):
self.check_series_setitem(elem, ii, False)
assert not blk._can_hold_element(elem)
def test_period_can_hold_element_emptylist(self):
pi = period_range("2016", periods=3, freq="Y")
blk = new_block(pi._data.reshape(1, 3), BlockPlacement([1]), ndim=2)
assert blk._can_hold_element([])
def test_period_can_hold_element(self, element):
pi = period_range("2016", periods=3, freq="Y")
elem = element(pi)
self.check_series_setitem(elem, pi, True)
# Careful: to get the expected Series-inplace behavior we need
# `elem` to not have the same length as `arr`
pi2 = pi.asfreq("D")[:-1]
elem = element(pi2)
with pytest.raises(TypeError, match="Invalid value"):
self.check_series_setitem(elem, pi, False)
dti = pi.to_timestamp("s")[:-1]
elem = element(dti)
with pytest.raises(TypeError, match="Invalid value"):
self.check_series_setitem(elem, pi, False)
def test_period_reindex_axis(self):
# GH#60273 Test reindexing of block with PeriodDtype
pi = period_range("2020", periods=5, freq="Y")
blk = new_block(pi._data.reshape(5, 1), BlockPlacement(slice(5)), ndim=2)
mgr = BlockManager(blocks=(blk,), axes=[Index(np.arange(5)), Index(["a"])])
reindexed = mgr.reindex_axis(Index([0, 2, 4]), axis=0)
result = DataFrame._from_mgr(reindexed, axes=reindexed.axes)
expected = DataFrame([[pi[0], pi[2], pi[4]]], columns=[0, 2, 4], index=["a"])
tm.assert_frame_equal(result, expected)
def check_can_hold_element(self, obj, elem, inplace: bool):
blk = obj._mgr.blocks[0]
if inplace:
assert blk._can_hold_element(elem)
else:
assert not blk._can_hold_element(elem)
def check_series_setitem(self, elem, index: Index, inplace: bool):
arr = index._data.copy()
ser = Series(arr, copy=False)
self.check_can_hold_element(ser, elem, inplace)
if is_scalar(elem):
ser[0] = elem
else:
ser[: len(elem)] = elem
if inplace:
assert ser._values is arr # i.e. setting was done inplace
else:
assert ser.dtype == object
|
TestCanHoldElement
|
python
|
dagster-io__dagster
|
python_modules/dagster-test/dagster_test/utils/benchmark.py
|
{
"start": 347,
"end": 3300
}
|
class ____:
def __init__(
self,
*,
output: TextIO = sys.stdout,
name: Optional[str] = None,
experiment_settings: Optional[Mapping[str, Any]] = None,
):
self.entries: list[ProfilingEntry] = []
self.output = Console()
self.name = name or "anonymous"
self.experiment_settings = experiment_settings
def start(self) -> Self:
self.entries.append(ProfilingEntry("start", time.time()))
return self
def log_start_message(self) -> None:
self.output.print(f"Profiling session started ({self.name})")
self._log_blank_line()
if self.experiment_settings:
self._log_experiment_settings()
self._log_blank_line()
@contextmanager
def logged_execution_time(self, name: str) -> Iterator[None]:
yield
self.entries.append(ProfilingEntry(name, time.time()))
self._log_step(-1)
def log_result_summary(self):
self._log_divider()
if self.experiment_settings:
self._log_experiment_settings()
self._log_blank_line()
self._log_result_table()
# ########################
# ##### PRIVATE
# ########################
def _log_step(self, index: int) -> None:
index = index if index >= 0 else len(self.entries) + index
entry = self.entries[index]
label = f"Execution time for step {index} ({entry.name}):"
time_elapsed = entry.time - self.entries[index - 1].time
message = f"{label} {time_elapsed:.4f} seconds"
self.output.print(message)
def _log_header(self, header: str) -> None:
self.output.print(header)
self.output.print("-" * len(header))
def _log_divider(self) -> None:
self.output.print("=" * 79)
self.output.print()
def _log_blank_line(self) -> None:
self.output.print()
def _log_experiment_settings(self) -> None:
table = self._get_experiment_settings_table()
self.output.print(table)
def _log_result_table(self) -> None:
table = self._get_result_table()
self.output.print(table)
def _get_experiment_settings_table(self) -> Table:
table = Table(title="Experiment settings", title_justify="left")
table.add_column("Key", justify="right")
table.add_column("Value", justify="right")
for key, value in (self.experiment_settings or {}).items():
table.add_row(key, str(value))
return table
def _get_result_table(self) -> Table:
table = Table(title="Execution times", title_justify="left")
table.add_column("Index", justify="right")
table.add_column("Step", justify="right")
table.add_column("Time", justify="right")
for i, entry in enumerate(self.entries[1:]):
table.add_row(str(i), entry.name, f"{entry.time - self.entries[i].time:.4f}")
return table
|
ProfilingSession
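A usage sketch of the class above, assuming its own module-level imports (`Console`, `Table`, `ProfilingEntry`) are available.
import time

session = ProfilingSession(name="example", experiment_settings={"runs": 3}).start()
session.log_start_message()
with session.logged_execution_time("sleep a bit"):
    time.sleep(0.1)              # the step's wall-clock time is logged on exit
session.log_result_summary()     # prints the settings and per-step timing tables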
|
python
|
Lightning-AI__lightning
|
src/lightning/fabric/strategies/launchers/xla.py
|
{
"start": 1128,
"end": 5002
}
|
class ____(_Launcher):
r"""Launches processes that run a given function in parallel on XLA supported hardware, and joins them all at the
end.
The main process in which this launcher is invoked creates N so-called worker processes (using the
`torch_xla` :func:`xmp.spawn`) that run the given function.
Worker processes have a rank that ranges from 0 to N - 1.
Note:
- This launcher requires all objects to be pickleable.
- It is important that the entry point to the program/script is guarded by ``if __name__ == "__main__"``.
Args:
strategy: A reference to the strategy that is used together with this launcher
"""
def __init__(self, strategy: Union["XLAStrategy", "XLAFSDPStrategy"]) -> None:
if not _XLA_AVAILABLE:
raise ModuleNotFoundError(str(_XLA_AVAILABLE))
self._strategy = strategy
self._start_method = "fork"
@property
@override
def is_interactive_compatible(self) -> bool:
return True
@override
def launch(self, function: Callable, *args: Any, **kwargs: Any) -> Any:
"""Launches processes that run the given function in parallel.
The function is allowed to have a return value. However, when all processes join, only the return value
of worker process 0 gets returned from this `launch` method in the main process.
Arguments:
function: The entry point for all launched processes.
*args: Optional positional arguments to be passed to the given function.
**kwargs: Optional keyword arguments to be passed to the given function.
"""
return_queue: Union[queue.Queue, mp.SimpleQueue]
return_queue = mp.Manager().Queue()
import torch_xla.distributed.xla_multiprocessing as xmp
spawn_kwargs = {}
nprocs = self._strategy.num_processes
if nprocs == 1:
# avoid warning: "Unsupported nprocs". If it's 1, it will call the launched function directly.
# otherwise it will use all devices
spawn_kwargs["nprocs"] = nprocs
xmp.spawn(
self._wrapping_function,
args=(function, args, kwargs, return_queue),
start_method=self._start_method,
**spawn_kwargs,
)
return return_queue.get()
def _wrapping_function(
self,
# XLA's multiprocessing returns the global index, not the local index as torch's multiprocessing
# https://github.com/pytorch/xla/blob/v1.13.0/torch_xla/distributed/xla_multiprocessing.py#L321
process_idx: int,
function: Callable,
args: Any,
kwargs: Any,
return_queue: Union[mp.SimpleQueue, queue.Queue],
global_states: Optional[_GlobalStateSnapshot] = None,
) -> None:
import torch_xla.core.xla_model as xm
if len(xm.get_xla_supported_devices()) > 1:
# `get_xla_supported_devices` in the spawned process returns the logical devices (2 for v2/v3 and 1 for v4)
# so when there's more than one (multithreading), objects need to be deep-copied
import copy
function, args, kwargs = copy.deepcopy((function, args, kwargs))
results = function(*args, **kwargs)
if self._strategy.local_rank == 0:
return_queue.put(move_data_to_device(results, "cpu"))
_rank_teardown(self._strategy.local_rank)
def _rank_teardown(rank: int) -> None:
import torch_xla.core.xla_model as xm
# Make all processes wait for each other before joining
# https://github.com/pytorch/xla/issues/1801#issuecomment-602799542
xm.rendezvous("end-process")
# Ensure that the rank 0 process is the one exiting last
# https://github.com/pytorch/xla/issues/2190#issuecomment-641665358
if rank == 0:
time.sleep(1)
|
_XLALauncher
|
python
|
apache__airflow
|
airflow-core/src/airflow/serialization/enums.py
|
{
"start": 1130,
"end": 2270
}
|
class ____(str, Enum):
"""Enum of supported attribute types of DAG."""
DAG = "dag"
ASSET_EVENT_ACCESSORS = "asset_event_accessors"
ASSET_EVENT_ACCESSOR = "asset_event_accessor"
OP = "operator"
DATETIME = "datetime"
TIMEDELTA = "timedelta"
TIMEZONE = "timezone"
RELATIVEDELTA = "relativedelta"
BASE_TRIGGER = "base_trigger"
AIRFLOW_EXC_SER = "airflow_exc_ser"
BASE_EXC_SER = "base_exc_ser"
DICT = "dict"
SET = "set"
TUPLE = "tuple"
POD = "k8s.V1Pod"
TASK_GROUP = "taskgroup"
EDGE_INFO = "edgeinfo"
PARAM = "param"
XCOM_REF = "xcomref"
ASSET = "asset"
ASSET_ALIAS = "asset_alias"
ASSET_ANY = "asset_any"
ASSET_ALL = "asset_all"
ASSET_REF = "asset_ref"
ASSET_UNIQUE_KEY = "asset_unique_key"
ASSET_ALIAS_UNIQUE_KEY = "asset_alias_unique_key"
CONNECTION = "connection"
TASK_CONTEXT = "task_context"
ARG_NOT_SET = "arg_not_set"
TASK_CALLBACK_REQUEST = "task_callback_request"
DAG_CALLBACK_REQUEST = "dag_callback_request"
TASK_INSTANCE_KEY = "task_instance_key"
DEADLINE_ALERT = "deadline_alert"
|
DagAttributeTypes
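Because the enum derives from `str`, members compare equal to their serialized tags, which keeps round-tripping cheap; a small sketch:
assert DagAttributeTypes.DICT == "dict"
assert DagAttributeTypes("timedelta") is DagAttributeTypes.TIMEDELTA
assert DagAttributeTypes.POD.value == "k8s.V1Pod"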
|
python
|
getsentry__sentry
|
tests/sentry/issues/endpoints/test_organization_searches.py
|
{
"start": 4244,
"end": 12125
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-organization-searches"
method = "post"
@cached_property
def manager(self) -> User:
user = self.create_user("test@test.com")
self.create_member(organization=self.organization, user=user, role="manager")
return user
@cached_property
def member(self) -> User:
user = self.create_user("test@test.com")
self.create_member(organization=self.organization, user=user)
return user
def test_simple(self) -> None:
search_type = SearchType.ISSUE.value
name = "test"
query = "hello"
visibility = Visibility.ORGANIZATION
self.login_as(user=self.manager)
resp = self.get_success_response(
self.organization.slug,
type=search_type,
name=name,
query=query,
visibility=visibility,
)
assert resp.data["name"] == name
assert resp.data["query"] == query
assert resp.data["type"] == search_type
assert resp.data["visibility"] == visibility
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_member_cannot_create_org_search(self) -> None:
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query="test",
visibility=Visibility.ORGANIZATION,
)
assert resp.status_code == 400
def test_member_can_create_owner_search(self) -> None:
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query="test",
visibility=Visibility.OWNER,
)
assert resp.status_code == 200
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_org_global_search_conflict(self) -> None:
global_search = SavedSearch.objects.create(
type=SearchType.ISSUE.value,
name="Some global search",
query="is:unresolved",
is_global=True,
visibility=Visibility.ORGANIZATION,
)
# Org searches may be created with same query as global searches
self.login_as(user=self.manager)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=global_search.query,
)
assert resp.status_code == 200
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_org_org_search_conflict(self) -> None:
org_search = SavedSearch.objects.create(
organization=self.organization,
type=SearchType.ISSUE.value,
name="Some org search",
query="org search",
visibility=Visibility.ORGANIZATION,
)
self.login_as(user=self.manager)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=org_search.query,
visibility=Visibility.ORGANIZATION,
)
assert resp.status_code == 400
assert "already exists" in resp.data["detail"]
def test_owner_global_search_conflict(self) -> None:
global_search = SavedSearch.objects.create(
type=SearchType.ISSUE.value,
name="Some global search",
query="is:unresolved",
is_global=True,
visibility=Visibility.ORGANIZATION,
)
# Owner searches may be created with same query as global searches
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=global_search.query,
visibility=Visibility.OWNER,
)
assert resp.status_code == 200
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_owner_org_search_conflict(self) -> None:
org_search = SavedSearch.objects.create(
organization=self.organization,
type=SearchType.ISSUE.value,
name="Some org search",
query="org search",
visibility=Visibility.ORGANIZATION,
)
# Owner searches may be created with same query as org searches
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=org_search.query,
visibility=Visibility.OWNER,
)
assert resp.status_code == 200
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_owner_owner_search_conflict(self) -> None:
user_search = SavedSearch.objects.create(
organization=self.organization,
type=SearchType.ISSUE.value,
name="Some user search",
query="user search",
visibility=Visibility.OWNER,
owner_id=self.member.id,
)
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=user_search.query,
visibility=Visibility.OWNER,
)
assert resp.status_code == 400
assert "already exists" in resp.data["detail"]
def test_owner1_owner2_search_conflict(self) -> None:
# User 1 has a saved search in org
other_user_search = SavedSearch.objects.create(
organization=self.organization,
type=SearchType.ISSUE.value,
name="Some other user in org made this search",
query="user search",
visibility=Visibility.OWNER,
owner_id=self.create_user("otheruser@test.com").id,
)
# User 2 creates a similar search in the same org
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=other_user_search.query,
visibility=Visibility.OWNER,
)
# Should work and both searches should exist
assert resp.status_code == 200
assert SavedSearch.objects.filter(id=other_user_search.id).exists()
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_owner_pinned_search_conflict(self) -> None:
# Member has a pinned search
pinned_search = SavedSearch.objects.create(
organization=self.organization,
type=SearchType.ISSUE.value,
name="My Pinned Search",
query="user pinned search",
visibility=Visibility.OWNER_PINNED,
owner_id=self.member.id,
)
# Member creates a saved search with the same query
self.login_as(user=self.member)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query=pinned_search.query,
visibility=Visibility.OWNER,
)
assert resp.status_code == 200
assert SavedSearch.objects.filter(id=resp.data["id"]).exists()
def test_empty(self) -> None:
self.login_as(user=self.manager)
resp = self.get_response(
self.organization.slug,
type=SearchType.ISSUE.value,
name="hello",
query="",
visibility=Visibility.ORGANIZATION,
)
assert resp.status_code == 400
assert "This field may not be blank." == resp.data["query"][0]
|
CreateOrganizationSearchesTest
|
python
|
facelessuser__pymdown-extensions
|
pymdownx/progressbar.py
|
{
"start": 6458,
"end": 7580
}
|
class ____(Extension):
"""Add progress bar extension to Markdown class."""
def __init__(self, *args, **kwargs):
"""Initialize."""
self.config = {
'level_class': [
True,
"Include class that defines progress level - Default: True"
],
'progress_increment': [
20,
"Progress increment step - Default: 20"
],
'add_classes': [
'',
"Add additional classes to the progress tag for styling. "
"Classes are separated by spaces. - Default: None"
]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md):
"""Add the progress bar pattern handler."""
util.escape_chars(md, ['='])
progress = ProgressBarPattern(RE_PROGRESS, md)
progress.config = self.getConfigs()
md.inlinePatterns.register(progress, "progress-bar", 179)
def makeExtension(*args, **kwargs):
"""Return extension."""
return ProgressBarExtension(*args, **kwargs)
|
ProgressBarExtension
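A usage sketch: the extension is registered by its dotted path, and the inline `[=65% "65%"]` notation is the progress-bar syntax assumed from the extension's documentation.
import markdown

html = markdown.markdown(
    'Task status: [=65% "65%"]',
    extensions=["pymdownx.progressbar"],
)
print(html)  # renders a progress-bar HTML block in place of the shorthand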
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_vl_moe/modeling_qwen3_vl_moe.py
|
{
"start": 26800,
"end": 27842
}
|
class ____(GradientCheckpointingLayer):
def __init__(self, config, attn_implementation: str = "sdpa") -> None:
super().__init__()
self.norm1 = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.norm2 = nn.LayerNorm(config.hidden_size, eps=1e-6)
self.attn = Qwen3VLMoeVisionAttention(config=config)
self.mlp = Qwen3VLMoeVisionMLP(config=config)
def forward(
self,
hidden_states: torch.Tensor,
cu_seqlens: torch.Tensor,
rotary_pos_emb: Optional[torch.Tensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs,
) -> torch.Tensor:
hidden_states = hidden_states + self.attn(
self.norm1(hidden_states),
cu_seqlens=cu_seqlens,
rotary_pos_emb=rotary_pos_emb,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = hidden_states + self.mlp(self.norm2(hidden_states))
return hidden_states
|
Qwen3VLMoeVisionBlock
|
python
|
streamlit__streamlit
|
lib/streamlit/components/v2/bidi_component/state.py
|
{
"start": 1100,
"end": 3141
}
|
class ____(AttributeDictionary):
"""The schema for the custom component result object.
The custom component result object is a dictionary-like object that
supports both key and attribute notation. It contains all of the
component's state and trigger values.
Attributes
----------
<state_keys> : Any
All state values from the component. State values are persistent across
app reruns until explicitly changed. You can have multiple state keys
as attributes.
<trigger_keys> : Any
All trigger values from the component. Trigger values are transient and
reset to ``None`` after one script run. You can have multiple trigger
keys as attributes.
"""
def __init__(
self,
state_vals: dict[str, Any] | None = None,
trigger_vals: dict[str, Any] | None = None,
) -> None:
"""Initialize a BidiComponentResult.
Parameters
----------
state_vals : dict[str, Any] or None
A dictionary of state values from the component.
trigger_vals : dict[str, Any] or None
A dictionary of trigger values from the component.
"""
if state_vals is None:
state_vals = {}
if trigger_vals is None:
trigger_vals = {}
super().__init__(
{
# The order here matters, because all stateful values will
# always be returned, but trigger values may be transient. This
# mirrors presentation behavior in
# `make_bidi_component_presenter`.
**trigger_vals,
**state_vals,
}
)
def unwrap_component_state(raw_state: Any) -> dict[str, Any]:
"""Return flat mapping when given a dict; otherwise, empty dict.
The new canonical state is flat, so this is effectively an identity for
dict inputs and a guard for other types.
"""
return dict(raw_state) if isinstance(raw_state, dict) else {}
|
BidiComponentResult
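A small sketch of the dual key/attribute access the docstring describes; note from the merge order above that a state value wins over a trigger value with the same key.
result = BidiComponentResult(
    state_vals={"count": 3},
    trigger_vals={"clicked": True},
)
assert result.count == 3          # attribute access
assert result["clicked"] is True  # key access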
|
python
|
giampaolo__psutil
|
tests/test_heap.py
|
{
"start": 7963,
"end": 9524
}
|
class ____(HeapTestCase):
@retry_on_failure()
def test_heap_used(self):
"""Test that HeapAlloc() without HeapFree() increases heap_used."""
size = HEAP_SIZE
mem1 = psutil.heap_info()
heap = GetProcessHeap()
addr = HeapAlloc(heap, size)
mem2 = psutil.heap_info()
try:
assert mem2.heap_used - mem1.heap_used == size
finally:
HeapFree(heap, addr)
trim_memory()
mem3 = psutil.heap_info()
assert mem3.heap_used == mem1.heap_used
@retry_on_failure()
def test_mmap_used(self):
"""Test that VirtualAllocEx() without VirtualFreeEx() increases
mmap_used.
"""
size = MMAP_SIZE
mem1 = psutil.heap_info()
addr = VirtualAllocEx(size)
mem2 = psutil.heap_info()
try:
assert mem2.mmap_used - mem1.mmap_used == size
finally:
VirtualFreeEx(addr)
trim_memory()
mem3 = psutil.heap_info()
assert mem3.mmap_used == mem1.mmap_used
@retry_on_failure()
def test_heap_count(self):
"""Test that HeapCreate() without HeapDestroy() increases
heap_count.
"""
mem1 = psutil.heap_info()
heap = HeapCreate(HEAP_SIZE, 0)
mem2 = psutil.heap_info()
try:
assert mem2.heap_count == mem1.heap_count + 1
finally:
HeapDestroy(heap)
trim_memory()
mem3 = psutil.heap_info()
assert mem3.heap_count == mem1.heap_count
|
TestHeapWindows
|
python
|
modin-project__modin
|
modin/core/dataframe/pandas/partitioning/partition_manager.py
|
{
"start": 3328,
"end": 77428
}
|
class ____(
ClassLogger, ABC, modin_layer="PARTITION-MANAGER", log_level=LogLevel.DEBUG
):
"""
Base class for managing the dataframe data layout and operators across the distribution of partitions.
Partition class is the class to use for storing each partition.
Each partition must extend the `PandasDataframePartition` class.
"""
_partition_class = None
# Column partitions class is the class to use to create the column partitions.
_column_partitions_class = None
# Row partitions class is the class to use to create the row partitions.
_row_partition_class = None
_execution_wrapper = None
@classmethod
def materialize_futures(cls, input_list):
"""
Materialize all futures in the input list.
Parameters
----------
input_list : list
The list that has to be manipulated.
Returns
-------
list
A new list with materialized objects.
"""
# Do nothing if input_list is None or [].
if input_list is None:
return None
filtered_list = []
filtered_idx = []
for idx, item in enumerate(input_list):
if cls._execution_wrapper.is_future(item):
filtered_idx.append(idx)
filtered_list.append(item)
filtered_list = cls._execution_wrapper.materialize(filtered_list)
result = input_list.copy()
for idx, item in zip(filtered_idx, filtered_list):
result[idx] = item
return result
@classmethod
def preprocess_func(cls, map_func):
"""
Preprocess a function to be applied to `PandasDataframePartition` objects.
Parameters
----------
map_func : callable
The function to be preprocessed.
Returns
-------
callable
The preprocessed version of the `map_func` provided.
Notes
-----
Preprocessing does not require any specific format, only that the
`PandasDataframePartition.apply` method will recognize it (for the subclass
being used).
If your `PandasDataframePartition` objects assume that a function provided
is serialized or wrapped or in some other format, this is the place
to add that logic. It is possible that this can also just return
`map_func` if the `apply` method of the `PandasDataframePartition` object
you are using does not require any modification to a given function.
"""
if cls._execution_wrapper.is_future(map_func):
return map_func # Has already been preprocessed
old_value = PersistentPickle.get()
# When performing a function with Modin objects, it is more profitable to
# do the conversion to pandas once on the main process than several times
# on worker processes. Details: https://github.com/modin-project/modin/pull/6673/files#r1391086755
        # Otherwise, with Dask, there may be an error: `coroutine 'Client._gather' was never awaited`
need_update = not PersistentPickle.get() and Engine.get() != "Dask"
if need_update:
PersistentPickle.put(True)
try:
result = cls._partition_class.preprocess_func(map_func)
finally:
if need_update:
PersistentPickle.put(old_value)
return result
# END Abstract Methods
@classmethod
def create_partition_from_metadata(
cls, dtypes: Optional[pandas.Series] = None, **metadata
):
"""
Create NumPy array of partitions that holds an empty dataframe with given metadata.
Parameters
----------
dtypes : pandas.Series, optional
Column dtypes.
Upon creating a pandas DataFrame from `metadata` we call `astype` since
pandas doesn't allow to pass a list of dtypes directly in the constructor.
**metadata : dict
Metadata that has to be wrapped in a partition.
Returns
-------
np.ndarray
A NumPy 2D array of a single partition which contains the data.
"""
metadata_dataframe = pandas.DataFrame(**metadata)
if dtypes is not None:
metadata_dataframe = metadata_dataframe.astype(dtypes)
return np.array([[cls._partition_class.put(metadata_dataframe)]])
@classmethod
def column_partitions(cls, partitions, full_axis=True):
"""
Get the list of `BaseDataframeAxisPartition` objects representing column-wise partitions.
Parameters
----------
partitions : list-like
List of (smaller) partitions to be combined to column-wise partitions.
full_axis : bool, default: True
Whether or not this partition contains the entire column axis.
Returns
-------
list
A list of `BaseDataframeAxisPartition` objects.
Notes
-----
        Each value in this list will be a `BaseDataframeAxisPartition` object.
`BaseDataframeAxisPartition` is located in `axis_partition.py`.
"""
if not isinstance(partitions, list):
partitions = [partitions]
return [
cls._column_partitions_class(col, full_axis=full_axis)
for frame in partitions
for col in frame.T
]
@classmethod
def row_partitions(cls, partitions):
"""
List of `BaseDataframeAxisPartition` objects representing row-wise partitions.
Parameters
----------
partitions : list-like
List of (smaller) partitions to be combined to row-wise partitions.
Returns
-------
list
A list of `BaseDataframeAxisPartition` objects.
Notes
-----
        Each value in this list will be a `BaseDataframeAxisPartition` object.
`BaseDataframeAxisPartition` is located in `axis_partition.py`.
"""
if not isinstance(partitions, list):
partitions = [partitions]
return [cls._row_partition_class(row) for frame in partitions for row in frame]
@classmethod
def axis_partition(cls, partitions, axis, full_axis: bool = True):
"""
Logically partition along given axis (columns or rows).
Parameters
----------
partitions : list-like
List of partitions to be combined.
axis : {0, 1}
0 for column partitions, 1 for row partitions.
full_axis : bool, default: True
Whether or not this partition contains the entire column axis.
Returns
-------
list
A list of `BaseDataframeAxisPartition` objects.
"""
make_column_partitions = axis == 0
if not full_axis and not make_column_partitions:
raise NotImplementedError(
(
"Row partitions must contain the entire axis. We don't "
+ "support virtual partitioning for row partitions yet."
)
)
return (
cls.column_partitions(partitions)
if make_column_partitions
else cls.row_partitions(partitions)
)
@classmethod
def groupby_reduce(
cls, axis, partitions, by, map_func, reduce_func, apply_indices=None
):
"""
Groupby data using the `map_func` provided along the `axis` over the `partitions` then reduce using `reduce_func`.
Parameters
----------
axis : {0, 1}
Axis to groupby over.
partitions : NumPy 2D array
Partitions of the ModinFrame to groupby.
by : NumPy 2D array
Partitions of 'by' to broadcast.
map_func : callable
Map function.
reduce_func : callable,
Reduce function.
apply_indices : list of ints, default: None
Indices of `axis ^ 1` to apply function over.
Returns
-------
NumPy array
Partitions with applied groupby.
"""
if apply_indices is not None:
partitions = (
partitions[apply_indices] if axis else partitions[:, apply_indices]
)
if by is not None:
# need to make sure that the partitioning of the following objects
# coincides in the required axis, because `partition_manager.broadcast_apply`
# doesn't call `_copartition` unlike `modin_frame.broadcast_apply`
assert partitions.shape[axis] == by.shape[axis], (
f"the number of partitions along {axis=} is not equal: "
+ f"{partitions.shape[axis]} != {by.shape[axis]}"
)
mapped_partitions = cls.broadcast_apply(
axis, map_func, left=partitions, right=by
)
else:
mapped_partitions = cls.map_partitions(partitions, map_func)
        # Assuming that the output will not be larger than the input,
# keep the current number of partitions.
num_splits = min(len(partitions), NPartitions.get())
return cls.map_axis_partitions(
axis,
mapped_partitions,
reduce_func,
enumerate_partitions=True,
num_splits=num_splits,
)
@classmethod
@wait_computations_if_benchmark_mode
def broadcast_apply_select_indices(
cls,
axis,
apply_func,
left,
right,
left_indices,
right_indices,
keep_remaining=False,
):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` to selected indices.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : NumPy 2D array
Left partitions.
right : NumPy 2D array
Right partitions.
left_indices : list-like
Indices to apply function to.
right_indices : dictionary of indices of right partitions
            Indices that you want to bring to the specified left partition, for example
dict {key: {key1: [0, 1], key2: [5]}} means that in left[key] you want to
broadcast [right[key1], right[key2]] partitions and internal indices
for `right` must be [[0, 1], [5]].
keep_remaining : bool, default: False
Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns
-------
NumPy array
An array of partition objects.
Notes
-----
Your internal function must take these kwargs:
[`internal_indices`, `other`, `internal_other_indices`] to work correctly!
"""
if not axis:
partitions_for_apply = left.T
right = right.T
else:
partitions_for_apply = left
[obj.drain_call_queue() for row in right for obj in row]
def get_partitions(index):
"""Grab required partitions and indices from `right` and `right_indices`."""
must_grab = right_indices[index]
partitions_list = np.array([right[i] for i in must_grab.keys()])
indices_list = list(must_grab.values())
return {"other": partitions_list, "internal_other_indices": indices_list}
new_partitions = np.array(
[
(
partitions_for_apply[i]
if i not in left_indices
else cls._apply_func_to_list_of_partitions_broadcast(
apply_func,
partitions_for_apply[i],
internal_indices=left_indices[i],
**get_partitions(i),
)
)
for i in range(len(partitions_for_apply))
if i in left_indices or keep_remaining
]
)
if not axis:
new_partitions = new_partitions.T
return new_partitions
@classmethod
@wait_computations_if_benchmark_mode
def base_broadcast_apply(cls, axis, apply_func, left, right):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` function.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : np.ndarray
NumPy array of left partitions.
right : np.ndarray
NumPy array of right partitions.
Returns
-------
np.ndarray
NumPy array of result partition objects.
Notes
-----
This will often be overridden by implementations. It materializes the
entire partitions of the right and applies them to the left through `apply`.
"""
def map_func(df, *others):
other = (
pandas.concat(others, axis=axis ^ 1) if len(others) > 1 else others[0]
)
# to reduce peak memory consumption
del others
return apply_func(df, other)
map_func = cls.preprocess_func(map_func)
rt_axis_parts = cls.axis_partition(right, axis ^ 1)
return np.array(
[
[
part.apply(
map_func,
*(
rt_axis_parts[col_idx].list_of_blocks
if axis
else rt_axis_parts[row_idx].list_of_blocks
),
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
@classmethod
@wait_computations_if_benchmark_mode
def broadcast_axis_partitions(
cls,
axis,
apply_func,
left,
right,
keep_partitioning=False,
num_splits=None,
apply_indices=None,
broadcast_all=True,
enumerate_partitions=False,
lengths=None,
apply_func_args=None,
**kwargs,
):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` along full `axis`.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : NumPy 2D array
Left partitions.
right : NumPy 2D array
Right partitions.
keep_partitioning : boolean, default: False
The flag to keep partition boundaries for Modin Frame if possible.
Setting it to True disables shuffling data from one partition to another in case the resulting
number of splits is equal to the initial number of splits.
num_splits : int, optional
The number of partitions to split the result into across the `axis`. If None, then the number
            of splits will be inferred automatically. If `num_splits` is None and `keep_partitioning=True`
then the number of splits is preserved.
apply_indices : list of ints, default: None
Indices of `axis ^ 1` to apply function over.
broadcast_all : bool, default: True
Whether or not to pass all right axis partitions to each of the left axis partitions.
enumerate_partitions : bool, default: False
Whether or not to pass partition index into `apply_func`.
Note that `apply_func` must be able to accept `partition_idx` kwarg.
lengths : list of ints, default: None
The list of lengths to shuffle the object. Note:
1. Passing `lengths` omits the `num_splits` parameter as the number of splits
will now be inferred from the number of integers present in `lengths`.
2. When passing lengths you must explicitly specify `keep_partitioning=False`.
apply_func_args : list-like, optional
Positional arguments to pass to the `func`.
**kwargs : dict
Additional options that could be used by different engines.
Returns
-------
NumPy array
An array of partition objects.
"""
ErrorMessage.catch_bugs_and_request_email(
failure_condition=keep_partitioning and lengths is not None,
extra_log=f"`keep_partitioning` must be set to `False` when passing `lengths`. Got: {keep_partitioning=} | {lengths=}",
)
# Since we are already splitting the DataFrame back up after an
# operation, we will just use this time to compute the number of
# partitions as best we can right now.
if keep_partitioning and num_splits is None:
num_splits = len(left) if axis == 0 else len(left.T)
elif lengths:
num_splits = len(lengths)
elif num_splits is None:
num_splits = NPartitions.get()
else:
ErrorMessage.catch_bugs_and_request_email(
failure_condition=not isinstance(num_splits, int),
extra_log=f"Expected `num_splits` to be an integer, got: {type(num_splits)} | {num_splits=}",
)
preprocessed_map_func = cls.preprocess_func(apply_func)
left_partitions = cls.axis_partition(left, axis)
right_partitions = None if right is None else cls.axis_partition(right, axis)
# For mapping across the entire axis, we don't maintain partitioning because we
        # may want to line the partitioning up with another BlockPartitions object. Since
# we don't need to maintain the partitioning, this gives us the opportunity to
# load-balance the data as well.
kw = {
"num_splits": num_splits,
"maintain_partitioning": keep_partitioning,
}
if lengths:
kw["lengths"] = lengths
kw["manual_partition"] = True
if apply_indices is None:
apply_indices = np.arange(len(left_partitions))
result_blocks = np.array(
[
left_partitions[i].apply(
preprocessed_map_func,
*(apply_func_args if apply_func_args else []),
other_axis_partition=(
right_partitions if broadcast_all else right_partitions[i]
),
**kw,
**({"partition_idx": idx} if enumerate_partitions else {}),
**kwargs,
)
for idx, i in enumerate(apply_indices)
]
)
        # If we are mapping over columns, the results come back laid out the same
        # way as rows, so we need to transpose the returned 2D NumPy array to
        # restore the correct structure.
return result_blocks.T if not axis else result_blocks
@classmethod
@wait_computations_if_benchmark_mode
def base_map_partitions(
cls,
partitions,
map_func,
func_args=None,
func_kwargs=None,
):
"""
Apply `map_func` to every partition in `partitions`.
Parameters
----------
partitions : NumPy 2D array
Partitions housing the data of Modin Frame.
map_func : callable
Function to apply.
func_args : iterable, optional
Positional arguments for the 'map_func'.
func_kwargs : dict, optional
Keyword arguments for the 'map_func'.
Returns
-------
NumPy array
An array of partitions
"""
preprocessed_map_func = cls.preprocess_func(map_func)
return np.array(
[
[
part.apply(
preprocessed_map_func,
*func_args if func_args is not None else (),
**func_kwargs if func_kwargs is not None else {},
)
for part in row_of_parts
]
for row_of_parts in partitions
]
)
@classmethod
@wait_computations_if_benchmark_mode
def broadcast_apply(
cls,
axis,
apply_func,
left,
right,
):
"""
Broadcast the `right` partitions to `left` and apply `apply_func` function using different approaches to achieve the best performance.
Parameters
----------
axis : {0, 1}
Axis to apply and broadcast over.
apply_func : callable
Function to apply.
left : np.ndarray
NumPy array of left partitions.
right : np.ndarray
NumPy array of right partitions.
Returns
-------
np.ndarray
NumPy array of result partition objects.
"""
if not DynamicPartitioning.get():
# block-wise broadcast
new_partitions = cls.base_broadcast_apply(
axis,
apply_func,
left,
right,
)
else:
# The dynamic partitioning behavior of `broadcast_apply` differs from that of `map_partitions`,
# since the columnar approach for `broadcast_apply` results in slowdown.
# axis-wise broadcast
new_partitions = cls.broadcast_axis_partitions(
axis=axis ^ 1,
left=left,
right=right,
apply_func=apply_func,
broadcast_all=False,
keep_partitioning=True,
)
return new_partitions
@classmethod
@wait_computations_if_benchmark_mode
def map_partitions(
cls,
partitions,
map_func,
func_args=None,
func_kwargs=None,
):
"""
Apply `map_func` to `partitions` using different approaches to achieve the best performance.
Parameters
----------
partitions : NumPy 2D array
Partitions housing the data of Modin Frame.
map_func : callable
Function to apply.
func_args : iterable, optional
Positional arguments for the 'map_func'.
func_kwargs : dict, optional
Keyword arguments for the 'map_func'.
Returns
-------
NumPy array
An array of partitions
"""
if not DynamicPartitioning.get():
# block-wise map
new_partitions = cls.base_map_partitions(
partitions, map_func, func_args, func_kwargs
)
else:
# axis-wise map
# we choose an axis for a combination of partitions
# whose size is closer to the number of CPUs
if abs(partitions.shape[0] - CpuCount.get()) < abs(
partitions.shape[1] - CpuCount.get()
):
axis = 1
else:
axis = 0
column_splits = CpuCount.get() // partitions.shape[1]
if axis == 0 and column_splits > 1:
# splitting by parts of columnar partitions
new_partitions = cls.map_partitions_joined_by_column(
partitions, column_splits, map_func, func_args, func_kwargs
)
else:
# splitting by full axis partitions
new_partitions = cls.map_axis_partitions(
axis,
partitions,
lambda df: map_func(
df,
*(func_args if func_args is not None else ()),
**(func_kwargs if func_kwargs is not None else {}),
),
keep_partitioning=True,
)
return new_partitions
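    # Illustrative sketch, not part of the original source: the dynamic-partitioning
    # axis choice used in `map_partitions` above, with a hypothetical 40 x 4 grid of
    # block partitions and a stand-in value of 16 for ``CpuCount.get()``.
    _sketch_rows, _sketch_cols = 40, 4
    _sketch_cpus = 16
    # Pick the axis whose partition count is closer to the number of CPUs.
    _sketch_axis = 1 if abs(_sketch_rows - _sketch_cpus) < abs(_sketch_cols - _sketch_cpus) else 0
    _sketch_column_splits = _sketch_cpus // _sketch_cols
    # Here ``_sketch_axis == 0`` and ``_sketch_column_splits == 4 > 1``, so the
    # joined-by-column path would be taken.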
@classmethod
@wait_computations_if_benchmark_mode
def lazy_map_partitions(
cls,
partitions,
map_func,
func_args=None,
func_kwargs=None,
enumerate_partitions=False,
):
"""
Apply `map_func` to every partition in `partitions` *lazily*.
Parameters
----------
partitions : NumPy 2D array
Partitions of Modin Frame.
map_func : callable
Function to apply.
func_args : iterable, optional
Positional arguments for the 'map_func'.
func_kwargs : dict, optional
Keyword arguments for the 'map_func'.
        enumerate_partitions : bool, default: False
            Whether or not to pass partition index into `map_func`.
            Note that `map_func` must be able to accept `partition_idx` kwarg.
Returns
-------
NumPy array
An array of partitions
"""
preprocessed_map_func = cls.preprocess_func(map_func)
return np.array(
[
[
part.add_to_apply_calls(
preprocessed_map_func,
*(tuple() if func_args is None else func_args),
**func_kwargs if func_kwargs is not None else {},
**({"partition_idx": i} if enumerate_partitions else {}),
)
for part in row
]
for i, row in enumerate(partitions)
]
)
@classmethod
def map_axis_partitions(
cls,
axis,
partitions,
map_func,
keep_partitioning=False,
num_splits=None,
lengths=None,
enumerate_partitions=False,
**kwargs,
):
"""
Apply `map_func` to every partition in `partitions` along given `axis`.
Parameters
----------
axis : {0, 1}
Axis to perform the map across (0 - index, 1 - columns).
partitions : NumPy 2D array
Partitions of Modin Frame.
map_func : callable
Function to apply.
keep_partitioning : boolean, default: False
The flag to keep partition boundaries for Modin Frame if possible.
Setting it to True disables shuffling data from one partition to another in case the resulting
number of splits is equal to the initial number of splits.
num_splits : int, optional
The number of partitions to split the result into across the `axis`. If None, then the number
            of splits will be inferred automatically. If `num_splits` is None and `keep_partitioning=True`
then the number of splits is preserved.
lengths : list of ints, default: None
The list of lengths to shuffle the object. Note:
1. Passing `lengths` omits the `num_splits` parameter as the number of splits
will now be inferred from the number of integers present in `lengths`.
2. When passing lengths you must explicitly specify `keep_partitioning=False`.
enumerate_partitions : bool, default: False
Whether or not to pass partition index into `map_func`.
Note that `map_func` must be able to accept `partition_idx` kwarg.
**kwargs : dict
Additional options that could be used by different engines.
Returns
-------
NumPy array
An array of new partitions for Modin Frame.
Notes
-----
This method should be used in the case when `map_func` relies on
some global information about the axis.
"""
return cls.broadcast_axis_partitions(
axis=axis,
left=partitions,
apply_func=map_func,
keep_partitioning=keep_partitioning,
num_splits=num_splits,
right=None,
lengths=lengths,
enumerate_partitions=enumerate_partitions,
**kwargs,
)
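    # Illustrative sketch, not part of the original source: the interplay of
    # `lengths`, `num_splits` and `keep_partitioning` described in the docstring of
    # `map_axis_partitions` above, expressed with plain values instead of partitions.
    def _sketch_resolve_num_splits(keep_partitioning=False, num_splits=None, lengths=None):
        # Passing `lengths` requires ``keep_partitioning=False`` and fixes the split count.
        assert not (keep_partitioning and lengths is not None)
        if lengths is not None:
            return len(lengths)
        # None means "infer automatically", or "preserve" when ``keep_partitioning=True``.
        return num_splits

    assert _sketch_resolve_num_splits(lengths=[3, 4, 3]) == 3
    assert _sketch_resolve_num_splits(keep_partitioning=True) is None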
@classmethod
def map_partitions_joined_by_column(
cls,
partitions,
column_splits,
map_func,
map_func_args=None,
map_func_kwargs=None,
):
"""
Combine several blocks by column into one virtual partition and apply "map_func" to them.
Parameters
----------
partitions : NumPy 2D array
Partitions of Modin Frame.
column_splits : int
The number of splits by column.
map_func : callable
Function to apply.
map_func_args : iterable, optional
Positional arguments for the 'map_func'.
map_func_kwargs : dict, optional
Keyword arguments for the 'map_func'.
Returns
-------
NumPy array
An array of new partitions for Modin Frame.
"""
if column_splits < 1:
raise ValueError(
"The value of columns_splits must be greater than or equal to 1."
)
# step cannot be less than 1
step = max(partitions.shape[0] // column_splits, 1)
preprocessed_map_func = cls.preprocess_func(map_func)
result = np.empty(partitions.shape, dtype=object)
for i in range(
0,
partitions.shape[0],
step,
):
partitions_subset = partitions[i : i + step]
# This is necessary when ``partitions.shape[0]`` is not divisible
# by `column_splits` without a remainder.
actual_step = len(partitions_subset)
kw = {
"num_splits": actual_step,
}
joined_column_partitions = cls.column_partitions(partitions_subset)
for j in range(partitions.shape[1]):
result[i : i + actual_step, j] = joined_column_partitions[j].apply(
preprocessed_map_func,
*map_func_args if map_func_args is not None else (),
**kw,
**map_func_kwargs if map_func_kwargs is not None else {},
)
return result
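    # Illustrative sketch, not part of the original source: how the loop in
    # `map_partitions_joined_by_column` above groups row partitions into chunks of
    # `step`, assuming 10 row partitions and ``column_splits == 4``.
    _sketch_num_rows, _sketch_splits = 10, 4
    _sketch_step = max(_sketch_num_rows // _sketch_splits, 1)  # == 2
    _sketch_groups = [
        list(range(i, min(i + _sketch_step, _sketch_num_rows)))
        for i in range(0, _sketch_num_rows, _sketch_step)
    ]
    # _sketch_groups == [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]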
@classmethod
def concat(cls, axis, left_parts, right_parts):
"""
Concatenate the blocks of partitions with another set of blocks.
Parameters
----------
axis : int
The axis to concatenate to.
left_parts : np.ndarray
NumPy array of partitions to concatenate with.
right_parts : np.ndarray or list
NumPy array of partitions to be concatenated.
Returns
-------
np.ndarray
A new NumPy array with concatenated partitions.
list[int] or None
            Row lengths if it is possible to compute them.
Notes
-----
Assumes that the blocks are already the same shape on the
dimension being concatenated. A ValueError will be thrown if this
condition is not met.
"""
# TODO: Possible change is `isinstance(right_parts, list)`
if type(right_parts) is list:
# `np.array` with partitions of empty ModinFrame has a shape (0,)
# but `np.concatenate` can concatenate arrays only if its shapes at
# specified axis are equals, so filtering empty frames to avoid concat error
right_parts = [o for o in right_parts if o.size != 0]
to_concat = (
[left_parts] + right_parts if left_parts.size != 0 else right_parts
)
result = (
np.concatenate(to_concat, axis=axis) if len(to_concat) else left_parts
)
else:
result = np.append(left_parts, right_parts, axis=axis)
if axis == 0:
return cls.rebalance_partitions(result)
else:
return result, None
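    # Illustrative sketch, not part of the original source: why `concat` above filters
    # out empty frames before concatenation. It reuses the module-level NumPy import
    # (``np``) and plain object arrays as stand-ins for partition grids.
    _sketch_left = np.array([["p00", "p01"]], dtype=object)
    _sketch_right = [
        np.array([], dtype=object),                # partitions of an empty frame: shape (0,)
        np.array([["q00", "q01"]], dtype=object),  # a regular 1 x 2 partition grid
    ]
    # Dropping the (0,)-shaped array avoids a dimension mismatch in np.concatenate.
    _sketch_right = [o for o in _sketch_right if o.size != 0]
    _sketch_result = np.concatenate([_sketch_left] + _sketch_right, axis=0)  # shape (2, 2)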
@classmethod
def to_pandas(cls, partitions):
"""
Convert NumPy array of PandasDataframePartition to pandas DataFrame.
Parameters
----------
partitions : np.ndarray
NumPy array of PandasDataframePartition.
Returns
-------
pandas.DataFrame
A pandas DataFrame
"""
return create_pandas_df_from_partitions(
cls.get_objects_from_partitions(partitions.flatten()), partitions.shape
)
@classmethod
def to_numpy(cls, partitions, **kwargs):
"""
Convert NumPy array of PandasDataframePartition to NumPy array of data stored within `partitions`.
Parameters
----------
partitions : np.ndarray
NumPy array of PandasDataframePartition.
**kwargs : dict
Keyword arguments for PandasDataframePartition.to_numpy function.
Returns
-------
np.ndarray
A NumPy array.
"""
return np.block(
[[block.to_numpy(**kwargs) for block in row] for row in partitions]
)
@classmethod
def split_pandas_df_into_partitions(
cls, df, row_chunksize, col_chunksize, update_bar
):
"""
Split given pandas DataFrame according to the row/column chunk sizes into distributed partitions.
Parameters
----------
df : pandas.DataFrame
row_chunksize : int
col_chunksize : int
update_bar : callable(x) -> x
Function that updates a progress bar.
Returns
-------
2D np.ndarray[PandasDataframePartition]
"""
put_func = cls._partition_class.put
# even a full-axis slice can cost something (https://github.com/pandas-dev/pandas/issues/55202)
# so we try not to do it if unnecessary.
if col_chunksize >= len(df.columns):
col_parts = [df]
else:
col_parts = [
df.iloc[:, i : i + col_chunksize]
for i in range(0, len(df.columns), col_chunksize)
]
parts = [
[
update_bar(
put_func(col_part.iloc[i : i + row_chunksize]),
)
for col_part in col_parts
]
for i in range(0, len(df), row_chunksize)
]
return np.array(parts)
@classmethod
@wait_computations_if_benchmark_mode
def from_pandas(cls, df, return_dims=False):
"""
Return the partitions from pandas.DataFrame.
Parameters
----------
df : pandas.DataFrame
A pandas.DataFrame.
return_dims : bool, default: False
            If True, return as (np.ndarray, backend, row_lengths, col_widths),
            else (np.ndarray, backend).
Returns
-------
(np.ndarray, backend) or (np.ndarray, backend, row_lengths, col_widths)
A NumPy array with partitions (with dimensions or not).
"""
num_splits = NPartitions.get()
min_row_block_size = MinRowPartitionSize.get()
min_column_block_size = MinColumnPartitionSize.get()
row_chunksize = compute_chunksize(df.shape[0], num_splits, min_row_block_size)
col_chunksize = compute_chunksize(
df.shape[1], num_splits, min_column_block_size
)
bar_format = (
"{l_bar}{bar}{r_bar}"
if os.environ.get("DEBUG_PROGRESS_BAR", "False") == "True"
else "{desc}: {percentage:3.0f}%{bar} Elapsed time: {elapsed}, estimated remaining time: {remaining}"
)
if ProgressBar.get():
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
from tqdm.autonotebook import tqdm as tqdm_notebook
except ImportError:
raise ImportError("Please pip install tqdm to use the progress bar")
rows = max(1, round(len(df) / row_chunksize))
cols = max(1, round(len(df.columns) / col_chunksize))
update_count = rows * cols
pbar = tqdm_notebook(
total=round(update_count),
desc="Distributing Dataframe",
bar_format=bar_format,
)
else:
pbar = None
def update_bar(f):
if ProgressBar.get():
pbar.update(1)
return f
parts = cls.split_pandas_df_into_partitions(
df, row_chunksize, col_chunksize, update_bar
)
backend = get_pandas_backend(df.dtypes)
if ProgressBar.get():
pbar.close()
if not return_dims:
return parts, backend
else:
row_lengths = [
(
row_chunksize
if i + row_chunksize < len(df)
else len(df) % row_chunksize or row_chunksize
)
for i in range(0, len(df), row_chunksize)
]
col_widths = [
(
col_chunksize
if i + col_chunksize < len(df.columns)
else len(df.columns) % col_chunksize or col_chunksize
)
for i in range(0, len(df.columns), col_chunksize)
]
return parts, backend, row_lengths, col_widths
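    # Illustrative sketch, not part of the original source: the `row_lengths` formula
    # used in `from_pandas` above, for a hypothetical frame of 10 rows and
    # ``row_chunksize == 4``.
    _sketch_n_rows, _sketch_chunk = 10, 4
    _sketch_row_lengths = [
        (
            _sketch_chunk
            if i + _sketch_chunk < _sketch_n_rows
            else _sketch_n_rows % _sketch_chunk or _sketch_chunk
        )
        for i in range(0, _sketch_n_rows, _sketch_chunk)
    ]
    # _sketch_row_lengths == [4, 4, 2]; the ``or`` keeps a full chunk when the division
    # is exact (e.g. 8 rows with chunksize 4 give [4, 4]).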
@classmethod
def from_arrow(cls, at, return_dims=False):
"""
Return the partitions from Apache Arrow (PyArrow).
Parameters
----------
at : pyarrow.table
Arrow Table.
return_dims : bool, default: False
            If True, return as (np.ndarray, backend, row_lengths, col_widths),
            else (np.ndarray, backend).
Returns
-------
(np.ndarray, backend) or (np.ndarray, backend, row_lengths, col_widths)
A NumPy array with partitions (with dimensions or not).
"""
return cls.from_pandas(at.to_pandas(), return_dims=return_dims)
@classmethod
def get_objects_from_partitions(cls, partitions):
"""
Get the objects wrapped by `partitions` (in parallel if supported).
Parameters
----------
partitions : np.ndarray
NumPy array with ``PandasDataframePartition``-s.
Returns
-------
list
The objects wrapped by `partitions`.
"""
if hasattr(cls, "_execution_wrapper"):
# more efficient parallel implementation
for idx, part in enumerate(partitions):
if hasattr(part, "force_materialization"):
partitions[idx] = part.force_materialization()
assert all(
[len(partition.list_of_blocks) == 1 for partition in partitions]
), "Implementation assumes that each partition contains a single block."
return cls._execution_wrapper.materialize(
[partition.list_of_blocks[0] for partition in partitions]
)
return [partition.get() for partition in partitions]
@classmethod
def wait_partitions(cls, partitions):
"""
Wait on the objects wrapped by `partitions`, without materializing them.
This method will block until all computations in the list have completed.
Parameters
----------
partitions : np.ndarray
NumPy array with ``PandasDataframePartition``-s.
Notes
-----
        This method should be implemented in a more efficient way for engines that
        support waiting on objects in parallel.
"""
for partition in partitions:
partition.wait()
@classmethod
def get_indices(cls, axis, partitions, index_func=None):
"""
Get the internal indices stored in the partitions.
Parameters
----------
axis : {0, 1}
Axis to extract the labels over.
partitions : np.ndarray
NumPy array with PandasDataframePartition's.
index_func : callable, default: None
The function to be used to extract the indices.
Returns
-------
pandas.Index
A pandas Index object.
list of pandas.Index
The list of internal indices for each partition.
Notes
-----
These are the global indices of the object. This is mostly useful
when you have deleted rows/columns internally, but do not know
which ones were deleted.
"""
if index_func is None:
index_func = lambda df: df.axes[axis] # noqa: E731
ErrorMessage.catch_bugs_and_request_email(not callable(index_func))
func = cls.preprocess_func(index_func)
target = partitions.T if axis == 0 else partitions
if len(target):
new_idx = [idx.apply(func) for idx in target[0]]
new_idx = cls.get_objects_from_partitions(new_idx)
else:
new_idx = [pandas.Index([])]
# filter empty indexes in case there are multiple partitions
total_idx = list(filter(len, new_idx))
if len(total_idx) > 0:
# TODO FIX INFORMATION LEAK!!!!1!!1!!
total_idx = total_idx[0].append(total_idx[1:])
else:
# Meaning that all partitions returned a zero-length index,
# in this case, we return an index of any partition to preserve
# the index's metadata
total_idx = new_idx[0]
return total_idx, new_idx
@classmethod
def _apply_func_to_list_of_partitions_broadcast(
cls, func, partitions, other, **kwargs
):
"""
Apply a function to a list of remote partitions.
`other` partitions will be broadcasted to `partitions`
and `func` will be applied.
Parameters
----------
func : callable
The func to apply.
partitions : np.ndarray
The partitions to which the `func` will apply.
other : np.ndarray
The partitions to be broadcasted to `partitions`.
**kwargs : dict
Keyword arguments for PandasDataframePartition.apply function.
Returns
-------
list
A list of PandasDataframePartition objects.
"""
preprocessed_func = cls.preprocess_func(func)
return [
obj.apply(preprocessed_func, other=[o.get() for o in broadcasted], **kwargs)
for obj, broadcasted in zip(partitions, other.T)
]
@classmethod
def _apply_func_to_list_of_partitions(cls, func, partitions, **kwargs):
"""
Apply a function to a list of remote partitions.
Parameters
----------
func : callable
The func to apply.
partitions : np.ndarray
The partitions to which the `func` will apply.
**kwargs : dict
Keyword arguments for PandasDataframePartition.apply function.
Returns
-------
list
A list of PandasDataframePartition objects.
Notes
-----
This preprocesses the `func` first before applying it to the partitions.
"""
preprocessed_func = cls.preprocess_func(func)
return [obj.apply(preprocessed_func, **kwargs) for obj in partitions]
@classmethod
def combine(cls, partitions, new_index=None, new_columns=None):
"""
Convert a NumPy 2D array of partitions to a NumPy 2D array of a single partition.
Parameters
----------
partitions : np.ndarray
The partitions which have to be converted to a single partition.
new_index : pandas.Index, optional
Index for propagation into internal partitions.
Optimization allowing to do this in one remote kernel.
new_columns : pandas.Index, optional
Columns for propagation into internal partitions.
Optimization allowing to do this in one remote kernel.
Returns
-------
np.ndarray
A NumPy 2D array of a single partition.
"""
if partitions.size <= 1 and new_index is None and new_columns is None:
return partitions
def to_pandas_remote(df, partition_shape, *dfs):
"""Copy of ``cls.to_pandas()`` method adapted for a remote function."""
return create_pandas_df_from_partitions(
(df,) + dfs,
partition_shape,
called_from_remote=True,
new_index=new_index,
new_columns=new_columns,
)
preprocessed_func = cls.preprocess_func(to_pandas_remote)
partition_shape = partitions.shape
partitions_flattened = partitions.flatten()
for idx, part in enumerate(partitions_flattened):
if hasattr(part, "force_materialization"):
partitions_flattened[idx] = part.force_materialization()
partition_refs = [
partition.list_of_blocks[0] for partition in partitions_flattened[1:]
]
combined_partition = partitions.flat[0].apply(
preprocessed_func, partition_shape, *partition_refs
)
return np.array([combined_partition]).reshape(1, -1)
@classmethod
@wait_computations_if_benchmark_mode
def apply_func_to_select_indices(
cls, axis, partitions, func, indices, keep_remaining=False
):
"""
Apply a function to select indices.
Parameters
----------
axis : {0, 1}
Axis to apply the `func` over.
partitions : np.ndarray
The partitions to which the `func` will apply.
func : callable
The function to apply to these indices of partitions.
indices : dict
The indices to apply the function to.
keep_remaining : bool, default: False
Whether or not to keep the other partitions. Some operations
may want to drop the remaining partitions and keep
only the results.
Returns
-------
np.ndarray
A NumPy array with partitions.
Notes
-----
Your internal function must take a kwarg `internal_indices` for
this to work correctly. This prevents information leakage of the
internal index to the external representation.
"""
if partitions.size == 0:
return np.array([[]])
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(func, dict):
dict_func = func
else:
dict_func = None
if not axis:
partitions_for_apply = partitions.T
else:
partitions_for_apply = partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_func is not None:
if not keep_remaining:
result = np.array(
[
cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[o_idx],
func_dict={
i_idx: dict_func[i_idx]
for i_idx in list_to_apply
if i_idx >= 0
},
)
for o_idx, list_to_apply in indices.items()
]
)
else:
result = np.array(
[
(
partitions_for_apply[i]
if i not in indices
else cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
func_dict={
idx: dict_func[idx]
for idx in indices[i]
if idx >= 0
},
)
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# We are passing internal indices in here. In order for func to
# actually be able to use this information, it must be able to take in
# the internal indices. This might mean an iloc in the case of Pandas
# or some other way to index into the internal representation.
result = np.array(
[
cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[idx],
internal_indices=list_to_apply,
)
for idx, list_to_apply in indices.items()
]
)
else:
# The difference here is that we modify a subset and return the
# remaining (non-updated) blocks in their original position.
result = np.array(
[
(
partitions_for_apply[i]
if i not in indices
else cls._apply_func_to_list_of_partitions(
func,
partitions_for_apply[i],
internal_indices=indices[i],
)
)
for i in range(len(partitions_for_apply))
]
)
return result.T if not axis else result
@classmethod
@wait_computations_if_benchmark_mode
def apply_func_to_select_indices_along_full_axis(
cls, axis, partitions, func, indices, keep_remaining=False
):
"""
Apply a function to a select subset of full columns/rows.
Parameters
----------
axis : {0, 1}
The axis to apply the function over.
partitions : np.ndarray
The partitions to which the `func` will apply.
func : callable
The function to apply.
indices : list-like
The global indices to apply the func to.
keep_remaining : bool, default: False
Whether or not to keep the other partitions.
Some operations may want to drop the remaining partitions and
keep only the results.
Returns
-------
np.ndarray
A NumPy array with partitions.
Notes
-----
This should be used when you need to apply a function that relies
on some global information for the entire column/row, but only need
to apply a function to a subset.
For your func to operate directly on the indices provided,
it must use `internal_indices` as a keyword argument.
"""
if partitions.size == 0:
return np.array([[]])
# Handling dictionaries has to be done differently, but we still want
# to figure out the partitions that need to be applied to, so we will
# store the dictionary in a separate variable and assign `indices` to
# the keys to handle it the same as we normally would.
if isinstance(func, dict):
dict_func = func
else:
dict_func = None
preprocessed_func = cls.preprocess_func(func)
# Since we might be keeping the remaining blocks that are not modified,
# we have to also keep the block_partitions object in the correct
# direction (transpose for columns).
if not keep_remaining:
selected_partitions = partitions.T if not axis else partitions
selected_partitions = np.array([selected_partitions[i] for i in indices])
selected_partitions = (
selected_partitions.T if not axis else selected_partitions
)
else:
selected_partitions = partitions
if not axis:
partitions_for_apply = cls.column_partitions(selected_partitions)
partitions_for_remaining = partitions.T
else:
partitions_for_apply = cls.row_partitions(selected_partitions)
partitions_for_remaining = partitions
# We may have a command to perform different functions on different
# columns at the same time. We attempt to handle this as efficiently as
# possible here. Functions that use this in the dictionary format must
# accept a keyword argument `func_dict`.
if dict_func is not None:
if not keep_remaining:
result = np.array(
[
part.apply(
preprocessed_func,
func_dict={idx: dict_func[idx] for idx in indices[i]},
)
for i, part in zip(indices, partitions_for_apply)
]
)
else:
result = np.array(
[
(
partitions_for_remaining[i]
if i not in indices
else cls._apply_func_to_list_of_partitions(
preprocessed_func,
partitions_for_apply[i],
func_dict={idx: dict_func[idx] for idx in indices[i]},
)
)
for i in range(len(partitions_for_apply))
]
)
else:
if not keep_remaining:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
part.apply(preprocessed_func, internal_indices=indices[i])
for i, part in zip(indices, partitions_for_apply)
]
)
else:
# See notes in `apply_func_to_select_indices`
result = np.array(
[
(
partitions_for_remaining[i]
if i not in indices
else partitions_for_apply[i].apply(
preprocessed_func, internal_indices=indices[i]
)
)
for i in range(len(partitions_for_remaining))
]
)
return result.T if not axis else result
@classmethod
@wait_computations_if_benchmark_mode
def apply_func_to_indices_both_axis(
cls,
partitions,
func,
row_partitions_list,
col_partitions_list,
item_to_distribute=no_default,
row_lengths=None,
col_widths=None,
):
"""
Apply a function along both axes.
Parameters
----------
partitions : np.ndarray
The partitions to which the `func` will apply.
func : callable
The function to apply.
row_partitions_list : iterable of tuples
Iterable of tuples, containing 2 values:
1. Integer row partition index.
2. Internal row indexer of this partition.
col_partitions_list : iterable of tuples
Iterable of tuples, containing 2 values:
1. Integer column partition index.
2. Internal column indexer of this partition.
item_to_distribute : np.ndarray or scalar, default: no_default
The item to split up so it can be applied over both axes.
row_lengths : list of ints, optional
Lengths of partitions for every row. If not specified this information
is extracted from partitions itself.
col_widths : list of ints, optional
Widths of partitions for every column. If not specified this information
is extracted from partitions itself.
Returns
-------
np.ndarray
A NumPy array with partitions.
Notes
-----
For your func to operate directly on the indices provided,
it must use `row_internal_indices`, `col_internal_indices` as keyword
arguments.
"""
partition_copy = partitions.copy()
row_position_counter = 0
if row_lengths is None:
row_lengths = [None] * len(row_partitions_list)
if col_widths is None:
col_widths = [None] * len(col_partitions_list)
def compute_part_size(indexer, remote_part, part_idx, axis):
"""Compute indexer length along the specified axis for the passed partition."""
if isinstance(indexer, slice):
shapes_container = row_lengths if axis == 0 else col_widths
part_size = shapes_container[part_idx]
if part_size is None:
part_size = (
remote_part.length() if axis == 0 else remote_part.width()
)
shapes_container[part_idx] = part_size
indexer = range(*indexer.indices(part_size))
return len(indexer)
for row_idx, row_values in enumerate(row_partitions_list):
row_blk_idx, row_internal_idx = row_values
col_position_counter = 0
row_offset = 0
for col_idx, col_values in enumerate(col_partitions_list):
col_blk_idx, col_internal_idx = col_values
remote_part = partition_copy[row_blk_idx, col_blk_idx]
row_offset = compute_part_size(
row_internal_idx, remote_part, row_idx, axis=0
)
col_offset = compute_part_size(
col_internal_idx, remote_part, col_idx, axis=1
)
if item_to_distribute is not no_default:
if isinstance(item_to_distribute, np.ndarray):
item = item_to_distribute[
row_position_counter : row_position_counter + row_offset,
col_position_counter : col_position_counter + col_offset,
]
else:
item = item_to_distribute
item = {"item": item}
else:
item = {}
block_result = remote_part.add_to_apply_calls(
func,
row_internal_indices=row_internal_idx,
col_internal_indices=col_internal_idx,
**item,
)
partition_copy[row_blk_idx, col_blk_idx] = block_result
col_position_counter += col_offset
row_position_counter += row_offset
return partition_copy
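    # Illustrative sketch, not part of the original source: how a slice indexer is
    # resolved to a length in `compute_part_size` above, for a hypothetical partition
    # of 7 rows.
    _sketch_part_length = 7
    _sketch_indexer = slice(2, None)  # "row 2 through the end" inside that partition
    _sketch_resolved = range(*_sketch_indexer.indices(_sketch_part_length))
    # len(_sketch_resolved) == 5, i.e. rows 2, 3, 4, 5 and 6 of that partition.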
@classmethod
@wait_computations_if_benchmark_mode
def n_ary_operation(cls, left, func, right: list):
r"""
Apply an n-ary operation to multiple ``PandasDataframe`` objects.
This method assumes that all the partitions of the dataframes in left
and right have the same dimensions. For each position i, j in each
dataframe's partitions, the result has a partition at (i, j) whose data
is func(left_partitions[i,j], \*each_right_partitions[i,j]).
Parameters
----------
left : np.ndarray
The partitions of left ``PandasDataframe``.
func : callable
The function to apply.
right : list of np.ndarray
The list of partitions of other ``PandasDataframe``.
Returns
-------
np.ndarray
A NumPy array with new partitions.
"""
func = cls.preprocess_func(func)
def get_right_block(right_partitions, row_idx, col_idx):
partition = right_partitions[row_idx][col_idx]
blocks = partition.list_of_blocks
"""
NOTE:
Currently we do one remote call per right virtual partition to
materialize the partitions' blocks, then another remote call to do
            the n_ary operation. We could get better performance if we
assembled the other partition within the remote `apply` call, by
passing the partition in as `other_axis_partition`. However,
passing `other_axis_partition` requires some extra care that would
complicate the code quite a bit:
- block partitions don't know how to deal with `other_axis_partition`
- the right axis partition's axis could be different from the axis
of the corresponding left partition
- there can be multiple other_axis_partition because this is an n-ary
operation and n can be > 2.
So for now just do the materialization in a separate remote step.
"""
if len(blocks) > 1:
partition.force_materialization()
assert len(partition.list_of_blocks) == 1
return partition.list_of_blocks[0]
return np.array(
[
[
part.apply(
func,
*(
get_right_block(right_partitions, row_idx, col_idx)
for right_partitions in right
),
)
for col_idx, part in enumerate(left[row_idx])
]
for row_idx in range(len(left))
]
)
@classmethod
def finalize(cls, partitions):
"""
Perform all deferred calls on partitions.
Parameters
----------
partitions : np.ndarray
Partitions of Modin Dataframe on which all deferred calls should be performed.
"""
[part.drain_call_queue() for row in partitions for part in row]
@classmethod
def rebalance_partitions(cls, partitions):
"""
Rebalance a 2-d array of partitions if we are using ``PandasOnRay`` or ``PandasOnDask`` executions.
For all other executions, the partitions are returned unchanged.
Rebalance the partitions by building a new array
of partitions out of the original ones so that:
- If all partitions have a length, each new partition has roughly the same number of rows.
- Otherwise, each new partition spans roughly the same number of old partitions.
Parameters
----------
partitions : np.ndarray
The 2-d array of partitions to rebalance.
Returns
-------
np.ndarray
            A NumPy array with either the same partitions or new, rebalanced partitions,
            depending on the execution engine and storage format.
        list[int] or None
            Row lengths if it is possible to compute them.
"""
# We rebalance when the ratio of the number of existing partitions to
# the ideal number of partitions is larger than this threshold. The
# threshold is a heuristic that may need to be tuned for performance.
max_excess_of_num_partitions = 1.5
num_existing_partitions = partitions.shape[0]
ideal_num_new_partitions = NPartitions.get()
if (
num_existing_partitions
<= ideal_num_new_partitions * max_excess_of_num_partitions
):
return partitions, None
# If any partition has an unknown length, give each axis partition
# roughly the same number of row partitions. We use `_length_cache` here
# to avoid materializing any unmaterialized lengths.
if any(
partition._length_cache is None for row in partitions for partition in row
):
# We need each partition to go into an axis partition, but the
# number of axis partitions may not evenly divide the number of
# partitions.
chunk_size = compute_chunksize(
num_existing_partitions, ideal_num_new_partitions, min_block_size=1
)
new_partitions = np.array(
[
cls.column_partitions(
partitions[i : i + chunk_size],
full_axis=False,
)
for i in range(
0,
num_existing_partitions,
chunk_size,
)
]
)
return new_partitions, None
# If we know the number of rows in every partition, then we should try
# instead to give each new partition roughly the same number of rows.
new_partitions = []
# `start` is the index of the first existing partition that we want to
# put into the current new partition.
start = 0
total_rows = sum(part.length() for part in partitions[:, 0])
ideal_partition_size = compute_chunksize(
total_rows, ideal_num_new_partitions, min_block_size=1
)
for _ in range(ideal_num_new_partitions):
# We might pick up old partitions too quickly and exhaust all of them.
if start >= len(partitions):
break
# `stop` is the index of the last existing partition so far that we
# want to put into the current new partition.
stop = start
partition_size = partitions[start][0].length()
# Add existing partitions into the current new partition until the
# number of rows in the new partition hits `ideal_partition_size`.
while stop < len(partitions) and partition_size < ideal_partition_size:
stop += 1
if stop < len(partitions):
partition_size += partitions[stop][0].length()
# If the new partition is larger than we want, split the last
# current partition that it contains into two partitions, where
# the first partition has just enough rows to make the current
# new partition have length `ideal_partition_size`, and the second
# partition has the remainder.
if partition_size > ideal_partition_size * max_excess_of_num_partitions:
prev_length = sum(row[0].length() for row in partitions[start:stop])
new_last_partition_size = ideal_partition_size - prev_length
partitions = np.insert(
partitions,
stop + 1,
[
obj.mask(slice(new_last_partition_size, None), slice(None))
for obj in partitions[stop]
],
0,
)
# TODO: explicit `_length_cache` computing may be avoided after #4903 is merged
for obj in partitions[stop + 1]:
obj._length_cache = partition_size - (
prev_length + new_last_partition_size
)
partitions[stop, :] = [
obj.mask(slice(None, new_last_partition_size), slice(None))
for obj in partitions[stop]
]
# TODO: explicit `_length_cache` computing may be avoided after #4903 is merged
for obj in partitions[stop]:
obj._length_cache = new_last_partition_size
# The new virtual partitions are not `full_axis`, even if they
# happen to span all rows in the dataframe, because they are
# meant to be the final partitions of the dataframe. They've
# already been split up correctly along axis 0, but using the
# default full_axis=True would cause partition.apply() to split
# its result along axis 0.
new_partitions.append(
cls.column_partitions(partitions[start : stop + 1], full_axis=False)
)
start = stop + 1
new_partitions = np.array(new_partitions)
lengths = [part.length() for part in new_partitions[:, 0]]
return new_partitions, lengths
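    # Illustrative sketch, not part of the original source: the rebalance trigger and
    # target partition size used in `rebalance_partitions` above, assuming 40 existing
    # row partitions, an ideal count of 16 (a stand-in for ``NPartitions.get()``) and
    # 1000 known rows in total. ``compute_chunksize`` is approximated by a plain
    # ceiling division.
    _sketch_existing, _sketch_ideal, _sketch_excess = 40, 16, 1.5
    _sketch_needs_rebalance = _sketch_existing > _sketch_ideal * _sketch_excess  # 40 > 24.0 -> True
    _sketch_ideal_partition_size = -(-1000 // _sketch_ideal)  # 63 rows per new partition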
@classmethod
@wait_computations_if_benchmark_mode
def shuffle_partitions(
cls,
partitions,
index,
shuffle_functions: "ShuffleFunctions",
final_shuffle_func,
right_partitions=None,
):
"""
Return shuffled partitions.
Parameters
----------
partitions : np.ndarray
The 2-d array of partitions to shuffle.
index : int or list of ints
The index(es) of the column partitions corresponding to the partitions that contain the column to sample.
shuffle_functions : ShuffleFunctions
An object implementing the functions that we will be using to perform this shuffle.
final_shuffle_func : Callable(pandas.DataFrame) -> pandas.DataFrame
Function that shuffles the data within each new partition.
right_partitions : np.ndarray, optional
Partitions to broadcast to `self` partitions. If specified, the method builds range-partitioning
            for `right_partitions` based on the bins calculated for `partitions`, then performs broadcasting.
Returns
-------
np.ndarray
A list of row-partitions that have been shuffled.
"""
# Mask the partition that contains the column that will be sampled.
masked_partitions = partitions[:, index]
# Sample each partition
sample_func = cls.preprocess_func(shuffle_functions.sample_fn)
if masked_partitions.ndim == 1:
samples = [partition.apply(sample_func) for partition in masked_partitions]
else:
samples = [
cls._row_partition_class(row_part, full_axis=False).apply(sample_func)
for row_part in masked_partitions
]
# Get each sample to pass in to the pivot function
samples = cls.get_objects_from_partitions(samples)
num_bins = shuffle_functions.pivot_fn(samples)
# Convert our list of block partitions to row partitions. We need to create full-axis
# row partitions since we need to send the whole partition to the split step as otherwise
# we wouldn't know how to split the block partitions that don't contain the shuffling key.
row_partitions = cls.row_partitions(partitions)
if num_bins > 1:
# Gather together all of the sub-partitions
split_row_partitions = np.array(
[
partition.split(
shuffle_functions.split_fn,
num_splits=num_bins,
# The partition's metadata will never be accessed for the split partitions,
# thus no need to compute it.
extract_metadata=False,
)
for partition in row_partitions
]
).T
if right_partitions is None:
# We need to convert every partition that came from the splits into a column partition.
return np.array(
[
[
cls._column_partitions_class(
row_partition, full_axis=False
).apply(final_shuffle_func)
]
for row_partition in split_row_partitions
]
)
right_row_parts = cls.row_partitions(right_partitions)
right_split_row_partitions = np.array(
[
partition.split(
shuffle_functions.split_fn,
num_splits=num_bins,
extract_metadata=False,
)
for partition in right_row_parts
]
).T
return np.array(
[
cls._column_partitions_class(row_partition, full_axis=False).apply(
final_shuffle_func,
other_axis_partition=cls._column_partitions_class(
right_row_partitions
),
)
for right_row_partitions, row_partition in zip(
right_split_row_partitions, split_row_partitions
)
]
)
else:
            # If there are no pivots we can simply apply the function row-wise
if right_partitions is None:
return np.array(
[row_part.apply(final_shuffle_func) for row_part in row_partitions]
)
right_row_parts = cls.row_partitions(right_partitions)
return np.array(
[
row_part.apply(
final_shuffle_func, other_axis_partition=right_row_part
)
for right_row_part, row_part in zip(right_row_parts, row_partitions)
]
)
|
PandasDataframePartitionManager
|
python
|
jina-ai__jina
|
tests/integration/sparse_pipeline/test_sparse_pipeline.py
|
{
"start": 567,
"end": 2072
}
|
class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.docs = DocumentArray()
@requests(on='/index')
def encode(self, docs: DocumentArray, *args, **kwargs) -> Any:
for i, doc in enumerate(docs):
doc.embedding = sparse.coo_matrix(doc.content)
self.docs.extend(docs)
@requests(on='/search')
def query(self, docs: DocumentArray, parameters, *args, **kwargs):
top_k = int(parameters['top_k'])
for doc in docs:
doc.matches = self.docs[:top_k]
def test_sparse_pipeline(mocker, docs_to_index):
def validate(response):
assert len(response.docs) == 1
for doc in response.docs:
assert len(doc.matches) == TOP_K
for i, match in enumerate(doc.matches):
assert match.id == docs_to_index[i].id
assert isinstance(match.embedding, sparse.coo_matrix)
f = Flow().add(uses=DummyCSRSparseIndexEncoder)
mock = mocker.Mock()
error_mock = mocker.Mock()
with f:
f.post(
on='/index',
inputs=docs_to_index,
on_error=error_mock,
)
f.post(
on='/search',
inputs=docs_to_index[0],
parameters={'top_k': TOP_K},
on_done=mock,
on_error=error_mock,
)
mock.assert_called_once()
validate_callback(mock, validate)
error_mock.assert_not_called()
|
DummyCSRSparseIndexEncoder
|
python
|
pytorch__pytorch
|
torch/distributed/optim/functional_adam.py
|
{
"start": 811,
"end": 7391
}
|
class ____:
def __init__(
self,
params: list[Tensor],
lr: float = 1e-3,
betas: tuple[float, float] = (0.9, 0.999),
eps: float = 1e-8,
weight_decay: float = 0.0,
amsgrad: bool = False,
maximize: bool = False,
foreach: bool = False,
fused: bool = False,
_allow_empty_param_list: bool = False,
):
_scripted_functional_optimizer_deprecation_warning(stacklevel=2)
if not 0.0 <= lr:
raise ValueError(f"Invalid learning rate: {lr}")
if not 0.0 <= eps:
raise ValueError(f"Invalid epsilon value: {eps}")
if not 0.0 <= betas[0] < 1.0:
raise ValueError(f"Invalid beta parameter at index 0: {betas[0]}")
if not 0.0 <= betas[1] < 1.0:
raise ValueError(f"Invalid beta parameter at index 1: {betas[1]}")
if not 0.0 <= weight_decay:
raise ValueError(f"Invalid weight_decay value: {weight_decay}")
self.defaults = {
"lr": lr,
"eps": eps,
"beta1": betas[0],
"beta2": betas[1],
"weight_decay": weight_decay,
}
self.amsgrad = amsgrad
self.maximize = maximize
self.foreach = foreach
self.fused = fused
self.state = torch.jit.annotate(dict[torch.Tensor, dict[str, torch.Tensor]], {})
if len(params) == 0 and not _allow_empty_param_list:
raise ValueError("optimizer got an empty parameter list")
# NOTE: we only have one param_group and don't allow user to add additional
# param group as it's not a common use case.
self.param_group = {"params": params}
def step_param(self, param: Tensor, grad: Tensor | None):
"""
Similar to step, but operates on a single parameter and optionally a
gradient tensor.
"""
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps: list[Tensor] = []
has_complex = torch.is_complex(param)
if grad is not None:
params_with_grad.append(param)
grads.append(grad)
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state["step"] = torch.tensor(0.0)
state["exp_avg"] = torch.zeros_like(
param, memory_format=torch.preserve_format
)
state["exp_avg_sq"] = torch.zeros_like(
param, memory_format=torch.preserve_format
)
if self.amsgrad:
state["max_exp_avg_sq"] = torch.zeros_like(
param, memory_format=torch.preserve_format
)
state = self.state[param]
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if self.amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
state_steps.append(state["step"])
with torch.no_grad():
F.adam(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=self.amsgrad,
has_complex=has_complex,
maximize=self.maximize,
beta1=self.defaults["beta1"],
beta2=self.defaults["beta2"],
lr=self.defaults["lr"],
weight_decay=self.defaults["weight_decay"],
eps=self.defaults["eps"],
foreach=self.foreach,
fused=self.fused,
grad_scale=None,
found_inf=None,
)
def step(self, gradients: list[Tensor | None]):
params = self.param_group["params"]
params_with_grad = []
grads = []
exp_avgs = []
exp_avg_sqs = []
max_exp_avg_sqs = []
state_steps: list[Tensor] = []
has_complex = False
if len(params) != len(gradients):
raise ValueError(
"the gradients passed in does not equal to the size of the parameters!"
+ f"Params length: {len(params)}. "
+ f"Gradients length: {len(gradients)}"
)
for param, gradient in zip(self.param_group["params"], gradients):
if gradient is not None:
has_complex |= torch.is_complex(param)
params_with_grad.append(param)
grads.append(gradient)
# Lazy state initialization
if param not in self.state:
self.state[param] = {}
state = self.state[param]
state["step"] = torch.tensor(0.0)
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(
param, memory_format=torch.preserve_format
)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
param, memory_format=torch.preserve_format
)
if self.amsgrad:
# Maintains max of all exp. moving avg. of sq. grad. values
state["max_exp_avg_sq"] = torch.zeros_like(
param, memory_format=torch.preserve_format
)
state = self.state[param]
exp_avgs.append(state["exp_avg"])
exp_avg_sqs.append(state["exp_avg_sq"])
if self.amsgrad:
max_exp_avg_sqs.append(state["max_exp_avg_sq"])
state_steps.append(state["step"])
with torch.no_grad():
F.adam(
params_with_grad,
grads,
exp_avgs,
exp_avg_sqs,
max_exp_avg_sqs,
state_steps,
amsgrad=self.amsgrad,
has_complex=has_complex,
maximize=self.maximize,
beta1=self.defaults["beta1"],
beta2=self.defaults["beta2"],
lr=self.defaults["lr"],
weight_decay=self.defaults["weight_decay"],
eps=self.defaults["eps"],
foreach=self.foreach,
fused=self.fused,
grad_scale=None,
found_inf=None,
)
|
_FunctionalAdam
|
python
|
walkccc__LeetCode
|
solutions/819. Most Common Word/819.py
|
{
"start": 0,
"end": 268
}
|
class ____:
def mostCommonWord(self, paragraph: str, banned: list[str]) -> str:
banned = set(banned)
words = re.findall(r'\w+', paragraph.lower())
return collections.Counter(
word for word in words if word not in banned).most_common(1)[0][0]
|
Solution
|
python
|
sphinx-doc__sphinx
|
sphinx/search/it.py
|
{
"start": 193,
"end": 596
}
|
class ____(SearchLanguage):
lang = 'it'
language_name = 'Italian'
js_stemmer_rawcode = 'italian-stemmer.js'
stopwords = ITALIAN_STOPWORDS
def __init__(self, options: dict[str, str]) -> None:
super().__init__(options)
self.stemmer = snowballstemmer.stemmer('italian')
def stem(self, word: str) -> str:
return self.stemmer.stemWord(word.lower())
|
SearchItalian
|
python
|
rapidsai__cudf
|
python/cudf/cudf/core/udf/groupby_typing.py
|
{
"start": 10156,
"end": 12894
}
|
class ____(AttributeTemplate):
key = GroupType
resolve_max = _make_unary_attr("max")
resolve_min = _make_unary_attr("min")
resolve_sum = _make_unary_attr("sum")
resolve_mean = _make_unary_attr("mean")
resolve_var = _make_unary_attr("var")
resolve_std = _make_unary_attr("std")
resolve_size = _create_reduction_attr(
"GroupType.size", retty=group_size_type
)
resolve_count = _create_reduction_attr(
"GroupType.count", retty=types.int64
)
def resolve_idxmax(self, mod):
return types.BoundFunction(
GroupIdxMax, GroupType(mod.group_scalar_type, mod.index_type)
)
def resolve_idxmin(self, mod):
return types.BoundFunction(
GroupIdxMin, GroupType(mod.group_scalar_type, mod.index_type)
)
def resolve_corr(self, mod):
return types.BoundFunction(
GroupCorr, GroupType(mod.group_scalar_type, mod.index_type)
)
for ty in SUPPORTED_GROUPBY_NUMBA_TYPES:
_register_cuda_unary_reduction_caller("Max", ty, ty)
_register_cuda_unary_reduction_caller("Min", ty, ty)
_register_cuda_idx_reduction_caller("IdxMax", ty)
_register_cuda_idx_reduction_caller("IdxMin", ty)
if ty in types.integer_domain:
_register_cuda_binary_reduction_caller("Corr", ty, ty, types.float64)
_register_cuda_unary_reduction_caller("Sum", types.int32, types.int64)
_register_cuda_unary_reduction_caller("Sum", types.int64, types.int64)
_register_cuda_unary_reduction_caller("Sum", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Sum", types.float64, types.float64)
_register_cuda_unary_reduction_caller("Mean", types.int32, types.float64)
_register_cuda_unary_reduction_caller("Mean", types.int64, types.float64)
_register_cuda_unary_reduction_caller("Mean", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Mean", types.float64, types.float64)
_register_cuda_unary_reduction_caller("Std", types.int32, types.float64)
_register_cuda_unary_reduction_caller("Std", types.int64, types.float64)
_register_cuda_unary_reduction_caller("Std", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Std", types.float64, types.float64)
_register_cuda_unary_reduction_caller("Var", types.int32, types.float64)
_register_cuda_unary_reduction_caller("Var", types.int64, types.float64)
_register_cuda_unary_reduction_caller("Var", types.float32, types.float32)
_register_cuda_unary_reduction_caller("Var", types.float64, types.float64)
for attr in ("group_data", "index", "size"):
make_attribute_wrapper(GroupType, attr, attr)
for op in arith_ops + comparison_ops + unary_ops:
cuda_registry.register_global(op)(GroupOpBase)
|
GroupAttr
|
python
|
pandas-dev__pandas
|
pandas/tests/reshape/test_melt.py
|
{
"start": 19650,
"end": 25705
}
|
class ____:
def test_pairs(self):
data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [1766, 3301, 1454, 3139, 4133],
"id": [101, 102, 103, 104, 105],
"sex": ["Male", "Female", "Female", "Female", "Female"],
"visitdt1": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
],
"visitdt2": ["21jan2009", np.nan, "22jan2009", "31dec2008", "03feb2009"],
"visitdt3": ["05feb2009", np.nan, np.nan, "02jan2009", "15feb2009"],
"wt1": [1823, 3338, 1549, 3298, 4306],
"wt2": [2011.0, np.nan, 1892.0, 3338.0, 4575.0],
"wt3": [2293.0, np.nan, np.nan, 3377.0, 4805.0],
}
df = DataFrame(data)
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 4)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
result = lreshape(df, spec)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
1454,
3139,
4133,
1766,
3139,
4133,
],
"id": [101, 102, 103, 104, 105, 101, 103, 104, 105, 101, 104, 105],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
1892.0,
3338.0,
4575.0,
2293.0,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
result = lreshape(df, spec, dropna=False)
exp_data = {
"birthdt": [
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
"08jan2009",
"20dec2008",
"30dec2008",
"21dec2008",
"11jan2009",
],
"birthwt": [
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
1766,
3301,
1454,
3139,
4133,
],
"id": [
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
101,
102,
103,
104,
105,
],
"sex": [
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
"Male",
"Female",
"Female",
"Female",
"Female",
],
"visitdt": [
"11jan2009",
"22dec2008",
"04jan2009",
"29dec2008",
"20jan2009",
"21jan2009",
np.nan,
"22jan2009",
"31dec2008",
"03feb2009",
"05feb2009",
np.nan,
np.nan,
"02jan2009",
"15feb2009",
],
"wt": [
1823.0,
3338.0,
1549.0,
3298.0,
4306.0,
2011.0,
np.nan,
1892.0,
3338.0,
4575.0,
2293.0,
np.nan,
np.nan,
3377.0,
4805.0,
],
}
exp = DataFrame(exp_data, columns=result.columns)
tm.assert_frame_equal(result, exp)
spec = {
"visitdt": [f"visitdt{i:d}" for i in range(1, 3)],
"wt": [f"wt{i:d}" for i in range(1, 4)],
}
msg = "All column lists must be same length"
with pytest.raises(ValueError, match=msg):
lreshape(df, spec)
|
TestLreshape
|
python
|
huggingface__transformers
|
examples/modular-transformers/modular_new_model.py
|
{
"start": 139,
"end": 1006
}
|
class ____(GemmaConfig):
def __init__(
self,
vocab_size=256030,
hidden_size=64,
intermediate_size=90,
num_hidden_layers=28,
num_attention_heads=16,
num_key_value_heads=16,
head_dim=256,
hidden_act="gelu_pytorch_tanh",
hidden_activation=None,
max_position_embeddings=1500,
initializer_range=0.02,
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=0,
eos_token_id=1,
bos_token_id=2,
tie_word_embeddings=True,
rope_theta=10000.0,
attention_bias=False,
attention_dropout=0.0,
use_bidirectional_attention=False,
layer_types=None,
**kwargs,
):
super().__init__(self, **kwargs)
@property
def num_heads(self):
return self.num_attention_heads
|
NewModelConfig
|
python
|
encode__django-rest-framework
|
tests/schemas/test_coreapi.py
|
{
"start": 2214,
"end": 2356
}
|
class ____(serializers.Serializer):
c = serializers.CharField(required=True)
d = serializers.CharField(required=False)
|
AnotherSerializer
|
python
|
weaviate__weaviate-python-client
|
weaviate/backup/backup.py
|
{
"start": 2215,
"end": 2398
}
|
class ____(BackupStatusReturn):
"""Return type of the backup creation and restore methods."""
collections: List[str] = Field(default_factory=list, alias="classes")
|
BackupReturn
|
python
|
modin-project__modin
|
modin/pandas/indexing.py
|
{
"start": 22404,
"end": 36313
}
|
class ____(_LocationIndexerBase):
"""
An indexer for modin_df.loc[] functionality.
Parameters
----------
modin_df : Union[DataFrame, Series]
DataFrame to operate on.
"""
_extensions: EXTENSION_DICT_TYPE = EXTENSION_DICT_TYPE(dict)
def __getitem__(self, key):
"""
Retrieve dataset according to `key`.
Parameters
----------
key : callable, scalar, or tuple
The global row index to retrieve data from.
Returns
-------
modin.pandas.DataFrame or modin.pandas.Series
Located dataset.
See Also
--------
pandas.DataFrame.loc
"""
if self.df.empty:
return self.df._default_to_pandas(lambda df: df.loc[key])
if isinstance(key, tuple):
key = self._validate_key_length(key)
if (
isinstance(key, tuple)
and len(key) == 2
and all((is_scalar(k) for k in key))
and self.qc.has_multiindex(axis=0)
):
# __getitem__ has no way to distinguish between
            # loc[('level_one_key', 'level_two_key')] and
# loc['level_one_key', 'column_name']. It's possible for both to be valid
# when we have a multiindex on axis=0, and it seems pandas uses
# interpretation 1 if that's possible. Do the same.
locators = self._parse_row_and_column_locators((key, slice(None)))
try:
return self._helper_for__getitem__(key, *locators)
except KeyError:
pass
return self._helper_for__getitem__(
key, *self._parse_row_and_column_locators(key)
)
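    # Illustrative sketch, not part of the original source: the two readings of a
    # 2-tuple key described in the comment inside `__getitem__` above, shown on a
    # small frame with a two-level row index (reusing the module-level ``pandas``).
    _sketch_df = pandas.DataFrame(
        {"column_name": [1, 2]},
        index=pandas.MultiIndex.from_tuples(
            [("level_one_key", "level_two_key"), ("level_one_key", "other")]
        ),
    )
    _sketch_full_key_row = _sketch_df.loc[("level_one_key", "level_two_key")]  # one row, as a Series
    _sketch_row_and_column = _sketch_df.loc["level_one_key", "column_name"]    # level-one rows, one column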
def _helper_for__getitem__(self, key, row_loc, col_loc, ndim):
"""
Retrieve dataset according to `key`, row_loc, and col_loc.
Parameters
----------
key : callable, scalar, or tuple
The global row index to retrieve data from.
row_loc : callable, scalar, or slice
Row locator(s) as a scalar or List.
col_loc : callable, scalar, or slice
            Column locator(s) as a scalar or List.
ndim : int
The number of dimensions of the returned object.
Returns
-------
modin.pandas.DataFrame or modin.pandas.Series
Located dataset.
"""
row_scalar = is_scalar(row_loc)
col_scalar = is_scalar(col_loc)
# The thought process here is that we should check to see that we have a full key lookup
# for a MultiIndex DataFrame. If that's the case, then we should not drop any levels
# since our resulting intermediate dataframe will have dropped these for us already.
# Thus, we need to make sure we don't try to drop these levels again. The logic here is
# kind of hacked together. Ideally, we should handle this properly in the lower-level
# implementations, but this will have to be engineered properly later.
row_multiindex_full_lookup = self._multiindex_possibly_contains_key(
axis=0, key=row_loc
)
col_multiindex_full_lookup = self._multiindex_possibly_contains_key(
axis=1, key=col_loc
)
levels_already_dropped = (
row_multiindex_full_lookup or col_multiindex_full_lookup
)
if isinstance(row_loc, Series) and is_boolean_array(row_loc):
return self._handle_boolean_masking(row_loc, col_loc)
qc_view = self.qc.take_2d_labels(row_loc, col_loc)
result = self._get_pandas_object_from_qc_view(
qc_view,
row_multiindex_full_lookup,
col_multiindex_full_lookup,
row_scalar,
col_scalar,
ndim,
)
if isinstance(result, Series):
result._parent = self.df
result._parent_axis = 0
col_loc_as_list = [col_loc] if col_scalar else col_loc
row_loc_as_list = [row_loc] if row_scalar else row_loc
# Pandas drops the levels that are in the `loc`, so we have to as well.
if (
isinstance(result, (Series, DataFrame))
and result._query_compiler.has_multiindex()
and not levels_already_dropped
):
if (
isinstance(result, Series)
and not isinstance(col_loc_as_list, slice)
and all(
col_loc_as_list[i] in result.index.levels[i]
for i in range(len(col_loc_as_list))
)
):
result.index = result.index.droplevel(list(range(len(col_loc_as_list))))
elif not isinstance(row_loc_as_list, slice) and all(
not isinstance(row_loc_as_list[i], slice)
and row_loc_as_list[i] in result.index.levels[i]
for i in range(len(row_loc_as_list))
):
result.index = result.index.droplevel(list(range(len(row_loc_as_list))))
if (
isinstance(result, DataFrame)
and not isinstance(col_loc_as_list, slice)
and not levels_already_dropped
and result._query_compiler.has_multiindex(axis=1)
and all(
col_loc_as_list[i] in result.columns.levels[i]
for i in range(len(col_loc_as_list))
)
):
result.columns = result.columns.droplevel(list(range(len(col_loc_as_list))))
# This is done for cases where the index passed in has other state, like a
# frequency in the case of DateTimeIndex.
if (
row_loc is not None
and isinstance(col_loc, slice)
and col_loc == slice(None)
and isinstance(key, pandas.Index)
):
result.index = key
return result
def __setitem__(self, key, item):
"""
Assign `item` value to dataset located by `key`.
Parameters
----------
key : callable or tuple
The global row index to assign data to.
item : modin.pandas.DataFrame, modin.pandas.Series or scalar
Value that should be assigned to located dataset.
See Also
--------
pandas.DataFrame.loc
"""
if self.df.empty:
def _loc(df):
df.loc[key] = item
return df
self.df._update_inplace(
new_query_compiler=self.df._default_to_pandas(_loc)._query_compiler
)
self.qc = self.df._query_compiler
return
row_loc, col_loc, ndims = self._parse_row_and_column_locators(key)
append_axis = self._check_missing_loc(row_loc, col_loc)
if ndims >= 1 and append_axis is not None:
# We enter this codepath if we're either appending a row or a column
if append_axis:
# Appending at least one new column
if is_scalar(col_loc):
col_loc = [col_loc]
self._setitem_with_new_columns(row_loc, col_loc, item)
else:
# Appending at most one new row
if is_scalar(row_loc) or len(row_loc) == 1:
index = self.qc.index.insert(len(self.qc.index), row_loc)
self.qc = self.qc.reindex(labels=index, axis=0, fill_value=0)
self.df._update_inplace(new_query_compiler=self.qc)
self._set_item_existing_loc(row_loc, col_loc, item)
else:
self._set_item_existing_loc(row_loc, col_loc, item)
self.qc = self.df._query_compiler
def _setitem_with_new_columns(self, row_loc, col_loc, item):
"""
Assign `item` value to dataset located by `row_loc` and `col_loc` with new columns.
Parameters
----------
row_loc : scalar, slice, list, array or tuple
Row locator.
col_loc : list, array or tuple
Columns locator.
item : modin.pandas.DataFrame, modin.pandas.Series or scalar
Value that should be assigned to located dataset.
"""
if is_list_like(item) and not isinstance(item, (DataFrame, Series)):
item = np.array(item)
if len(item.shape) == 1:
if len(col_loc) != 1:
raise ValueError(
"Must have equal len keys and value when setting with an iterable"
)
else:
if item.shape[-1] != len(col_loc):
raise ValueError(
"Must have equal len keys and value when setting with an iterable"
)
common_label_loc = np.isin(col_loc, self.qc.columns.values)
if not all(common_label_loc):
# In this case we have some new cols and some old ones
columns = self.qc.columns
for i in range(len(common_label_loc)):
if not common_label_loc[i]:
columns = columns.insert(len(columns), col_loc[i])
self.qc = self.qc.reindex(labels=columns, axis=1, fill_value=np.nan)
self.df._update_inplace(new_query_compiler=self.qc)
self._set_item_existing_loc(row_loc, np.array(col_loc), item)
self.qc = self.df._query_compiler
def _set_item_existing_loc(self, row_loc, col_loc, item):
"""
Assign `item` value to dataset located by `row_loc` and `col_loc` with existing rows and columns.
Parameters
----------
row_loc : scalar, slice, list, array or tuple
Row locator.
col_loc : scalar, slice, list, array or tuple
Columns locator.
item : modin.pandas.DataFrame, modin.pandas.Series or scalar
Value that should be assigned to located dataset.
"""
if (
isinstance(row_loc, Series)
and is_boolean_array(row_loc)
and is_scalar(item)
):
new_qc = self.df._query_compiler.setitem_bool(
row_loc._query_compiler, col_loc, item
)
self.df._update_inplace(new_qc)
self.qc = self.df._query_compiler
return
row_lookup, col_lookup = self.qc.get_positions_from_labels(row_loc, col_loc)
if isinstance(item, np.ndarray) and is_boolean_array(row_loc):
            # Fix for 'test_loc_series'; np.log(Series) returns np.ndarray instead
            # of Series as it did before (`Series.__array_wrap__` was removed);
            # otherwise incompatible shapes are obtained.
item = item.take(row_lookup)
self._setitem_positional(
row_lookup,
col_lookup,
item,
axis=self._determine_setitem_axis(
row_lookup, col_lookup, is_scalar(row_loc), is_scalar(col_loc)
),
)
def _check_missing_loc(self, row_loc, col_loc):
"""
Help `__setitem__` compute whether an axis needs appending.
Parameters
----------
row_loc : scalar, slice, list, array or tuple
Row locator.
col_loc : scalar, slice, list, array or tuple
Columns locator.
Returns
-------
int or None :
0 if new row, 1 if new column, None if neither.
"""
if is_scalar(row_loc):
return 0 if row_loc not in self.qc.index else None
elif isinstance(row_loc, list):
missing_labels = self._compute_enlarge_labels(
pandas.Index(row_loc), self.qc.index
)
if len(missing_labels) > 1:
# We cast to list to copy pandas' error:
# In pandas, we get: KeyError: [a, b,...] not in index
# If we don't convert to list we get: KeyError: [a b ...] not in index
raise KeyError("{} not in index".format(list(missing_labels)))
if (
not (is_list_like(row_loc) or isinstance(row_loc, slice))
and row_loc not in self.qc.index
):
return 0
if (
isinstance(col_loc, list)
and len(pandas.Index(col_loc).difference(self.qc.columns)) >= 1
):
return 1
if is_scalar(col_loc) and col_loc not in self.qc.columns:
return 1
return None
def _compute_enlarge_labels(self, locator, base_index):
"""
        Help _enlarge_axis compute common labels and extra labels.
Parameters
----------
locator : pandas.Index
Index from locator.
base_index : pandas.Index
Current index.
Returns
-------
nan_labels : pandas.Index
The labels that need to be added.
"""
# base_index_type can be pd.Index or pd.DatetimeIndex
# depending on user input and pandas behavior
# See issue #2264
base_as_index = pandas.Index(list(base_index))
locator_as_index = pandas.Index(list(locator))
if locator_as_index.inferred_type == "boolean":
if len(locator_as_index) != len(base_as_index):
raise ValueError(
f"Item wrong length {len(locator_as_index)} instead of {len(base_as_index)}!"
)
common_labels = base_as_index[locator_as_index]
nan_labels = pandas.Index([])
else:
common_labels = locator_as_index.intersection(base_as_index)
nan_labels = locator_as_index.difference(base_as_index)
if len(common_labels) == 0:
raise KeyError(
"None of [{labels}] are in the [{base_index_name}]".format(
labels=list(locator_as_index), base_index_name=base_as_index
)
)
return nan_labels
|
_LocIndexer
|
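For readers skimming the _LocIndexer sample above: the enlargement paths in __setitem__ and _check_missing_loc mirror plain pandas behaviour, where assigning through .loc to a missing label appends it. A minimal pandas-only sketch (not Modin) of that behaviour:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, index=["x", "y"])
df.loc["z"] = 3       # missing row label: .loc appends a new row
df.loc[:, "b"] = 0    # missing column label: .loc appends a new column
print(df)             # rows x, y, z and columns a, b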
python
|
pytorch__pytorch
|
test/inductor/test_padding.py
|
{
"start": 5562,
"end": 8581
}
|
class ____(TestCaseBase):
@unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
def test_nobias_LinearAndSoftmax_both_shapes(self):
self.test_LinearAndSoftmax_both_shapes(bias=False)
@unittest.skipIf(not DO_PERF_TEST, "Perf test not enabled")
def test_LinearAndSoftmax_both_shapes(self, bias=True):
"""
Compare the perf with good and bad shape.
"""
m_bad_shape = LinearAndSoftmax(vocab_size=30523, bias=bias)
        inputs_bad_shape = m_bad_shape.get_example_inputs()
m_good_shape = LinearAndSoftmax(vocab_size=30528, bias=bias)
inputs_good_shape = m_good_shape.get_example_inputs()
m_bad_shape_opt = torch.compile(m_bad_shape)
m_good_shape_opt = torch.compile(m_good_shape)
latency_good_shape = benchmarker.benchmark_gpu(
lambda: forward_and_backward_pass(m_good_shape_opt, inputs_good_shape)
)
latency_bad_shape = benchmarker.benchmark_gpu(
            lambda: forward_and_backward_pass(m_bad_shape_opt, inputs_bad_shape)
)
print(
f"Latency for good shape v.s. bad shape: {latency_good_shape:.3f}ms v.s. {latency_bad_shape:.3f}ms"
)
@unittest.skipIf(not DO_PERF_TEST or not HAS_TRANSFORMER, "Perf test not enabled")
def test_BertForMaskedLM(self, num_layers=1):
"""
        Compare the perf between doing padding and using a good shape.
"""
from transformers import BertForMaskedLM
config_cls = BertForMaskedLM.config_class
bs = 16
seq_length = 512
def create_model(vocab_size):
config = config_cls()
config.num_hidden_layers = num_layers
config.vocab_size = vocab_size
inputs = gen_transformer_inputs(config.vocab_size, bs, seq_length)
model = BertForMaskedLM(config)
optim = get_optim(model)
def f(**inputs):
optim.zero_grad(True)
with torch.autocast(GPU_TYPE):
pred = model(**inputs)
loss = pred[0]
loss.backward()
optim.step()
return torch.compile(f), inputs
f_good_shape, inputs_good_shape = create_model(30528)
f_bad_shape, inputs_bad_shape = create_model(30522)
print("benchmark for good shape")
latency_good_shape = benchmarker.benchmark_gpu(
lambda: f_good_shape(**inputs_good_shape)
)
print("benchmark for bad shape")
latency_bad_shape = benchmarker.benchmark_gpu(
lambda: f_bad_shape(**inputs_bad_shape)
)
print(
f"Latency with good and bad shape: {latency_good_shape:.3f} v.s. {latency_bad_shape:.3f}"
)
self.do_profiling(
lambda: f_good_shape(**inputs_good_shape),
lambda: f_bad_shape(**inputs_bad_shape),
tag_lhs="With good shape",
tag_rhs="With bad shape",
)
|
PerfTestBetweenGoodAndBadShape
|
python
|
run-llama__llama_index
|
llama-index-integrations/protocols/llama-index-protocols-ag-ui/llama_index/protocols/ag_ui/agent.py
|
{
"start": 1158,
"end": 1295
}
|
class ____(Event):
tool_call_id: str
tool_name: str
tool_kwargs: Dict[str, Any]
tool_output: ToolOutput
|
ToolCallResultEvent
|
python
|
scipy__scipy
|
scipy/fftpack/tests/test_helper.py
|
{
"start": 993,
"end": 1337
}
|
class ____:
def test_definition(self):
x = [0,1,2,3,4,-4,-3,-2,-1]
assert_array_almost_equal(9*fftfreq(9),x)
assert_array_almost_equal(9*pi*fftfreq(9,pi),x)
x = [0,1,2,3,4,-5,-4,-3,-2,-1]
assert_array_almost_equal(10*fftfreq(10),x)
assert_array_almost_equal(10*pi*fftfreq(10,pi),x)
|
TestFFTFreq
|
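The TestFFTFreq sample above checks fftfreq against its definition. As a worked example (using numpy's fftfreq, which follows the same definition as scipy.fftpack.fftfreq), for n = 8 samples spaced d = 0.1 apart the bins are k / (n * d) for k = 0..3 followed by the negative frequencies:

import numpy as np

freqs = np.fft.fftfreq(8, d=0.1)
print(freqs)  # [ 0.    1.25  2.5   3.75 -5.   -3.75 -2.5  -1.25]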
python
|
kamyu104__LeetCode-Solutions
|
Python/determine-color-of-a-chessboard-square.py
|
{
"start": 29,
"end": 255
}
|
class ____(object):
def squareIsWhite(self, coordinates):
"""
:type coordinates: str
:rtype: bool
"""
return (ord(coordinates[0])-ord('a'))%2 != (ord(coordinates[1])-ord('1'))%2
|
Solution
|
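A short worked check of the parity trick used in the Solution above: a square is white exactly when the file parity and rank parity differ. This is a standalone copy for illustration, not the graded class.

def square_is_white(coordinates):
    # file parity: a, c, e, g -> 0; b, d, f, h -> 1
    # rank parity: 1, 3, 5, 7 -> 0; 2, 4, 6, 8 -> 1
    return (ord(coordinates[0]) - ord('a')) % 2 != (ord(coordinates[1]) - ord('1')) % 2

assert square_is_white("a1") is False   # a1 is black
assert square_is_white("h3") is True    # h3 is white
assert square_is_white("c7") is False   # c7 is black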
python
|
mitmproxy__pdoc
|
test/testdata/flavors_google.py
|
{
"start": 5030,
"end": 6148
}
|
class ____(Exception):
"""Exceptions are documented in the same way as classes.
The __init__ method may be documented in either the class level
docstring, or as a docstring on the __init__ method itself.
Either form is acceptable, but the two should not be mixed. Choose one
convention to document the __init__ method and be consistent with it.
Note:
Do not include the `self` parameter in the ``Args`` section.
Args:
msg (str): Human readable string describing the exception.
code (:obj:`int`, optional): Error code.
Attributes:
msg (str): Human readable string describing the exception.
code (int): Exception error code.
"""
def __init__(self, msg, code):
self.msg = msg
self.code = code
def add_note(self, note: str):
"""This method is present on Python 3.11+ and manually added here so that snapshots are consistent."""
def with_traceback(self, object, /):
"""This method has a changed docstring in Python 3.13+ and is manually added here so that snapshots are consistent."""
|
ExampleError
|
python
|
run-llama__llama_index
|
llama-index-core/llama_index/core/evaluation/dataset_generation.py
|
{
"start": 1438,
"end": 3386
}
|
class ____(BaseModel):
"""
Query Response Dataset.
The response can be empty if the dataset is generated from documents.
Args:
queries (Dict[str, str]): Query id -> query.
responses (Dict[str, str]): Query id -> response.
"""
queries: Dict[str, str] = Field(
default_factory=dict, description="Query id -> query"
)
responses: Dict[str, str] = Field(
default_factory=dict, description="Query id -> response"
)
@classmethod
def from_qr_pairs(
cls,
qr_pairs: List[Tuple[str, str]],
) -> QueryResponseDataset:
"""Create from qr pairs."""
# define ids as simple integers
queries = {str(idx): query for idx, (query, _) in enumerate(qr_pairs)}
responses = {str(idx): response for idx, (_, response) in enumerate(qr_pairs)}
return cls(queries=queries, responses=responses)
@property
def qr_pairs(self) -> List[Tuple[str, str]]:
"""Get pairs."""
# if query_id not in response, throw error
for query_id in self.queries:
if query_id not in self.responses:
raise ValueError(f"Query id {query_id} not in responses")
return [
(self.queries[query_id], self.responses[query_id])
for query_id in self.queries
]
@property
def questions(self) -> List[str]:
"""Get questions."""
return list(self.queries.values())
def save_json(self, path: str) -> None:
"""Save json."""
with open(path, "w") as f:
json.dump(self.model_dump(), f, indent=4)
@classmethod
def from_json(cls, path: str) -> QueryResponseDataset:
"""Load json."""
with open(path) as f:
data = json.load(f)
return cls(**data)
@deprecated(
"Deprecated in favor of `RagDatasetGenerator` which should be used instead.",
action="always",
)
|
QueryResponseDataset
|
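A dependency-free sketch of what QueryResponseDataset.from_qr_pairs does with its input: ids are simply the enumeration index rendered as a string, which is why qr_pairs can be reconstructed by zipping the two dicts back together.

qr_pairs = [("What is X?", "X is ..."), ("What is Y?", "Y is ...")]
queries = {str(i): q for i, (q, _) in enumerate(qr_pairs)}
responses = {str(i): r for i, (_, r) in enumerate(qr_pairs)}
assert list(zip(queries.values(), responses.values())) == qr_pairs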
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/typeVarDefaultClass3.py
|
{
"start": 173,
"end": 665
}
|
class ____[T1 = str, T2 = T1](dict[T1, T2]):
def method1(self) -> Self:
return self
reveal_type(
ClassA[int].method1, expected_text="(self: ClassA[int, int]) -> ClassA[int, int]"
)
reveal_type(
ClassA.method1, expected_text="(self: ClassA[str, str]) -> ClassA[str, str]"
)
a1 = ClassA[int]()
reveal_type(a1, expected_text="ClassA[int, int]")
a2 = ClassA()
reveal_type(a2, expected_text="ClassA[str, str]")
# This should generate an error because T2 depends on T1.
|
ClassA
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/instance/types.py
|
{
"start": 949,
"end": 2708
}
|
class ____(logging.Handler):
def __init__(self, instance: "DagsterInstance"):
self._instance = instance
super().__init__()
def emit(self, record: logging.LogRecord) -> None:
from dagster._core.events import EngineEventData
from dagster._core.events.log import StructuredLoggerMessage, construct_event_record
record_metadata = get_log_record_metadata(record)
event = construct_event_record(
StructuredLoggerMessage(
name=record.name,
message=record.msg,
level=record.levelno,
meta=record_metadata,
record=record,
)
)
try:
self._instance.handle_new_event(
event, batch_metadata=record_metadata["dagster_event_batch_metadata"]
)
except Exception as e:
sys.stderr.write(f"Exception while writing logger call to event log: {e}\n")
if event.dagster_event:
# Swallow user-generated log failures so that the entire step/run doesn't fail, but
# raise failures writing system-generated log events since they are the source of
# truth for the state of the run
raise
elif event.run_id:
self._instance.report_engine_event(
"Exception while writing logger call to event log",
job_name=event.job_name,
run_id=event.run_id,
step_key=event.step_key,
engine_event_data=EngineEventData(
error=serializable_error_info_from_exc_info(sys.exc_info()),
),
)
|
_EventListenerLogHandler
|
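The handler above follows the standard logging.Handler pattern: subclass, override emit(), and keep failures inside emit() from taking down the caller. A minimal, self-contained sketch of the same pattern (the ForwardingHandler name and the print sink are illustrative, not Dagster API):

import logging
import sys

class ForwardingHandler(logging.Handler):
    def __init__(self, sink):
        super().__init__()
        self._sink = sink

    def emit(self, record: logging.LogRecord) -> None:
        try:
            # Forward the formatted record to the configured sink.
            self._sink(self.format(record))
        except Exception as exc:
            # Never let a logging failure propagate to the caller.
            sys.stderr.write(f"Exception while forwarding log record: {exc}\n")

logger = logging.getLogger("demo")
logger.propagate = False
logger.addHandler(ForwardingHandler(print))
logger.warning("hello")  # prints "hello" via the sink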
python
|
pytorch__pytorch
|
test/fx/test_future.py
|
{
"start": 554,
"end": 730
}
|
class ____(torch.nn.Module):
def forward(self, x: list[torch.Tensor], a: A) -> torch.Tensor:
return a(x[0])
# Non-torch annotation with internal forward references
|
M3
|
python
|
getsentry__sentry
|
tests/sentry/incidents/models/test_incidents.py
|
{
"start": 492,
"end": 1898
}
|
class ____(TestCase):
def test_empty(self) -> None:
incidents = Incident.objects.fetch_for_organization(self.organization, [self.project])
assert [] == list(incidents)
self.create_project()
def test_simple(self) -> None:
incident = self.create_incident()
assert [incident] == list(
Incident.objects.fetch_for_organization(self.organization, [self.project])
)
def test_invalid_project(self) -> None:
project = self.create_project()
incident = self.create_incident(projects=[project])
assert [] == list(
Incident.objects.fetch_for_organization(self.organization, [self.project])
)
assert [incident] == list(
Incident.objects.fetch_for_organization(self.organization, [project])
)
def test_multi_project(self) -> None:
project = self.create_project()
incident = self.create_incident(projects=[project, self.project])
assert [incident] == list(
Incident.objects.fetch_for_organization(self.organization, [self.project])
)
assert [incident] == list(
Incident.objects.fetch_for_organization(self.organization, [project])
)
assert [incident] == list(
Incident.objects.fetch_for_organization(self.organization, [self.project, project])
)
|
FetchForOrganizationTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/training/sync_replicas_optimizer_test.py
|
{
"start": 3281,
"end": 10824
}
|
class ____(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
@test_util.run_v1_only(
"This exercises tensor lookup via names which is not supported in V2.")
def test2Workers(self):
num_workers = 2
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
self.assertAllEqual(0, sessions[0].run(local_step_0))
# Will just use session 1 to verify all the variables later.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))
# We have initial tokens in the queue so we can call this one by one. After
# the first step, this will no longer work as there will be no more extra
# tokens in the queue.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# The global step should have been updated and the variables should now have
# the new values after the average of the gradients are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
sessions[1].run(var_sparse_g_1))
# The local step for both workers should still be 0 because the initial
# tokens in the token queue are 0s. This means that the following
# computation of the gradients will be wasted as local_step is smaller than
# the current global step. However, this only happens once when the system
# just starts and this is necessary to make the system robust for the case
# when chief gets restarted by errors/preemption/...
self.assertAllEqual(0, sessions[0].run(local_step_0))
self.assertAllEqual(0, sessions[1].run(local_step_1))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. The variables are still the same.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[0].run(local_step_0))
self.assertAllEqual(1, sessions[1].run(local_step_1))
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
# At this step, the token queue is empty. So the 2 workers need to work
# together to proceed.
threads = []
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0])))
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1])))
# The two workers starts to execute the train op.
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# The global step should now be 2 and the gradients should have been
# applied twice.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
# 3 workers and one of them is backup.
@test_util.run_v1_only(
"This exercises tensor lookup via names which is not supported in V2.")
def test3Workers1Backup(self):
num_workers = 3
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
# We have initial tokens in the queue so we can call this one by one. After
# the token queue becomes empty, they should be called concurrently.
# Here worker 0 and worker 2 finished first.
sessions[0].run(train_ops[0])
sessions[2].run(train_ops[2])
# The global step should have been updated since we only need to collect 2
# gradients. The variables should now have the new values after the average
# of the gradients from worker 0/2 are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))
# Worker 1 finished later and its gradients will now be dropped as it is
# stale.
sessions[1].run(train_ops[1])
# As shown in the previous test, the local_step for all workers should be
# still 0 so their next computation will also be dropped.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[2].run(train_ops[2])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. Just check worker 1 as an example.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[1].run(local_step_1))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
# Lets worker 0 execute first.
# It will wait as we need 2 workers to finish this step and the global step
# should be still 1.
thread_0.start()
self.assertAllEqual(1, sessions[1].run(global_step))
# Starts worker 1.
thread_1.start()
thread_1.join()
thread_0.join()
# The global step should now be 2 and the gradients should have been
# applied again.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
|
SyncReplicasOptimizerTest
|
python
|
django__django
|
tests/async/test_async_shortcuts.py
|
{
"start": 211,
"end": 2432
}
|
class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.s1 = SimpleModel.objects.create(field=0)
cls.s2 = SimpleModel.objects.create(field=1)
cls.r1 = RelatedModel.objects.create(simple=cls.s1)
async def test_aget_object_or_404(self):
self.assertEqual(await aget_object_or_404(SimpleModel, field=1), self.s2)
self.assertEqual(await aget_object_or_404(SimpleModel, Q(field=0)), self.s1)
self.assertEqual(
await aget_object_or_404(SimpleModel.objects.all(), field=1), self.s2
)
self.assertEqual(
await aget_object_or_404(self.s1.relatedmodel_set, pk=self.r1.pk), self.r1
)
# Http404 is returned if the list is empty.
msg = "No SimpleModel matches the given query."
with self.assertRaisesMessage(Http404, msg):
await aget_object_or_404(SimpleModel, field=2)
async def test_get_list_or_404(self):
self.assertEqual(await aget_list_or_404(SimpleModel, field=1), [self.s2])
self.assertEqual(await aget_list_or_404(SimpleModel, Q(field=0)), [self.s1])
self.assertEqual(
await aget_list_or_404(SimpleModel.objects.all(), field=1), [self.s2]
)
self.assertEqual(
await aget_list_or_404(self.s1.relatedmodel_set, pk=self.r1.pk), [self.r1]
)
# Http404 is returned if the list is empty.
msg = "No SimpleModel matches the given query."
with self.assertRaisesMessage(Http404, msg):
await aget_list_or_404(SimpleModel, field=2)
async def test_get_object_or_404_bad_class(self):
msg = (
"First argument to aget_object_or_404() must be a Model, Manager, or "
"QuerySet, not 'str'."
)
with self.assertRaisesMessage(ValueError, msg):
await aget_object_or_404("SimpleModel", field=0)
async def test_get_list_or_404_bad_class(self):
msg = (
"First argument to aget_list_or_404() must be a Model, Manager, or "
"QuerySet, not 'list'."
)
with self.assertRaisesMessage(ValueError, msg):
await aget_list_or_404([SimpleModel], field=1)
|
GetListObjectOr404Test
|
python
|
getsentry__sentry
|
tests/sentry/integrations/source_code_management/test_commit_context.py
|
{
"start": 660,
"end": 1133
}
|
class ____(CommitContextIntegration):
"""Mock implementation for testing"""
integration_name = "mock_integration"
def __init__(self) -> None:
self.client = Mock()
self.client.base_url = "https://example.com"
def get_client(self) -> CommitContextClient:
return self.client
def on_create_or_update_comment_error(self, api_error: ApiError, metrics_base: str) -> bool:
raise NotImplementedError
|
MockCommitContextIntegration
|
python
|
sphinx-doc__sphinx
|
tests/utils.py
|
{
"start": 1665,
"end": 4365
}
|
class ____(HttpServerThread):
def __init__(self, handler: type[BaseRequestHandler], *, port: int = 0) -> None:
super().__init__(handler, port=port)
sslcontext = SSLContext(PROTOCOL_TLS_SERVER)
sslcontext.load_cert_chain(CERT_FILE)
self.server.socket = sslcontext.wrap_socket(
self.server.socket, server_side=True
)
@contextmanager
def http_server(
handler: type[BaseRequestHandler],
*,
tls_enabled: bool = False,
port: int = 0,
) -> Iterator[HTTPServer]:
server_cls = HttpsServerThread if tls_enabled else HttpServerThread
server_thread = server_cls(handler, port=port)
server_thread.start()
server_port = server_thread.server.server_port
assert port in {0, server_port}
try:
socket.create_connection(('localhost', server_port), timeout=0.5).close()
yield server_thread.server # Connection has been confirmed possible; proceed.
finally:
server_thread.terminate()
@contextmanager
def rewrite_hyperlinks(app: Sphinx, server: HTTPServer) -> Iterator[None]:
"""Rewrite hyperlinks that refer to network location 'localhost:7777',
allowing that location to vary dynamically with the arbitrary test HTTP
server port assigned during unit testing.
:param app: The Sphinx application where link replacement is to occur.
:param server: Destination server to redirect the hyperlinks to.
"""
match_netloc, replacement_netloc = (
'localhost:7777',
f'localhost:{server.server_port}',
)
def rewrite_hyperlink(_app: Sphinx, uri: str) -> str | None:
parsed_uri = urlparse(uri)
if parsed_uri.netloc != match_netloc:
return uri
return parsed_uri._replace(netloc=replacement_netloc).geturl()
listener_id = app.connect('linkcheck-process-uri', rewrite_hyperlink)
yield
app.disconnect(listener_id)
@contextmanager
def serve_application(
app: Sphinx,
handler: type[BaseRequestHandler],
*,
tls_enabled: bool = False,
port: int = 0,
) -> Iterator[str]:
"""Prepare a temporary server to handle HTTP requests related to the links
found in a Sphinx application project.
:param app: The Sphinx application.
:param handler: Determines how each request will be handled.
:param tls_enabled: Whether TLS (SSL) should be enabled for the server.
:param port: Optional server port (default: auto).
:return: The address of the temporary HTTP server.
"""
with (
http_server(handler, tls_enabled=tls_enabled, port=port) as server,
rewrite_hyperlinks(app, server),
):
yield f'localhost:{server.server_port}'
|
HttpsServerThread
|
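A usage sketch for the helpers above, assuming http_server (and the HttpServerThread it relies on) are importable from this utils module; the OKHandler name is illustrative:

from http.server import BaseHTTPRequestHandler
from urllib.request import urlopen

class OKHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        self.send_response(200)
        self.send_header("Content-Length", "2")
        self.end_headers()
        self.wfile.write(b"ok")

with http_server(OKHandler) as server:
    # The context manager binds an ephemeral port and tears the server down on exit.
    assert urlopen(f"http://localhost:{server.server_port}/").read() == b"ok"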
python
|
pandas-dev__pandas
|
pandas/io/formats/format.py
|
{
"start": 50294,
"end": 51028
}
|
class ____(_GenericArrayFormatter):
values: DatetimeArray
def __init__(
self,
values: DatetimeArray,
nat_rep: str = "NaT",
date_format: None = None,
**kwargs,
) -> None:
super().__init__(values, **kwargs)
self.nat_rep = nat_rep
self.date_format = date_format
def _format_strings(self) -> list[str]:
"""we by definition have DO NOT have a TZ"""
values = self.values
if self.formatter is not None:
return [self.formatter(x) for x in values]
fmt_values = values._format_native_types(
na_rep=self.nat_rep, date_format=self.date_format
)
return fmt_values.tolist()
|
_Datetime64Formatter
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/commands/sanity/integration_aliases.py
|
{
"start": 932,
"end": 16675
}
|
class ____(SanitySingleVersion):
"""Sanity test to evaluate integration test aliases."""
CI_YML = '.azure-pipelines/azure-pipelines.yml'
TEST_ALIAS_PREFIX = 'shippable' # this will be changed at some point in the future
DISABLED = 'disabled/'
UNSTABLE = 'unstable/'
UNSUPPORTED = 'unsupported/'
EXPLAIN_URL = get_docs_url('https://docs.ansible.com/ansible-core/devel/dev_guide/testing/sanity/integration-aliases.html')
TEMPLATE_DISABLED = """
The following integration tests are **disabled** [[explain]({explain_url}#disabled)]:
{tests}
Consider fixing the integration tests before or alongside changes.
"""
TEMPLATE_UNSTABLE = """
The following integration tests are **unstable** [[explain]({explain_url}#unstable)]:
{tests}
Tests may need to be restarted due to failures unrelated to changes.
"""
TEMPLATE_UNSUPPORTED = """
The following integration tests are **unsupported** [[explain]({explain_url}#unsupported)]:
{tests}
Consider running the tests manually or extending test infrastructure to add support.
"""
TEMPLATE_UNTESTED = """
The following modules have **no integration tests** [[explain]({explain_url}#untested)]:
{tests}
Consider adding integration tests before or alongside changes.
"""
ansible_only = True
def __init__(self) -> None:
super().__init__()
self._ci_config: dict[str, t.Any] = {}
self._ci_test_groups: dict[str, list[int]] = {}
@property
def can_ignore(self) -> bool:
"""True if the test supports ignore entries."""
return False
@property
def no_targets(self) -> bool:
"""True if the test does not use test targets. Mutually exclusive with all_targets."""
return True
def load_ci_config(self, python: PythonConfig) -> dict[str, t.Any]:
"""Load and return the CI YAML configuration."""
if not self._ci_config:
self._ci_config = self.load_yaml(python, self.CI_YML)
return self._ci_config
@property
def ci_test_groups(self) -> dict[str, list[int]]:
"""Return a dictionary of CI test names and their group(s)."""
if not self._ci_test_groups:
test_groups: dict[str, set[int]] = {}
for stage in self._ci_config['stages']:
for job in stage['jobs']:
if job.get('template') != 'templates/matrix.yml':
continue
parameters = job['parameters']
groups = parameters.get('groups', [])
test_format = parameters.get('testFormat', '{0}')
test_group_format = parameters.get('groupFormat', '{0}/{{1}}')
for target in parameters['targets']:
test = target.get('test') or target.get('name')
if groups:
tests_formatted = [test_group_format.format(test_format).format(test, group) for group in groups]
else:
tests_formatted = [test_format.format(test)]
for test_formatted in tests_formatted:
parts = test_formatted.split('/')
key = parts[0]
if key in ('sanity', 'units'):
continue
try:
group = int(parts[-1])
except ValueError:
continue
if group < 1 or group > 99:
continue
group_set = test_groups.setdefault(key, set())
group_set.add(group)
self._ci_test_groups = dict((key, sorted(value)) for key, value in test_groups.items())
return self._ci_test_groups
def format_test_group_alias(self, name: str, fallback: str = '') -> str:
"""Return a test group alias using the given name and fallback."""
group_numbers = self.ci_test_groups.get(name, None)
if group_numbers:
group_numbers = [num for num in group_numbers if num not in (6, 7)] # HACK: ignore special groups 6 and 7
if min(group_numbers) != 1:
display.warning('Min test group "%s" in %s is %d instead of 1.' % (name, self.CI_YML, min(group_numbers)), unique=True)
if max(group_numbers) != len(group_numbers):
display.warning('Max test group "%s" in %s is %d instead of %d.' % (name, self.CI_YML, max(group_numbers), len(group_numbers)), unique=True)
if max(group_numbers) > 9:
alias = '%s/%s/group(%s)/' % (self.TEST_ALIAS_PREFIX, name, '|'.join(str(i) for i in range(min(group_numbers), max(group_numbers) + 1)))
elif len(group_numbers) > 1:
alias = '%s/%s/group[%d-%d]/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers), max(group_numbers))
else:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, name, min(group_numbers))
elif fallback:
alias = '%s/%s/group%d/' % (self.TEST_ALIAS_PREFIX, fallback, 1)
else:
raise Exception('cannot find test group "%s" in %s' % (name, self.CI_YML))
return alias
def load_yaml(self, python: PythonConfig, path: str) -> dict[str, t.Any]:
"""Load the specified YAML file and return the contents."""
yaml_to_json_path = os.path.join(SANITY_ROOT, self.name, 'yaml_to_json.py')
return json.loads(raw_command([python.path, yaml_to_json_path], data=read_text_file(path), capture=True)[0])
def test(self, args: SanityConfig, targets: SanityTargets, python: PythonConfig) -> TestResult:
if args.explain:
return SanitySuccess(self.name)
if not os.path.isfile(self.CI_YML):
return SanityFailure(self.name, messages=[SanityMessage(
message='file missing',
path=self.CI_YML,
)])
results = Results(
comments=[],
labels={},
)
self.load_ci_config(python)
self.check_changes(args, results)
write_json_test_results(ResultType.BOT, 'data-sanity-ci.json', results.__dict__)
messages = []
messages += self.check_posix_targets(args)
messages += self.check_windows_targets()
if messages:
return SanityFailure(self.name, messages=messages)
return SanitySuccess(self.name)
def check_posix_targets(self, args: SanityConfig) -> list[SanityMessage]:
"""Check POSIX integration test targets and return messages with any issues found."""
posix_targets = tuple(walk_posix_integration_targets())
clouds = get_cloud_platforms(args, posix_targets)
cloud_targets = ['cloud/%s/' % cloud for cloud in clouds]
all_cloud_targets = tuple(filter_targets(posix_targets, ['cloud/'], errors=False))
invalid_cloud_targets = tuple(filter_targets(all_cloud_targets, cloud_targets, include=False, errors=False))
messages = []
for target in invalid_cloud_targets:
for alias in target.aliases:
if alias.startswith('cloud/') and alias != 'cloud/':
if any(alias.startswith(cloud_target) for cloud_target in cloud_targets):
continue
messages.append(SanityMessage('invalid alias `%s`' % alias, '%s/aliases' % target.path))
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/', '%s/generic/' % self.TEST_ALIAS_PREFIX], include=False, errors=False)),
find=self.format_test_group_alias('linux').replace('linux', 'posix'),
find_incidental=['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX],
)
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['%s/generic/' % self.TEST_ALIAS_PREFIX], errors=False)),
find=self.format_test_group_alias('generic'),
)
for cloud in clouds:
if cloud == 'httptester':
find = self.format_test_group_alias('linux').replace('linux', 'posix')
find_incidental = ['%s/posix/incidental/' % self.TEST_ALIAS_PREFIX]
else:
find = self.format_test_group_alias(cloud, 'generic')
find_incidental = ['%s/%s/incidental/' % (self.TEST_ALIAS_PREFIX, cloud), '%s/cloud/incidental/' % self.TEST_ALIAS_PREFIX]
messages += self.check_ci_group(
targets=tuple(filter_targets(posix_targets, ['cloud/%s/' % cloud], errors=False)),
find=find,
find_incidental=find_incidental,
)
target_type_groups = {
IntegrationTargetType.TARGET: (1, 2),
IntegrationTargetType.CONTROLLER: (3, 4, 5),
IntegrationTargetType.CONFLICT: (),
IntegrationTargetType.UNKNOWN: (),
}
for target in posix_targets:
if target.name == 'ansible-test-container':
continue # special test target which uses group 6 -- nothing else should be in that group
if target.name in ('dnf-oldest', 'dnf-latest'):
continue # special test targets which use group 7 -- nothing else should be in that group
if f'{self.TEST_ALIAS_PREFIX}/posix/' not in target.aliases:
continue
found_groups = [alias for alias in target.aliases if re.search(f'^{self.TEST_ALIAS_PREFIX}/posix/group[0-9]+/$', alias)]
expected_groups = [f'{self.TEST_ALIAS_PREFIX}/posix/group{group}/' for group in target_type_groups[target.target_type]]
valid_groups = [group for group in found_groups if group in expected_groups]
invalid_groups = [group for group in found_groups if not any(group.startswith(expected_group) for expected_group in expected_groups)]
if not valid_groups:
messages.append(SanityMessage(f'Target of type {target.target_type.name} must be in at least one of these groups: {", ".join(expected_groups)}',
f'{target.path}/aliases'))
if invalid_groups:
messages.append(SanityMessage(f'Target of type {target.target_type.name} cannot be in these groups: {", ".join(invalid_groups)}',
f'{target.path}/aliases'))
return messages
def check_windows_targets(self) -> list[SanityMessage]:
"""Check Windows integration test targets and return messages with any issues found."""
windows_targets = tuple(walk_windows_integration_targets())
messages = []
messages += self.check_ci_group(
targets=windows_targets,
find=self.format_test_group_alias('windows'),
find_incidental=['%s/windows/incidental/' % self.TEST_ALIAS_PREFIX],
)
return messages
def check_ci_group(
self,
targets: tuple[CompletionTarget, ...],
find: str,
find_incidental: t.Optional[list[str]] = None,
) -> list[SanityMessage]:
"""Check the CI groups set in the provided targets and return a list of messages with any issues found."""
all_paths = set(target.path for target in targets)
supported_paths = set(target.path for target in filter_targets(targets, [find], errors=False))
unsupported_paths = set(target.path for target in filter_targets(targets, [self.UNSUPPORTED], errors=False))
if find_incidental:
incidental_paths = set(target.path for target in filter_targets(targets, find_incidental, errors=False))
else:
incidental_paths = set()
unassigned_paths = all_paths - supported_paths - unsupported_paths - incidental_paths
conflicting_paths = supported_paths & unsupported_paths
unassigned_message = 'missing alias `%s` or `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
conflicting_message = 'conflicting alias `%s` and `%s`' % (find.strip('/'), self.UNSUPPORTED.strip('/'))
messages = []
for path in unassigned_paths:
if path == 'test/integration/targets/ansible-test-container':
continue # special test target which uses group 6 -- nothing else should be in that group
if path in (
'test/integration/targets/dnf-oldest',
'test/integration/targets/dnf-latest',
):
continue # special test targets which use group 7 -- nothing else should be in that group
messages.append(SanityMessage(unassigned_message, '%s/aliases' % path))
for path in conflicting_paths:
messages.append(SanityMessage(conflicting_message, '%s/aliases' % path))
return messages
def check_changes(self, args: SanityConfig, results: Results) -> None:
"""Check changes and store results in the provided result dictionary."""
integration_targets = list(walk_integration_targets())
module_targets = list(walk_module_targets())
integration_targets_by_name = dict((target.name, target) for target in integration_targets)
module_names_by_path = dict((target.path, target.module) for target in module_targets)
disabled_targets = []
unstable_targets = []
unsupported_targets = []
for command in [command for command in args.metadata.change_description.focused_command_targets if 'integration' in command]:
for target in args.metadata.change_description.focused_command_targets[command]:
if self.DISABLED in integration_targets_by_name[target].aliases:
disabled_targets.append(target)
elif self.UNSTABLE in integration_targets_by_name[target].aliases:
unstable_targets.append(target)
elif self.UNSUPPORTED in integration_targets_by_name[target].aliases:
unsupported_targets.append(target)
untested_modules = []
for path in args.metadata.change_description.no_integration_paths:
module = module_names_by_path.get(path)
if module:
untested_modules.append(module)
comments = [
self.format_comment(self.TEMPLATE_DISABLED, disabled_targets),
self.format_comment(self.TEMPLATE_UNSTABLE, unstable_targets),
self.format_comment(self.TEMPLATE_UNSUPPORTED, unsupported_targets),
self.format_comment(self.TEMPLATE_UNTESTED, untested_modules),
]
comments = [comment for comment in comments if comment]
labels = dict(
needs_tests=bool(untested_modules),
disabled_tests=bool(disabled_targets),
unstable_tests=bool(unstable_targets),
unsupported_tests=bool(unsupported_targets),
)
results.comments += comments
results.labels.update(labels)
def format_comment(self, template: str, targets: list[str]) -> t.Optional[str]:
"""Format and return a comment based on the given template and targets, or None if there are no targets."""
if not targets:
return None
tests = '\n'.join('- %s' % target for target in targets)
data = dict(
explain_url=self.EXPLAIN_URL,
tests=tests,
)
message = textwrap.dedent(template).strip().format(**data)
return message
@dataclasses.dataclass
|
IntegrationAliasesTest
|
python
|
doocs__leetcode
|
solution/2300-2399/2363.Merge Similar Items/Solution.py
|
{
"start": 0,
"end": 258
}
|
class ____:
def mergeSimilarItems(
self, items1: List[List[int]], items2: List[List[int]]
) -> List[List[int]]:
cnt = Counter()
for v, w in chain(items1, items2):
cnt[v] += w
return sorted(cnt.items())
|
Solution
|
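A worked example of the Counter-plus-chain approach in the Solution above, using the values from the problem statement:

from collections import Counter
from itertools import chain

items1 = [[1, 1], [4, 5], [3, 8]]
items2 = [[3, 1], [1, 5]]

cnt = Counter()
for v, w in chain(items1, items2):
    cnt[v] += w
print(sorted(cnt.items()))  # [(1, 6), (3, 9), (4, 5)]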
python
|
wandb__wandb
|
wandb/sdk/data_types/table.py
|
{
"start": 40982,
"end": 43667
}
|
class ____(Media):
"""A table which is composed of multiple sub-tables.
Currently, PartitionedTable is designed to point to a directory within an
artifact.
"""
_log_type = "partitioned-table"
def __init__(self, parts_path):
"""Initialize a PartitionedTable.
Args:
parts_path (str): path to a directory of tables in the artifact.
"""
super().__init__()
self.parts_path = parts_path
self._loaded_part_entries = {}
def to_json(self, artifact_or_run):
json_obj = {
"_type": PartitionedTable._log_type,
}
if isinstance(artifact_or_run, wandb.Run):
artifact_entry_url = self._get_artifact_entry_ref_url()
if artifact_entry_url is None:
raise ValueError(
"PartitionedTables must first be added to an Artifact before logging to a Run"
)
json_obj["artifact_path"] = artifact_entry_url
else:
json_obj["parts_path"] = self.parts_path
return json_obj
@classmethod
def from_json(cls, json_obj, source_artifact):
instance = cls(json_obj["parts_path"])
entries = source_artifact.manifest.get_entries_in_directory(
json_obj["parts_path"]
)
for entry in entries:
instance._add_part_entry(entry, source_artifact)
return instance
def iterrows(self):
"""Iterate over rows as (ndx, row).
        Yields:
index (int): The index of the row.
row (List[any]): The data of the row.
"""
columns = None
ndx = 0
for entry_path in self._loaded_part_entries:
part = self._loaded_part_entries[entry_path].get_part()
if columns is None:
columns = part.columns
elif columns != part.columns:
raise ValueError(
f"Table parts have non-matching columns. {columns} != {part.columns}"
)
for _, row in part.iterrows():
yield ndx, row
ndx += 1
self._loaded_part_entries[entry_path].free()
def _add_part_entry(self, entry, source_artifact):
self._loaded_part_entries[entry.path] = _PartitionTablePartEntry(
entry, source_artifact
)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.parts_path == other.parts_path
def bind_to_run(self, *args, **kwargs):
raise ValueError("PartitionedTables cannot be bound to runs")
|
PartitionedTable
|
python
|
getsentry__sentry
|
src/sentry/users/services/user_option/impl.py
|
{
"start": 550,
"end": 3834
}
|
class ____(UserOptionService):
def serialize_many(
self,
*,
filter: UserOptionFilterArgs,
as_user: RpcUser | None = None,
auth_context: AuthenticationContext | None = None,
) -> list[OpaqueSerializedResponse]:
return self._FQ.serialize_many(filter, as_user, auth_context)
def get_many(self, *, filter: UserOptionFilterArgs) -> list[RpcUserOption]:
return self._FQ.get_many(filter)
def delete_options(self, *, option_ids: list[int]) -> None:
UserOption.objects.filter(id__in=option_ids).delete()
def set_option(
self,
*,
user_id: int,
value: Any,
key: str,
project_id: int | None = None,
organization_id: int | None = None,
) -> None:
UserOption.objects.set_value(
user=user_id,
key=key,
value=value,
project_id=project_id,
organization_id=organization_id,
)
class _UserOptionFilterQuery(
FilterQueryDatabaseImpl[UserOption, UserOptionFilterArgs, RpcUserOption, None]
):
def base_query(self, select_related: bool = True) -> QuerySet[UserOption]:
return UserOption.objects.all()
def filter_arg_validator(self) -> Callable[[UserOptionFilterArgs], str | None]:
return self._filter_has_any_key_validator("user_ids")
def serialize_api(self, serializer: None) -> Serializer:
# User options should not be serialized in this way
raise NotImplementedError
def apply_filters(
self, query: QuerySet[UserOption], filters: UserOptionFilterArgs
) -> QuerySet[UserOption]:
# To maintain expected behaviors, we default these to None and always query for them
if "project_ids" in filters:
query = query.filter(
user_id__in=filters["user_ids"],
project_id__in=filters["project_ids"],
)
else:
project_id = None
if "project_id" in filters:
project_id = filters["project_id"]
organization_id = None
if "organization_id" in filters:
organization_id = filters["organization_id"]
query = query.filter(
user_id__in=filters["user_ids"],
project_id=project_id,
organization_id=organization_id,
)
if "keys" in filters or "key" in filters:
keys: list[str] = []
if "keys" in filters:
keys = filters["keys"]
if "key" in filters:
keys.append(filters["key"])
query = query.filter(
key__in=keys,
)
return query
def serialize_rpc(self, op: UserOption) -> RpcUserOption:
return RpcUserOption(
id=op.id,
user_id=op.user_id,
value=op.value,
key=op.key,
project_id=op.project_id,
organization_id=op.organization_id,
)
_FQ = _UserOptionFilterQuery()
|
DatabaseBackedUserOptionService
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_data_validation06.py
|
{
"start": 315,
"end": 2514
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("data_validation02.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file data validation."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.data_validation(
"C2",
{
"validate": "list",
"value": ["Foo", "Bar", "Baz"],
"input_title": "This is the input title",
"input_message": "This is the input message",
},
)
# The following should be rejected because the list items are too long.
input_title = "This is the longest input title1"
input_message = "This is the longest input message " + ("a" * 221)
values = [
"Foobar",
"Foobas",
"Foobat",
"Foobau",
"Foobav",
"Foobaw",
"Foobax",
"Foobay",
"Foobaz",
"Foobba",
"Foobbb",
"Foobbc",
"Foobbd",
"Foobbe",
"Foobbf",
"Foobbg",
"Foobbh",
"Foobbi",
"Foobbj",
"Foobbk",
"Foobbl",
"Foobbm",
"Foobbn",
"Foobbo",
"Foobbp",
"Foobbq",
"Foobbr",
"Foobbs",
"Foobbt",
"Foobbu",
"Foobbv",
"Foobbw",
"Foobbx",
"Foobby",
"Foobbz",
"Foobca",
"End1",
]
# Ignore the warnings raised by data_validation().
import warnings
warnings.filterwarnings("ignore")
worksheet.data_validation(
"D6",
{
"validate": "list",
"value": values,
"input_title": input_title,
"input_message": input_message,
},
)
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
django__django
|
django/forms/widgets.py
|
{
"start": 14352,
"end": 15741
}
|
class ____(Input):
allow_multiple_selected = False
input_type = "file"
needs_multipart_form = True
template_name = "django/forms/widgets/file.html"
def __init__(self, attrs=None):
if (
attrs is not None
and not self.allow_multiple_selected
and attrs.get("multiple", False)
):
raise ValueError(
"%s doesn't support uploading multiple files."
% self.__class__.__qualname__
)
if self.allow_multiple_selected:
if attrs is None:
attrs = {"multiple": True}
else:
attrs.setdefault("multiple", True)
super().__init__(attrs)
def format_value(self, value):
"""File input never renders a value."""
return
def value_from_datadict(self, data, files, name):
"File widgets take data from FILES, not POST"
getter = files.get
if self.allow_multiple_selected:
try:
getter = files.getlist
except AttributeError:
pass
return getter(name)
def value_omitted_from_data(self, data, files, name):
return name not in files
def use_required_attribute(self, initial):
return super().use_required_attribute(initial) and not initial
FILE_INPUT_CONTRADICTION = object()
|
FileInput
|
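The allow_multiple_selected flag above is designed to be flipped on a subclass, which switches value_from_datadict() to files.getlist() and adds the multiple attribute by default. A minimal sketch, assuming Django is installed (MultipleFileInput is an illustrative name, not a Django class):

from django import forms

class MultipleFileInput(forms.FileInput):
    # Opting in makes value_from_datadict() return a list of uploaded files
    # and sets the "multiple" attribute on the rendered input.
    allow_multiple_selected = True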
python
|
has2k1__plotnine
|
tests/test_layers.py
|
{
"start": 2718,
"end": 3712
}
|
class ____:
p = ggplot(larger_data, aes("x", "y"))
def _assert_raster_smaller(self, p_no_raster, p_raster):
# Plot and check that the file sizes are smaller when
# rastering. Then delete the files.
geom_name = p_raster.layers[0].geom.__class__.__name__
fn1 = Path(f"{geom_name}-no-raster.pdf")
fn2 = Path(f"{geom_name}-raster.pdf")
try:
with pytest.warns(PlotnineWarning):
p_no_raster.save(fn1)
p_raster.save(fn2)
assert fn1.stat().st_size > fn2.stat().st_size
finally:
fn1.unlink(missing_ok=True)
fn2.unlink(missing_ok=True)
def test_geom_point(self):
p1 = self.p + geom_point()
p2 = self.p + geom_point(raster=True)
self._assert_raster_smaller(p1, p2)
def test_geom_path(self):
p1 = self.p + geom_path()
p2 = self.p + geom_path(raster=True)
self._assert_raster_smaller(p1, p2)
|
TestRasterizing
|
python
|
pandas-dev__pandas
|
pandas/tests/tslibs/test_np_datetime.py
|
{
"start": 4750,
"end": 7889
}
|
class ____:
def test_pass_non_dt64_array(self):
# check that we raise, not segfault
arr = np.arange(5)
dtype = np.dtype("M8[ns]")
msg = (
"astype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
astype_overflowsafe(arr, dtype, copy=True)
with pytest.raises(TypeError, match=msg):
astype_overflowsafe(arr, dtype, copy=False)
def test_pass_non_dt64_dtype(self):
# check that we raise, not segfault
arr = np.arange(5, dtype="i8").view("M8[D]")
dtype = np.dtype("m8[ns]")
msg = (
"astype_overflowsafe values.dtype and dtype must be either "
"both-datetime64 or both-timedelta64"
)
with pytest.raises(TypeError, match=msg):
astype_overflowsafe(arr, dtype, copy=True)
with pytest.raises(TypeError, match=msg):
astype_overflowsafe(arr, dtype, copy=False)
def test_astype_overflowsafe_dt64(self):
dtype = np.dtype("M8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
        # arr.astype silently overflows, so this produces incorrect values
wrong = arr.astype(dtype)
roundtrip = wrong.astype(arr.dtype)
assert not (wrong == roundtrip).all()
msg = "Out of bounds nanosecond timestamp"
with pytest.raises(OutOfBoundsDatetime, match=msg):
astype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("M8[us]")
result = astype_overflowsafe(arr, dtype2)
expected = arr.astype(dtype2)
tm.assert_numpy_array_equal(result, expected)
def test_astype_overflowsafe_td64(self):
dtype = np.dtype("m8[ns]")
dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")
arr = arr.view("m8[D]")
        # arr.astype silently overflows, so this produces incorrect values
wrong = arr.astype(dtype)
roundtrip = wrong.astype(arr.dtype)
assert not (wrong == roundtrip).all()
msg = r"Cannot convert 106752 days to timedelta64\[ns\] without overflow"
with pytest.raises(OutOfBoundsTimedelta, match=msg):
astype_overflowsafe(arr, dtype)
# But converting to microseconds is fine, and we match numpy's results.
dtype2 = np.dtype("m8[us]")
result = astype_overflowsafe(arr, dtype2)
expected = arr.astype(dtype2)
tm.assert_numpy_array_equal(result, expected)
def test_astype_overflowsafe_disallow_rounding(self):
arr = np.array([-1500, 1500], dtype="M8[ns]")
dtype = np.dtype("M8[us]")
msg = "Cannot losslessly cast '-1500 ns' to us"
with pytest.raises(ValueError, match=msg):
astype_overflowsafe(arr, dtype, round_ok=False)
result = astype_overflowsafe(arr, dtype, round_ok=True)
expected = arr.astype(dtype)
tm.assert_numpy_array_equal(result, expected)
|
TestAstypeOverflowSafe
|
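The silent overflow those tests guard against can be reproduced with plain numpy (behaviour as of current numpy releases): casting days past the datetime64[ns] limit wraps around instead of raising.

import numpy as np

dt = np.datetime64("2262-04-05", "D")
arr = dt + np.arange(10, dtype="m8[D]")  # runs a few days past the ns limit
wrong = arr.astype("M8[ns]")             # wraps silently for the last entries
roundtrip = wrong.astype(arr.dtype)
print((arr == roundtrip).all())          # False: the round trip lost information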
python
|
huggingface__transformers
|
tests/models/rembert/test_modeling_rembert.py
|
{
"start": 17253,
"end": 18382
}
|
class ____(unittest.TestCase):
@slow
def test_inference_model(self):
# Test exact values at the last hidden layer
model = RemBertModel.from_pretrained("google/rembert")
input_ids = torch.tensor([[312, 56498, 313, 2125, 313]])
segment_ids = torch.tensor([[0, 0, 0, 1, 1]])
with torch.no_grad():
output = model(input_ids, token_type_ids=segment_ids, output_hidden_states=True)
hidden_size = 1152
expected_shape = torch.Size((1, 5, hidden_size))
self.assertEqual(output["last_hidden_state"].shape, expected_shape)
expected_implementation = torch.tensor(
[
[
[0.0754, -0.2022, 0.1904],
[-0.3354, -0.3692, -0.4791],
[-0.2314, -0.6729, -0.0749],
[-0.0396, -0.3105, -0.4234],
[-0.1571, -0.0525, 0.5353],
]
]
)
torch.testing.assert_close(
output["last_hidden_state"][:, :, :3], expected_implementation, rtol=1e-4, atol=1e-4
)
|
RemBertModelIntegrationTest
|
python
|
jazzband__django-simple-history
|
simple_history/tests/models.py
|
{
"start": 12203,
"end": 12263
}
|
class ____(Book):
price = models.FloatField()
|
HardbackBook
|
python
|
automl__auto-sklearn
|
autosklearn/data/validation.py
|
{
"start": 1231,
"end": 7425
}
|
class ____(BaseEstimator):
"""
Makes sure the input data complies with Auto-sklearn requirements.
Categorical inputs are encoded via a Label Encoder, if the input
is a dataframe.
This class also perform checks for data integrity and flags the user
via informative errors.
Attributes
----------
feat_type: Optional[List[str]] = None
In case the dataset is not a pandas DataFrame:
            + If provided, this list indicates which columns should be treated as
              categorical; it is internally transformed into a dictionary that
              indicates a mapping from column index to categorical/numerical.
+ If not provided, by default all columns are treated as numerical
If the input dataset is of type pandas dataframe, this argument
        must be None, as the column type will be inferred from the pandas dtypes.
is_classification: bool
For classification task, this flag indicates that the target data
should be encoded
feature_validator: FeatureValidator
A FeatureValidator instance used to validate and encode feature columns to match
sklearn expectations on the data
target_validator: TargetValidator
A TargetValidator instance used for classification to validate and encode the
target values
"""
def __init__(
self,
feat_type: Optional[List[str]] = None,
is_classification: bool = False,
logger_port: Optional[int] = None,
allow_string_features: bool = True,
) -> None:
self.feat_type = feat_type
self.is_classification = is_classification
self.logger_port = logger_port
if self.logger_port is not None:
self.logger = get_named_client_logger(
name="Validation",
port=self.logger_port,
)
else:
self.logger = logging.getLogger("Validation")
self.allow_string_features = allow_string_features
self.feature_validator = FeatureValidator(
feat_type=self.feat_type,
logger=self.logger,
allow_string_features=self.allow_string_features,
)
self.target_validator = TargetValidator(
is_classification=self.is_classification, logger=self.logger
)
self._is_fitted = False
def fit(
self,
X_train: SUPPORTED_FEAT_TYPES,
y_train: SUPPORTED_TARGET_TYPES,
X_test: Optional[SUPPORTED_FEAT_TYPES] = None,
y_test: Optional[SUPPORTED_TARGET_TYPES] = None,
) -> BaseEstimator:
"""
        Validates and fits a categorical encoder (if needed) to the features, and
        an encoder for targets in the case of classification. Specifically:
For features:
Valid data types are enforced (List, np.ndarray, pd.DataFrame, pd.Series, scipy
sparse) as well as dimensionality checks
If the provided data is a pandas DataFrame with categorical/boolean/int columns,
such columns will be encoded using an Ordinal Encoder
For targets:
* Checks for dimensionality as well as missing values are performed.
* If performing a classification task, the data is going to be encoded
Parameters
----------
X_train: SUPPORTED_FEAT_TYPES
A set of features that are going to be validated (type and dimensionality
checks). If this data contains categorical columns, an encoder is going to
be instantiated and trained with this data.
y_train: SUPPORTED_TARGET_TYPES
            A set of targets to be encoded if the task is classification.
X_test: Optional[SUPPORTED_FEAT_TYPES]
A hold out set of features used for checking
y_test: SUPPORTED_TARGET_TYPES
A hold out set of targets used for checking. Additionally, if the current
            task is a classification task, these y_test categories are also going to be
used to fit a pre-processing encoding (to prevent errors on unseen classes).
Returns
-------
self
"""
# Check that the data is valid
if np.shape(X_train)[0] != np.shape(y_train)[0]:
raise ValueError(
"Inconsistent number of train datapoints for features and targets,"
" {} for features and {} for targets".format(
np.shape(X_train)[0],
np.shape(y_train)[0],
)
)
if X_test is not None and np.shape(X_test)[0] != np.shape(y_test)[0]:
raise ValueError(
"Inconsistent number of test datapoints for features and targets,"
" {} for features and {} for targets".format(
np.shape(X_test)[0],
np.shape(y_test)[0],
)
)
self.feature_validator.fit(X_train, X_test)
self.target_validator.fit(y_train, y_test)
self._is_fitted = True
return self
def transform(
self,
X: SUPPORTED_FEAT_TYPES,
y: Optional[Union[List, pd.Series, pd.DataFrame, np.ndarray]] = None,
) -> Tuple[Union[np.ndarray, pd.DataFrame, spmatrix], Optional[np.ndarray]]:
"""
Transform the given target or features to a numpy array
Parameters
----------
X: SUPPORTED_FEAT_TYPES
A set of features to transform
y: Optional[SUPPORTED_TARGET_TYPES]
A set of targets to transform
Return
------
np.ndarray:
The transformed features array
np.ndarray:
The transformed targets array
"""
if not self._is_fitted:
raise NotFittedError(
"Cannot call transform on a validator that is not fitted"
)
X_transformed = self.feature_validator.transform(X)
if y is not None:
y_transformed = self.target_validator.transform(y)
return X_transformed, y_transformed
else:
return X_transformed, None
|
InputValidator
|
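A minimal usage sketch of the validator above, assuming auto-sklearn is installed and the class is importable from the module path shown (autosklearn/data/validation.py); the toy arrays are illustrative:

import numpy as np
from autosklearn.data.validation import InputValidator

X_train = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
y_train = np.array([0, 1, 0])

validator = InputValidator(is_classification=True).fit(X_train, y_train)
X_t, y_t = validator.transform(X_train, y_train)  # arrays ready for sklearn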
python
|
numba__numba
|
numba/cuda/tests/cudapy/test_dispatcher.py
|
{
"start": 16983,
"end": 26587
}
|
class ____(CUDATestCase):
def test_get_regs_per_thread_unspecialized(self):
# A kernel where the register usage per thread is likely to differ
# between different specializations
@cuda.jit
def pi_sin_array(x, n):
i = cuda.grid(1)
if i < n:
x[i] = 3.14 * math.sin(x[i])
# Call the kernel with different arguments to create two different
# definitions within the Dispatcher object
N = 10
arr_f32 = np.zeros(N, dtype=np.float32)
arr_f64 = np.zeros(N, dtype=np.float64)
pi_sin_array[1, N](arr_f32, N)
pi_sin_array[1, N](arr_f64, N)
# Check we get a positive integer for the two different variations
sig_f32 = void(float32[::1], int64)
sig_f64 = void(float64[::1], int64)
regs_per_thread_f32 = pi_sin_array.get_regs_per_thread(sig_f32)
regs_per_thread_f64 = pi_sin_array.get_regs_per_thread(sig_f64)
self.assertIsInstance(regs_per_thread_f32, int)
self.assertIsInstance(regs_per_thread_f64, int)
self.assertGreater(regs_per_thread_f32, 0)
self.assertGreater(regs_per_thread_f64, 0)
# Check that getting the registers per thread for all signatures
# provides the same values as getting the registers per thread for
# individual signatures.
regs_per_thread_all = pi_sin_array.get_regs_per_thread()
self.assertEqual(regs_per_thread_all[sig_f32.args],
regs_per_thread_f32)
self.assertEqual(regs_per_thread_all[sig_f64.args],
regs_per_thread_f64)
if regs_per_thread_f32 == regs_per_thread_f64:
# If the register usage is the same for both variants, there may be
# a bug, but this may also be an artifact of the compiler / driver
# / device combination, so produce an informational message only.
print('f32 and f64 variant thread usages are equal.')
print('This may warrant some investigation. Devices:')
cuda.detect()
def test_get_regs_per_thread_specialized(self):
@cuda.jit(void(float32[::1], int64))
def pi_sin_array(x, n):
i = cuda.grid(1)
if i < n:
x[i] = 3.14 * math.sin(x[i])
# Check we get a positive integer for the specialized variation
regs_per_thread = pi_sin_array.get_regs_per_thread()
self.assertIsInstance(regs_per_thread, int)
self.assertGreater(regs_per_thread, 0)
def test_get_const_mem_unspecialized(self):
@cuda.jit
def const_fmt_string(val, to_print):
# We guard the print with a conditional to prevent noise from the
# test suite
if to_print:
print(val)
# Call the kernel with different arguments to create two different
# definitions within the Dispatcher object
const_fmt_string[1, 1](1, False)
const_fmt_string[1, 1](1.0, False)
# Check we get a positive integer for the two different variations
sig_i64 = void(int64, boolean)
sig_f64 = void(float64, boolean)
const_mem_size_i64 = const_fmt_string.get_const_mem_size(sig_i64)
const_mem_size_f64 = const_fmt_string.get_const_mem_size(sig_f64)
self.assertIsInstance(const_mem_size_i64, int)
self.assertIsInstance(const_mem_size_f64, int)
# 6 bytes for the equivalent of b'%lld\n\0'
self.assertGreaterEqual(const_mem_size_i64, 6)
# 4 bytes for the equivalent of b'%f\n\0'
self.assertGreaterEqual(const_mem_size_f64, 4)
# Check that getting the const memory size for all signatures
# provides the same values as getting the const memory size for
# individual signatures.
const_mem_size_all = const_fmt_string.get_const_mem_size()
self.assertEqual(const_mem_size_all[sig_i64.args], const_mem_size_i64)
self.assertEqual(const_mem_size_all[sig_f64.args], const_mem_size_f64)
def test_get_const_mem_specialized(self):
arr = np.arange(32, dtype=np.int64)
sig = void(int64[::1])
@cuda.jit(sig)
def const_array_use(x):
C = cuda.const.array_like(arr)
i = cuda.grid(1)
x[i] = C[i]
const_mem_size = const_array_use.get_const_mem_size(sig)
self.assertIsInstance(const_mem_size, int)
self.assertGreaterEqual(const_mem_size, arr.nbytes)
def test_get_shared_mem_per_block_unspecialized(self):
N = 10
# A kernel where the shared memory per block is likely to differ
# between different specializations
@cuda.jit
def simple_smem(ary):
sm = cuda.shared.array(N, dtype=ary.dtype)
for j in range(N):
sm[j] = j
for j in range(N):
ary[j] = sm[j]
# Call the kernel with different arguments to create two different
# definitions within the Dispatcher object
arr_f32 = np.zeros(N, dtype=np.float32)
arr_f64 = np.zeros(N, dtype=np.float64)
simple_smem[1, 1](arr_f32)
simple_smem[1, 1](arr_f64)
sig_f32 = void(float32[::1])
sig_f64 = void(float64[::1])
sh_mem_f32 = simple_smem.get_shared_mem_per_block(sig_f32)
sh_mem_f64 = simple_smem.get_shared_mem_per_block(sig_f64)
self.assertIsInstance(sh_mem_f32, int)
self.assertIsInstance(sh_mem_f64, int)
self.assertEqual(sh_mem_f32, N * 4)
self.assertEqual(sh_mem_f64, N * 8)
# Check that getting the shared memory per block for all signatures
# provides the same values as getting the shared mem per block for
# individual signatures.
sh_mem_f32_all = simple_smem.get_shared_mem_per_block()
sh_mem_f64_all = simple_smem.get_shared_mem_per_block()
self.assertEqual(sh_mem_f32_all[sig_f32.args], sh_mem_f32)
self.assertEqual(sh_mem_f64_all[sig_f64.args], sh_mem_f64)
def test_get_shared_mem_per_block_specialized(self):
@cuda.jit(void(float32[::1]))
def simple_smem(ary):
sm = cuda.shared.array(100, dtype=float32)
i = cuda.grid(1)
if i == 0:
for j in range(100):
sm[j] = j
cuda.syncthreads()
ary[i] = sm[i]
shared_mem_per_block = simple_smem.get_shared_mem_per_block()
self.assertIsInstance(shared_mem_per_block, int)
self.assertEqual(shared_mem_per_block, 400)
def test_get_max_threads_per_block_unspecialized(self):
N = 10
@cuda.jit
def simple_maxthreads(ary):
i = cuda.grid(1)
ary[i] = i
arr_f32 = np.zeros(N, dtype=np.float32)
simple_maxthreads[1, 1](arr_f32)
sig_f32 = void(float32[::1])
max_threads_f32 = simple_maxthreads.get_max_threads_per_block(sig_f32)
self.assertIsInstance(max_threads_f32, int)
self.assertGreater(max_threads_f32, 0)
max_threads_f32_all = simple_maxthreads.get_max_threads_per_block()
self.assertEqual(max_threads_f32_all[sig_f32.args], max_threads_f32)
def test_get_local_mem_per_thread_unspecialized(self):
# NOTE: A large amount of local memory must be allocated
# otherwise the compiler will optimize out the call to
# cuda.local.array and use local registers instead
N = 1000
@cuda.jit
def simple_lmem(ary):
lm = cuda.local.array(N, dtype=ary.dtype)
for j in range(N):
lm[j] = j
for j in range(N):
ary[j] = lm[j]
# Call the kernel with different arguments to create two different
# definitions within the Dispatcher object
arr_f32 = np.zeros(N, dtype=np.float32)
arr_f64 = np.zeros(N, dtype=np.float64)
simple_lmem[1, 1](arr_f32)
simple_lmem[1, 1](arr_f64)
sig_f32 = void(float32[::1])
sig_f64 = void(float64[::1])
local_mem_f32 = simple_lmem.get_local_mem_per_thread(sig_f32)
local_mem_f64 = simple_lmem.get_local_mem_per_thread(sig_f64)
self.assertIsInstance(local_mem_f32, int)
self.assertIsInstance(local_mem_f64, int)
self.assertGreaterEqual(local_mem_f32, N * 4)
self.assertGreaterEqual(local_mem_f64, N * 8)
# Check that getting the local memory per thread for all signatures
        # provides the same values as getting the local memory per thread for
# individual signatures.
local_mem_all = simple_lmem.get_local_mem_per_thread()
self.assertEqual(local_mem_all[sig_f32.args], local_mem_f32)
self.assertEqual(local_mem_all[sig_f64.args], local_mem_f64)
def test_get_local_mem_per_thread_specialized(self):
# NOTE: A large amount of local memory must be allocated
# otherwise the compiler will optimize out the call to
# cuda.local.array and use local registers instead
N = 1000
@cuda.jit(void(float32[::1]))
def simple_lmem(ary):
lm = cuda.local.array(N, dtype=ary.dtype)
for j in range(N):
lm[j] = j
for j in range(N):
ary[j] = lm[j]
local_mem_per_thread = simple_lmem.get_local_mem_per_thread()
self.assertIsInstance(local_mem_per_thread, int)
self.assertGreaterEqual(local_mem_per_thread, N * 4)
if __name__ == '__main__':
unittest.main()
|
TestDispatcherKernelProperties
|
python
|
getsentry__sentry
|
tests/sentry/seer/fetch_issues/test_utils.py
|
{
"start": 7354,
"end": 9545
}
|
class ____(TestCase):
def test_get_latest_issue_event_success(self):
data = load_data("python", timestamp=before_now(minutes=1))
event = self.store_event(data=data, project_id=self.project.id)
group = event.group
assert group is not None
result = get_latest_issue_event(group.id, self.organization.id)
assert result is not None
assert isinstance(result, dict)
assert result["id"] == group.id
assert result["title"] == group.title
assert len(result["events"]) == 1
assert result["events"][0]["id"] == event.event_id
def test_get_latest_issue_event_not_found(self):
nonexistent_group_id = 999999
result = get_latest_issue_event(nonexistent_group_id, self.organization.id)
assert result == {}
def test_get_latest_issue_event_with_short_id(self):
data = load_data("python", timestamp=before_now(minutes=1))
event = self.store_event(data=data, project_id=self.project.id)
group = event.group
assert group is not None
result = get_latest_issue_event(group.qualified_short_id, self.organization.id)
assert result is not None
assert isinstance(result, dict)
assert result["id"] == group.id
assert result["title"] == group.title
assert len(result["events"]) == 1
assert result["events"][0]["id"] == event.event_id
def test_get_latest_issue_event_with_short_id_not_found(self):
result = get_latest_issue_event("INVALID-SHORT-ID", self.organization.id)
assert result == {}
def test_get_latest_issue_event_no_events(self):
# Create a group but don't store any events for it
group = self.create_group(project=self.project)
result = get_latest_issue_event(group.id, self.organization.id)
assert result == {}
def test_get_latest_issue_event_wrong_organization(self):
event = self.store_event(data={}, project_id=self.project.id)
group = event.group
assert group is not None
results = get_latest_issue_event(group.id, self.organization.id + 1)
assert results == {}
|
TestGetLatestIssueEvent
|
python
|
pytorch__pytorch
|
test/torch_np/numpy_tests/core/test_multiarray.py
|
{
"start": 195606,
"end": 202258
}
|
class ____(MatmulCommon, TestCase):
def setUp(self):
self.matmul = np.matmul
def test_out_arg(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
msg = "Cannot cast"
out = np.zeros((5, 2), dtype=np.int32)
assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
# test out with type upcast to complex
out = np.zeros((5, 2), dtype=np.complex128)
c = self.matmul(a, b, out=out)
assert_(c is out)
c = c.astype(tgt.dtype)
assert_array_equal(c, tgt)
def test_empty_out(self):
# Check that the output cannot be broadcast, so that it cannot be
# size zero when the outer dimensions (iterator size) has size zero.
arr = np.ones((0, 1, 1))
out = np.ones((1, 1, 1))
assert self.matmul(arr, arr).shape == (0, 1, 1)
with pytest.raises((RuntimeError, ValueError)):
self.matmul(arr, arr, out=out)
def test_out_contiguous(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
v = np.array([1, 3], dtype=float)
tgt = np.dot(a, b)
tgt_mv = np.dot(a, v)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert_array_equal(c, tgt)
c = self.matmul(a, v, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
c = self.matmul(v, a.T, out=out[:, 0, 0])
assert_array_equal(c, tgt_mv)
# test out contiguous in only last dim
out = np.ones((10, 2), dtype=float)
c = self.matmul(a, b, out=out[::2, :])
assert_array_equal(c, tgt)
# test transposes of out, args
out = np.ones((5, 2), dtype=float)
c = self.matmul(b.T, a.T, out=out.T)
assert_array_equal(out, tgt)
@xfailIfTorchDynamo
def test_out_contiguous_2(self):
a = np.ones((5, 2), dtype=float)
b = np.array([[1, 3], [5, 7]], dtype=float)
# test out non-contiguous
out = np.ones((5, 2, 2), dtype=float)
c = self.matmul(a, b, out=out[..., 0])
assert c.tensor._base is out.tensor
m1 = np.arange(15.0).reshape(5, 3)
m2 = np.arange(21.0).reshape(3, 7)
m3 = np.arange(30.0).reshape(5, 6)[:, ::2] # non-contiguous
vc = np.arange(10.0)
vr = np.arange(6.0)
m0 = np.zeros((3, 0))
@parametrize(
"args",
(
# matrix-matrix
subtest((m1, m2), name="mm1"),
subtest((m2.T, m1.T), name="mm2"),
subtest((m2.T.copy(), m1.T), name="mm3"),
subtest((m2.T, m1.T.copy()), name="mm4"),
# matrix-matrix-transpose, contiguous and non
subtest((m1, m1.T), name="mmT1"),
subtest((m1.T, m1), name="mmT2"),
subtest((m1, m3.T), name="mmT3"),
subtest((m3, m1.T), name="mmT4"),
subtest((m3, m3.T), name="mmT5"),
subtest((m3.T, m3), name="mmT6"),
# matrix-matrix non-contiguous
subtest((m3, m2), name="mmN1"),
subtest((m2.T, m3.T), name="mmN2"),
subtest((m2.T.copy(), m3.T), name="mmN3"),
# vector-matrix, matrix-vector, contiguous
subtest((m1, vr[:3]), name="vm1"),
subtest((vc[:5], m1), name="vm2"),
subtest((m1.T, vc[:5]), name="vm3"),
subtest((vr[:3], m1.T), name="vm4"),
# vector-matrix, matrix-vector, vector non-contiguous
subtest((m1, vr[::2]), name="mvN1"),
subtest((vc[::2], m1), name="mvN2"),
subtest((m1.T, vc[::2]), name="mvN3"),
subtest((vr[::2], m1.T), name="mvN4"),
# vector-matrix, matrix-vector, matrix non-contiguous
subtest((m3, vr[:3]), name="mvN5"),
subtest((vc[:5], m3), name="mvN6"),
subtest((m3.T, vc[:5]), name="mvN7"),
subtest((vr[:3], m3.T), name="mvN8"),
# vector-matrix, matrix-vector, both non-contiguous
subtest((m3, vr[::2]), name="mvN9"),
subtest((vc[::2], m3), name="mvn10"),
subtest((m3.T, vc[::2]), name="mv11"),
subtest((vr[::2], m3.T), name="mv12"),
# size == 0
subtest((m0, m0.T), name="s0_1"),
subtest((m0.T, m0), name="s0_2"),
subtest((m1, m0), name="s0_3"),
subtest((m0.T, m1.T), name="s0_4"),
),
)
def test_dot_equivalent(self, args):
r1 = np.matmul(*args)
r2 = np.dot(*args)
assert_equal(r1, r2)
r3 = np.matmul(args[0].copy(), args[1].copy())
assert_equal(r1, r3)
@skip(reason="object arrays")
def test_matmul_exception_multiply(self):
# test that matmul fails if `__mul__` is missing
class add_not_multiply:
def __add__(self, other):
return self
a = np.full((3, 3), add_not_multiply())
with assert_raises(TypeError):
np.matmul(a, a)
@skip(reason="object arrays")
def test_matmul_exception_add(self):
# test that matmul fails if `__add__` is missing
class multiply_not_add:
def __mul__(self, other):
return self
a = np.full((3, 3), multiply_not_add())
with assert_raises(TypeError):
np.matmul(a, a)
def test_matmul_bool(self):
# gh-14439
a = np.array([[1, 0], [1, 1]], dtype=bool)
assert np.max(a.view(np.uint8)) == 1
b = np.matmul(a, a)
# matmul with boolean output should always be 0, 1
assert np.max(b.view(np.uint8)) == 1
# rg = np.random.default_rng(np.random.PCG64(43))
# d = rg.integers(2, size=4*5, dtype=np.int8)
# d = d.reshape(4, 5) > 0
np.random.seed(1234)
d = np.random.randint(2, size=(4, 5)) > 0
out1 = np.matmul(d, d.reshape(5, 4))
out2 = np.dot(d, d.reshape(5, 4))
assert_equal(out1, out2)
c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
assert not np.any(c)
|
TestMatmul
|
python
|
numba__numba
|
numba/tests/parfors_cache_usecases.py
|
{
"start": 454,
"end": 1778
}
|
class ____(TestCase):
"""
Tests for functionality of this module's functions.
    Note this does not define any "test_*" methods; instead, check_module()
    should be called by hand.
"""
def check_module(self, mod):
total_cache_hits = 0
for fn in [mod.arrayexprs_case, mod.prange_case, mod.caller_case]:
arr = np.ones(20)
np.testing.assert_allclose(
fn(arr), fn.py_func(arr),
)
# Accumulate cache hits
total_cache_hits += len(fn.stats.cache_hits)
self.assertGreater(
total_cache_hits, 0,
msg="At least one dispatcher has used the cache",
)
def run_module(self, mod):
# This just executes the module's functionality without asserting
        # anything about the cache; it's used in tests that ensure that
        # properties such as thread count aren't baked into the cached object.
for fn in [mod.arrayexprs_case, mod.prange_case, mod.caller_case]:
arr = np.ones(20)
np.testing.assert_allclose(
fn(arr), fn.py_func(arr),
)
def self_test():
mod = sys.modules[__name__]
_TestModule().check_module(mod)
def self_run():
mod = sys.modules[__name__]
_TestModule().run_module(mod)
|
_TestModule
|
python
|
apache__airflow
|
providers/google/tests/unit/google/cloud/triggers/test_pubsub.py
|
{
"start": 1893,
"end": 6417
}
|
class ____:
def test_async_pubsub_pull_trigger_serialization_should_execute_successfully(self, trigger):
"""
Asserts that the PubsubPullTrigger correctly serializes its arguments
and classpath.
"""
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.pubsub.PubsubPullTrigger"
assert kwargs == {
"project_id": PROJECT_ID,
"subscription": "subscription",
"max_messages": MAX_MESSAGES,
"ack_messages": ACK_MESSAGES,
"poke_interval": TEST_POLL_INTERVAL,
"gcp_conn_id": TEST_GCP_CONN_ID,
"impersonation_chain": None,
}
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubAsyncHook.pull")
async def test_async_pubsub_pull_trigger_return_event(self, mock_pull):
mock_pull.return_value = generate_messages(1)
trigger = PubsubPullTrigger(
project_id=PROJECT_ID,
subscription="subscription",
max_messages=MAX_MESSAGES,
ack_messages=False,
poke_interval=TEST_POLL_INTERVAL,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=None,
)
expected_event = TriggerEvent(
{
"status": "success",
"message": [
{
"ack_id": "1",
"message": {
"data": "TWVzc2FnZSAx",
"attributes": {"type": "generated message"},
"message_id": "",
"ordering_key": "",
},
"delivery_attempt": 0,
}
],
}
)
response = await trigger.run().asend(None)
assert response == expected_event
@mock.patch("airflow.providers.google.cloud.triggers.pubsub.PubSubAsyncHook")
def test_hook(self, mock_async_hook):
trigger = PubsubPullTrigger(
project_id=PROJECT_ID,
subscription="subscription",
max_messages=MAX_MESSAGES,
ack_messages=False,
poke_interval=TEST_POLL_INTERVAL,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=None,
)
async_hook_actual = trigger.hook
mock_async_hook.assert_called_once_with(
gcp_conn_id=trigger.gcp_conn_id,
impersonation_chain=trigger.impersonation_chain,
project_id=trigger.project_id,
)
assert async_hook_actual == mock_async_hook.return_value
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubAsyncHook.pull")
async def test_async_pubsub_pull_trigger_exception_during_pull(self, mock_pull):
"""Test that exceptions during pull are propagated and not caught."""
mock_pull.side_effect = GoogleAPICallError("Connection error")
trigger = PubsubPullTrigger(
project_id=PROJECT_ID,
subscription="subscription",
max_messages=MAX_MESSAGES,
ack_messages=False,
poke_interval=TEST_POLL_INTERVAL,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=None,
)
with pytest.raises(GoogleAPICallError, match="Connection error"):
await trigger.run().asend(None)
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubAsyncHook.acknowledge")
@mock.patch("airflow.providers.google.cloud.hooks.pubsub.PubSubAsyncHook.pull")
async def test_async_pubsub_pull_trigger_exception_during_ack(self, mock_pull, mock_acknowledge):
"""Test that exceptions during message acknowledgement are propagated."""
# Return a coroutine that can be awaited
mock_pull.return_value = generate_messages(1)
mock_acknowledge.side_effect = GoogleAPICallError("Acknowledgement failed")
trigger = PubsubPullTrigger(
project_id=PROJECT_ID,
subscription="subscription",
max_messages=MAX_MESSAGES,
ack_messages=True,
poke_interval=TEST_POLL_INTERVAL,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=None,
)
with pytest.raises(GoogleAPICallError, match="Acknowledgement failed"):
await trigger.run().asend(None)
|
TestPubsubPullTrigger
|
python
|
Lightning-AI__lightning
|
src/lightning/fabric/strategies/strategy.py
|
{
"start": 1708,
"end": 16359
}
|
class ____(ABC):
"""Base class for all strategies that change the behaviour of the training, validation and test- loop."""
def __init__(
self,
accelerator: Optional[Accelerator] = None,
checkpoint_io: Optional[CheckpointIO] = None,
precision: Optional[Precision] = None,
) -> None:
self._accelerator: Optional[Accelerator] = accelerator
self._checkpoint_io: Optional[CheckpointIO] = checkpoint_io
self._precision: Optional[Precision] = None
# Call the precision setter for input validation
self.precision = precision
self._launcher: Optional[_Launcher] = None
self._backward_sync_control: Optional[_BackwardSyncControl] = None
@property
@abstractmethod
def root_device(self) -> torch.device:
"""Returns the root device."""
@property
@abstractmethod
def is_global_zero(self) -> bool:
"""Whether the current process is the rank zero process not only on the local node, but for all nodes."""
@property
def launcher(self) -> Optional[_Launcher]:
return self._launcher
@property
def accelerator(self) -> Optional[Accelerator]:
return self._accelerator
@accelerator.setter
def accelerator(self, accelerator: Accelerator) -> None:
self._accelerator = accelerator
@property
def checkpoint_io(self) -> CheckpointIO:
if self._checkpoint_io is None:
self._checkpoint_io = TorchCheckpointIO()
return self._checkpoint_io
@checkpoint_io.setter
def checkpoint_io(self, io: CheckpointIO) -> None:
self._checkpoint_io = io
@property
def precision(self) -> Precision:
return self._precision if self._precision is not None else Precision()
@precision.setter
def precision(self, precision: Optional[Precision]) -> None:
self._precision = precision
def _configure_launcher(self) -> None:
"""Attach the launcher based on Strategy."""
def setup_environment(self) -> None:
"""Setup any processes or distributed connections.
This must be called by the framework at the beginning of every process, before any distributed communication
takes place.
"""
assert self.accelerator is not None
self.accelerator.setup_device(self.root_device)
def process_dataloader(self, dataloader: DataLoader) -> DataLoader:
"""Wraps the dataloader if necessary.
Args:
dataloader: iterable. Ideally of type: :class:`torch.utils.data.DataLoader`
"""
return dataloader
def tensor_init_context(self) -> AbstractContextManager:
"""Controls how tensors get created (device, dtype)."""
precision_init_ctx = self.precision.tensor_init_context()
stack = ExitStack()
stack.enter_context(self.root_device)
stack.enter_context(precision_init_ctx)
return stack
def module_init_context(self, empty_init: Optional[bool] = None) -> AbstractContextManager:
"""A context manager wrapping the model instantiation.
        Here, the strategy can control how the parameters of the model get created (device, dtype) and/or apply other
patches to the model.
Args:
empty_init: Whether to initialize the model with empty weights (uninitialized memory).
If ``None``, the strategy will decide. Some strategies may not support all options.
"""
precision_module_ctx = self.precision.module_init_context()
stack = ExitStack()
stack.enter_context(self.root_device)
stack.enter_context(_EmptyInit(enabled=bool(empty_init)))
stack.enter_context(precision_module_ctx)
return stack
def setup_module_and_optimizers(
self, module: Module, optimizers: list[Optimizer], scheduler: Optional["_LRScheduler"] = None
) -> tuple[Module, list[Optimizer], Optional["_LRScheduler"]]:
"""Set up a model and multiple optimizers together.
The returned objects are expected to be in the same order they were passed in. The default implementation will
call :meth:`setup_module` and :meth:`setup_optimizer` on the inputs.
"""
module = self.setup_module(module)
optimizers = [self.setup_optimizer(optimizer) for optimizer in optimizers]
return module, optimizers, scheduler
def setup_module(self, module: Module) -> Module:
"""Performs setup for the model, e.g., by wrapping it by another class."""
return module
def setup_optimizer(self, optimizer: Optimizer) -> Optimizer:
"""Performs setup for the optimizer, e.g., by wrapping it by another class."""
return optimizer
@abstractmethod
def module_to_device(self, module: Module) -> None:
"""Moves the model to the correct device."""
def batch_to_device(self, batch: Any, device: Optional[torch.device] = None) -> Any:
"""Moves the batch to the correct device.
The returned batch is of the same type as the input batch, just
having all tensors on the correct device.
Args:
batch: The batch of samples to move to the correct device
device: The target device
"""
device = device or self.root_device
return move_data_to_device(batch, device)
def backward(self, tensor: Tensor, module: Optional[Module], *args: Any, **kwargs: Any) -> None:
r"""Forwards backward-calls to the precision plugin."""
self.precision.pre_backward(tensor, module)
self.precision.backward(tensor, module, *args, **kwargs)
self.precision.post_backward(tensor, module)
def optimizer_step(
self,
optimizer: Optimizable,
**kwargs: Any,
) -> Any:
"""Performs the actual optimizer step.
Args:
optimizer: the optimizer performing the step
**kwargs: Any extra arguments to ``optimizer.step``
"""
return self.precision.optimizer_step(optimizer, **kwargs)
@abstractmethod
def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor:
"""Perform an all_gather on all processes.
Args:
tensor: the tensor to all_gather
group: the process group to gather results from
sync_grads: flag that allows users to synchronize gradients for all_gather op
"""
@abstractmethod
def all_reduce(
self,
tensor: Union[Tensor, Any],
group: Optional[Any] = None,
reduce_op: Optional[Union[ReduceOp, str]] = "mean",
) -> Union[Tensor, Any]:
"""Reduces the given tensor (e.g. across GPUs/processes).
Args:
tensor: the tensor to sync and reduce
group: the process group to reduce
reduce_op: the reduction operation. Defaults to 'mean'.
Can also be a string 'sum' or ReduceOp.
"""
@abstractmethod
def barrier(self, name: Optional[str] = None) -> None:
"""Synchronizes all processes which blocks processes until the whole group enters this function.
Args:
name: an optional name to pass into barrier.
"""
@abstractmethod
def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
"""Broadcasts an object to all processes.
Args:
obj: the object to broadcast
src: source rank
"""
def reduce_boolean_decision(self, decision: bool, all: bool = True) -> bool:
"""Reduce a boolean decision across all processes."""
return decision
def save_checkpoint(
self,
path: _PATH,
state: dict[str, Union[Module, Optimizer, Any]],
storage_options: Optional[Any] = None,
filter: Optional[dict[str, Callable[[str, Any], bool]]] = None,
) -> None:
"""Save model, optimizer, and other state as a checkpoint file.
Args:
path: A path to where the file(s) should be saved
state: A dictionary with contents to be saved. If the dict contains modules or optimizers, their
state-dict will be retrieved and converted automatically.
storage_options: Additional options for the ``CheckpointIO`` plugin
filter: An optional dictionary containing filter callables that return a boolean indicating whether the
given item should be saved (``True``) or filtered out (``False``). Each filter key should match a
state key, where its filter will be applied to the ``state_dict`` generated.
"""
state = self._convert_stateful_objects_in_state(state, filter=(filter or {}))
if self.is_global_zero:
self.checkpoint_io.save_checkpoint(checkpoint=state, path=path, storage_options=storage_options)
def get_module_state_dict(self, module: Module) -> dict[str, Union[Any, Tensor]]:
"""Returns model state."""
return module.state_dict()
def load_module_state_dict(
self, module: Module, state_dict: dict[str, Union[Any, Tensor]], strict: bool = True
) -> None:
"""Loads the given state into the model."""
module.load_state_dict(state_dict, strict=strict)
def get_optimizer_state(self, optimizer: Optimizer) -> dict[str, Tensor]:
"""Returns state of an optimizer.
Allows for syncing/collating optimizer state from processes in custom plugins.
"""
if hasattr(optimizer, "consolidate_state_dict"):
# there are optimizers like PyTorch's ZeroRedundancyOptimizer that shard their
# states, and to avoid OOM we consolidate the full state on rank 0 only
optimizer.consolidate_state_dict()
return optimizer.state_dict() if self.is_global_zero else {}
# for optimizers that are not sharded, we return the state dict on all ranks
return optimizer.state_dict()
def load_checkpoint(
self,
path: _PATH,
state: Optional[Union[Module, Optimizer, dict[str, Union[Module, Optimizer, Any]]]] = None,
strict: bool = True,
weights_only: Optional[bool] = None,
) -> dict[str, Any]:
"""Load the contents from a checkpoint and restore the state of the given objects.
Args:
path: A path to where the file is located
state: Can be one of:
- A dictionary of objects whose state will be restored in-place from the checkpoint path.
- ``None`` or the empty dict: The loaded checkpoint will be returned in full.
- A :class:`~torch.nn.Module` instance, if the checkpoint file contains a raw module state dict.
- A :class:`~torch.optim.Optimizer` instance, if the checkpoint file contains a raw optimizer state.
strict: Whether to enforce that the keys in `state` match the keys in the checkpoint.
Returns:
The remaining items that were not restored into the given state dictionary. If no state dictionary is
given, the full checkpoint will be returned.
"""
torch.cuda.empty_cache()
checkpoint = self.checkpoint_io.load_checkpoint(path, weights_only=weights_only)
if not state:
return checkpoint
if isinstance(state, Module):
self.load_module_state_dict(module=state, state_dict=checkpoint, strict=strict)
return {}
if isinstance(state, Optimizer):
state.load_state_dict(checkpoint)
return {}
_validate_keys_for_strict_loading(state.keys(), checkpoint.keys(), strict=strict)
for name, obj in state.copy().items():
if name not in checkpoint:
continue
if isinstance(obj, _Stateful):
if isinstance(obj, Module):
self.load_module_state_dict(module=obj, state_dict=checkpoint.pop(name), strict=strict)
else:
obj.load_state_dict(checkpoint.pop(name))
else:
state[name] = checkpoint.pop(name)
return checkpoint
def teardown(self) -> None:
"""This method is called to teardown the training process.
It is the right place to release memory and free other resources.
"""
self.precision.teardown()
assert self.accelerator is not None
self.accelerator.teardown()
self.checkpoint_io.teardown()
def clip_gradients_norm(
self,
module: torch.nn.Module,
optimizer: Optimizer,
max_norm: Union[float, int],
norm_type: Union[float, int] = 2.0,
error_if_nonfinite: bool = True,
) -> torch.Tensor:
"""Clip gradients by norm."""
self.precision.unscale_gradients(optimizer)
parameters = self.precision.main_params(optimizer)
return torch.nn.utils.clip_grad_norm_(
parameters, max_norm=max_norm, norm_type=norm_type, error_if_nonfinite=error_if_nonfinite
)
def clip_gradients_value(self, module: torch.nn.Module, optimizer: Optimizer, clip_val: Union[float, int]) -> None:
"""Clip gradients by value."""
self.precision.unscale_gradients(optimizer)
parameters = self.precision.main_params(optimizer)
return torch.nn.utils.clip_grad_value_(parameters, clip_value=clip_val)
@classmethod
def register_strategies(cls, strategy_registry: _StrategyRegistry) -> None:
pass
def _err_msg_joint_setup_required(self) -> str:
return (
f"The `{type(self).__name__}` does not support setting up the module and optimizer(s) independently."
" Please call `setup_module_and_optimizers(model, [optimizer, ...])` to jointly set them up."
)
def _convert_stateful_objects_in_state(
self, state: dict[str, Union[Module, Optimizer, Any]], filter: dict[str, Callable[[str, Any], bool]]
) -> dict[str, Any]:
converted_state: dict[str, Any] = {}
for key, obj in state.items():
# convert the state
if isinstance(obj, Module):
converted = self.get_module_state_dict(module=obj)
elif isinstance(obj, Optimizer):
converted = self.get_optimizer_state(optimizer=obj)
elif isinstance(obj, _Stateful):
converted = obj.state_dict()
else:
converted = obj
_apply_filter(key, filter, converted, converted_state)
return converted_state
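# Added illustration (hedged, not part of the original module): a minimal single-process
# subclass sketch showing which abstract members of the base class above (referred to here
# by this record's target name) must be provided. The subclass name and the no-op bodies
# are illustrative only; the types used are the ones already imported by the base class.
class _SingleDeviceSketch(Strategy):
    def __init__(self, device: str = "cpu", **kwargs: Any) -> None:
        super().__init__(**kwargs)
        self._device = torch.device(device)
    @property
    def root_device(self) -> torch.device:
        return self._device
    @property
    def is_global_zero(self) -> bool:
        return True  # only one process, so it is always "rank zero"
    def module_to_device(self, module: Module) -> None:
        module.to(self.root_device)
    def all_gather(self, tensor: Tensor, group: Optional[Any] = None, sync_grads: bool = False) -> Tensor:
        return tensor  # nothing to gather across a single process
    def all_reduce(
        self,
        tensor: Union[Tensor, Any],
        group: Optional[Any] = None,
        reduce_op: Optional[Union[ReduceOp, str]] = "mean",
    ) -> Union[Tensor, Any]:
        return tensor  # reduction is a no-op with one process
    def barrier(self, name: Optional[str] = None) -> None:
        pass  # no synchronization needed
    def broadcast(self, obj: TBroadcast, src: int = 0) -> TBroadcast:
        return obj  # the single process already holds the object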
|
Strategy
|
python
|
huggingface__transformers
|
src/transformers/models/roberta_prelayernorm/modeling_roberta_prelayernorm.py
|
{
"start": 21259,
"end": 22775
}
|
class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[RobertaPreLayerNormLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)]
)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
encoder_hidden_states: Optional[torch.FloatTensor] = None,
encoder_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(
hidden_states,
attention_mask,
encoder_hidden_states, # as a positional argument for gradient checkpointing
encoder_attention_mask=encoder_attention_mask,
past_key_values=past_key_values,
cache_position=cache_position,
**kwargs,
)
return BaseModelOutputWithPastAndCrossAttentions(
last_hidden_state=hidden_states,
past_key_values=past_key_values if use_cache else None,
)
# Copied from transformers.models.bert.modeling_bert.BertPooler
|
RobertaPreLayerNormEncoder
|
python
|
airbytehq__airbyte
|
airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/steps/bump_version.py
|
{
"start": 944,
"end": 4501
}
|
class ____(StepModifyingFiles):
context: ConnectorContext
@property
def title(self) -> str:
return f"Set connector version to {self.new_version}"
def __init__(
self,
context: ConnectorContext,
connector_directory: dagger.Directory,
new_version: str,
) -> None:
super().__init__(context, connector_directory)
self.new_version = new_version
@staticmethod
async def _set_version_in_metadata(new_version: str, connector_directory: dagger.Directory) -> dagger.Directory:
raw_metadata = await dagger_read_file(connector_directory, METADATA_FILE_NAME)
current_metadata = yaml.safe_load(raw_metadata)
try:
current_version = current_metadata["data"]["dockerImageTag"]
except KeyError:
raise ConnectorVersionNotFoundError("dockerImageTag not found in metadata file")
        # We use replace here instead of mutating the deserialized yaml to avoid messing up the comments in the metadata file.
new_raw_metadata = raw_metadata.replace("dockerImageTag: " + current_version, "dockerImageTag: " + new_version)
updated_connector_dir = dagger_write_file(connector_directory, METADATA_FILE_NAME, new_raw_metadata)
return updated_connector_dir
@staticmethod
async def _set_version_in_poetry_package(
container_with_poetry: dagger.Container, connector_directory: dagger.Directory, new_version: str
) -> dagger.Directory:
try:
connector_directory_with_updated_pyproject = await (
container_with_poetry.with_directory("/connector", connector_directory)
.with_workdir("/connector")
.with_exec(["poetry", "version", new_version], use_entrypoint=True)
.directory("/connector")
)
except dagger.ExecError as e:
raise PoetryVersionBumpError(f"Failed to bump version in pyproject.toml: {e}")
return connector_directory_with_updated_pyproject
async def _run(self) -> StepResult:
original_connector_directory = self.modified_directory
try:
self.modified_directory = await self._set_version_in_metadata(self.new_version, original_connector_directory)
self.modified_files.append(METADATA_FILE_NAME)
except (FileNotFoundError, ConnectorVersionNotFoundError) as e:
return StepResult(
step=self,
status=StepStatus.FAILURE,
stderr="Connector does not have a metadata file or the version is not set in the metadata file",
exc_info=e,
)
if self.context.connector.pyproject_file_path.is_file():
try:
poetry_container = with_poetry(self.context)
self.modified_directory = await self._set_version_in_poetry_package(
poetry_container, self.modified_directory, self.new_version
)
self.modified_files.append(PYPROJECT_FILE_NAME)
except PoetryVersionBumpError as e:
return StepResult(
step=self,
status=StepStatus.FAILURE,
stderr="Failed to bump version in pyproject.toml",
exc_info=e,
)
return StepResult(
step=self,
status=StepStatus.SUCCESS,
stdout=f"Updated connector to {self.new_version}",
output=self.modified_directory,
)
|
SetConnectorVersion
|
python
|
GoogleCloudPlatform__python-docs-samples
|
appengine/standard/endpoints/multiapi/main.py
|
{
"start": 1239,
"end": 1661
}
|
class ____(remote.Service):
@endpoints.method(Request, Response, path="bookmark")
def get_bookmark(self, request):
return Response()
@endpoints.method(Request, Response)
def best_sellers_list(self, request):
return Response()
# [END endpoints_books]
# [END endpoints_multiclass]
# [START endpoints_api_server]
api = endpoints.api_server([api_collection])
# [END endpoints_api_server]
|
Books
|
python
|
pytorch__pytorch
|
torch/utils/data/datapipes/iter/combinatorics.py
|
{
"start": 1988,
"end": 6513
}
|
class ____(IterDataPipe[_T_co]):
r"""
Shuffle the input DataPipe with a buffer (functional name: ``shuffle``).
The buffer with ``buffer_size`` is filled with elements from the datapipe first. Then,
each item will be yielded from the buffer by reservoir sampling via iterator.
``buffer_size`` is required to be larger than ``0``. For ``buffer_size == 1``, the
datapipe is not shuffled. In order to fully shuffle all elements from datapipe,
``buffer_size`` is required to be greater than or equal to the size of datapipe.
When it is used with :class:`torch.utils.data.DataLoader`, the methods to
set up random seed are different based on :attr:`num_workers`.
For single-process mode (:attr:`num_workers == 0`), the random seed is set before
the :class:`~torch.utils.data.DataLoader` in the main process. For multi-process
    mode (:attr:`num_workers > 0`), `worker_init_fn` is used to set up a random seed
for each worker process.
Args:
datapipe: The IterDataPipe being shuffled
buffer_size: The buffer size for shuffling (default to ``10000``)
unbatch_level: Specifies if it is necessary to unbatch source data before
applying the shuffle
Example:
>>> # xdoctest: +SKIP
>>> from torchdata.datapipes.iter import IterableWrapper
>>> dp = IterableWrapper(range(10))
>>> shuffle_dp = dp.shuffle()
>>> list(shuffle_dp)
[0, 4, 1, 6, 3, 2, 9, 5, 7, 8]
"""
datapipe: IterDataPipe[_T_co]
buffer_size: int
_buffer: list[_T_co]
_enabled: bool
_seed: int | None
_rng: random.Random
def __init__(
self,
datapipe: IterDataPipe[_T_co],
*,
buffer_size: int = 10000,
unbatch_level: int = 0,
) -> None:
super().__init__()
# TODO: Performance optimization
# buffer can be a fixed size and remove expensive `append()` and `len()` operations
self._buffer: list[_T_co] = []
if buffer_size <= 0:
raise AssertionError("buffer_size should be larger than 0")
if unbatch_level == 0:
self.datapipe = datapipe
else:
self.datapipe = datapipe.unbatch(unbatch_level=unbatch_level)
self.buffer_size = buffer_size
self._enabled = True
self._seed = None
self._rng = random.Random()
def set_shuffle(self, shuffle=True):
self._enabled = shuffle
return self
def set_seed(self, seed: int):
self._seed = seed
return self
def __iter__(self) -> Iterator[_T_co]:
if not self._enabled:
yield from self.datapipe
else:
for x in self.datapipe:
if len(self._buffer) == self.buffer_size:
idx = self._rng.randint(0, len(self._buffer) - 1)
val, self._buffer[idx] = self._buffer[idx], x
yield val
else:
self._buffer.append(x)
while self._buffer:
idx = self._rng.randint(0, len(self._buffer) - 1)
yield self._buffer.pop(idx)
def __len__(self) -> int:
if isinstance(self.datapipe, Sized):
return len(self.datapipe)
raise TypeError(f"{type(self).__name__} instance doesn't have valid length")
def reset(self) -> None:
self._buffer = []
if self._enabled:
if self._seed is None:
self._seed = int(torch.empty((), dtype=torch.int64).random_().item())
self._rng.seed(self._seed)
self._seed = None
def __getstate__(self):
state = (
self.datapipe,
self.buffer_size,
self._enabled,
self._seed,
self._buffer,
self._rng.getstate(),
self._valid_iterator_id,
self._number_of_samples_yielded,
)
if IterDataPipe.getstate_hook is not None:
return IterDataPipe.getstate_hook(state)
return state
def __setstate__(self, state):
(
self.datapipe,
self.buffer_size,
self._enabled,
self._seed,
self._buffer,
rng_state,
self._valid_iterator_id,
self._number_of_samples_yielded,
) = state
self._rng = random.Random()
self._rng.setstate(rng_state)
def __del__(self) -> None:
self._buffer.clear()
|
ShufflerIterDataPipe
|
python
|
keon__algorithms
|
tests/test_sort.py
|
{
"start": 734,
"end": 3449
}
|
class ____(unittest.TestCase):
def test_bogo_sort(self):
self.assertTrue(is_sorted(bogo_sort([1, 23, 5])))
def test_bitonic_sort(self):
self.assertTrue(is_sorted(bitonic_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
def test_bubble_sort(self):
self.assertTrue(is_sorted(bubble_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_comb_sort(self):
self.assertTrue(is_sorted(comb_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_counting_sort(self):
self.assertTrue(is_sorted(counting_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
def test_cycle_sort(self):
self.assertTrue(is_sorted(cycle_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_exchange_sort(self):
self.assertTrue(is_sorted(exchange_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
def test_heap_sort(self):
self.assertTrue(is_sorted(max_heap_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
self.assertTrue(is_sorted(min_heap_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
def test_insertion_sort(self):
        self.assertTrue(is_sorted(insertion_sort([1, 3, 2, 5, 65,
                                                  23, 57, 1232])))
def test_merge_sort(self):
self.assertTrue(is_sorted(merge_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_pancake_sort(self):
self.assertTrue(is_sorted(pancake_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
def test_pigeonhole_sort(self):
self.assertTrue(is_sorted(pigeonhole_sort([1, 5, 65, 23, 57, 1232])))
def test_quick_sort(self):
self.assertTrue(is_sorted(quick_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_selection_sort(self):
self.assertTrue(is_sorted(selection_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
def test_bucket_sort(self):
self.assertTrue(is_sorted(bucket_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_shell_sort(self):
self.assertTrue(is_sorted(shell_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_radix_sort(self):
self.assertTrue(is_sorted(radix_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_gnome_sort(self):
self.assertTrue(is_sorted(gnome_sort([1, 3, 2, 5, 65, 23, 57, 1232])))
def test_cocktail_shaker_sort(self):
self.assertTrue(is_sorted(cocktail_shaker_sort([1, 3, 2, 5, 65,
23, 57, 1232])))
|
TestSuite
|
python
|
django__django
|
tests/model_regress/models.py
|
{
"start": 1251,
"end": 1413
}
|
class ____(models.Model):
name = models.CharField(max_length=10, primary_key=True)
# Chained foreign keys with to_field produce incorrect query #18432
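# Added illustration (hedged, not from the original models.py): one shape the "chained
# foreign keys with to_field" setup referenced by ticket #18432 can take. The model names
# below are made up; the primary-key model is referred to by this record's target name.
class TicketChild(models.Model):
    code = models.CharField(max_length=10, unique=True)
    parent = models.ForeignKey(NonAutoPK, models.CASCADE)
class TicketGrandchild(models.Model):
    parent = models.ForeignKey(TicketChild, models.CASCADE, to_field="code")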
|
NonAutoPK
|
python
|
allegroai__clearml
|
clearml/backend_api/session/session.py
|
{
"start": 1707,
"end": 46551
}
|
class ____(TokenManager):
"""ClearML API Session class."""
_AUTHORIZATION_HEADER = "Authorization"
_WORKER_HEADER = (
"X-ClearML-Worker",
"X-Trains-Worker",
)
_ASYNC_HEADER = (
"X-ClearML-Async",
"X-Trains-Async",
)
_CLIENT_HEADER = (
"X-ClearML-Client",
"X-Trains-Client",
)
_async_status_code = 202
_session_requests = 0
_session_initial_timeout = (3.0, 10.0)
_session_timeout = (10.0, 300.0)
_write_session_data_size = 15000
_write_session_timeout = (300.0, 300.0)
_sessions_created = 0
_ssl_error_count_verbosity = 2
_offline_mode = ENV_OFFLINE_MODE.get()
_offline_default_version = "2.9"
# we want to keep track of sessions, but we also want to allow them to be collected by the GC if they are not used anymore
_sessions_weakrefs = []
_client = [(__package__.partition(".")[0], __version__)]
api_version = "2.9" # this default version should match the lowest api version we have under service
server_version = "1.0.0"
max_api_version = "2.9"
feature_set = "basic"
default_demo_host = "https://demoapi.demo.clear.ml"
default_host = "https://api.clear.ml"
default_web = "https://app.clear.ml"
default_files = "https://files.clear.ml"
default_key = "" # "EGRTCO8JMSIGI6S39GTP43NFWXDQOW"
default_secret = "" # "x!XTov_G-#vspE*Y(h$Anm&DIc5Ou-F)jsl$PdOyj5wG1&E!Z8"
force_max_api_version = ENV_FORCE_MAX_API_VERSION.get()
legacy_file_servers = ["https://files.community.clear.ml"]
# TODO: add requests.codes.gateway_timeout once we support async commits
_retry_codes = [
requests.codes.bad_gateway,
requests.codes.service_unavailable,
requests.codes.bandwidth_limit_exceeded,
requests.codes.too_many_requests,
]
@property
def access_key(self) -> str:
return self.__access_key
@property
def secret_key(self) -> str:
return self.__secret_key
@property
def auth_token(self) -> Optional[str]:
return self.__auth_token
@property
def host(self) -> str:
return self.__host
@property
def worker(self) -> str:
return self.__worker
def __init__(
self,
worker: Optional[str] = None,
api_key: Optional[str] = None,
secret_key: Optional[str] = None,
host: Optional[str] = None,
logger: Optional[logging.Logger] = None,
verbose: Optional[bool] = None,
config: Optional[ConfigTree] = None,
http_retries_config: Optional[dict] = None,
**kwargs: Any
) -> None:
self.__class__._sessions_weakrefs.append(weakref.ref(self))
self._verbose = verbose if verbose is not None else ENV_VERBOSE.get()
if logger is not None:
self._logger = logger
else:
level = resolve_logging_level(ENV_VERBOSE.get(converter=str)) if self._verbose else logging.INFO
self._logger = get_logger(level=level, stream=sys.stderr if level is logging.DEBUG else None)
self.__worker = worker or self.get_worker_host_name()
self.client = ", ".join("{}-{}".format(*x) for x in self._client)
self.__init_api_key = api_key
self.__init_secret_key = secret_key
self.__init_host = host
self.__init_http_retries_config = http_retries_config
self.__token_manager_kwargs = kwargs
if config is not None:
self.config = config
else:
from clearml.config import ConfigWrapper
self.config = ConfigWrapper._init()
self._connect()
@classmethod
def add_client(cls, client: str, value: str, first: bool = True) -> None:
# noinspection PyBroadException
try:
if not any(True for c in cls._client if c[0] == client):
if first:
cls._client.insert(0, (client, value))
else:
cls._client.append((client, value))
cls.client = ", ".join("{}-{}".format(*x) for x in cls._client)
except Exception:
pass
def _connect(self) -> None:
if self._offline_mode:
return
self._ssl_error_count_verbosity = self.config.get(
"api.ssl_error_count_verbosity", self._ssl_error_count_verbosity
)
self.__host = self.__init_host or self.get_api_server_host(config=self.config)
if not self.__host:
raise ValueError("ClearML host was not set, check your configuration file or environment variable")
self.__host = self.__host.strip("/")
self.__http_retries_config = (
self.__init_http_retries_config or self.config.get("api.http.retries", ConfigTree()).as_plain_ordered_dict()
)
self.__http_retries_config["status_forcelist"] = self._get_retry_codes()
self.__http_retries_config["config"] = self.config
self.__http_session = get_http_session_with_retry(**self.__http_retries_config)
self.__http_session.write_timeout = self._write_session_timeout
self.__http_session.request_size_threshold = self._write_session_data_size
self.__max_req_size = self.config.get("api.http.max_req_size", None)
if not self.__max_req_size:
raise ValueError("missing max request size")
token_expiration_threshold_sec = self.config.get("api.auth.token_expiration_threshold_sec", 60)
req_token_expiration_sec = self.config.get(
"api.auth.request_token_expiration_sec",
self.config.get("api.auth.req_token_expiration_sec", None),
)
self.__auth_token = None
self._update_default_api_method()
if ENV_AUTH_TOKEN.get():
self.__access_key = self.__secret_key = None
self.__auth_token = ENV_AUTH_TOKEN.get()
            # if we use a token, override the threshold so that if we are less than
            # 3600 seconds (1 hour) away from the token expiration date, we ask for a new one
token_expiration_threshold_sec = max(token_expiration_threshold_sec, 3600)
else:
self.__access_key = self.__init_api_key or ENV_ACCESS_KEY.get(
default=(self.config.get("api.credentials.access_key", None) or self.default_key)
)
self.__secret_key = self.__init_secret_key or ENV_SECRET_KEY.get(
default=(self.config.get("api.credentials.secret_key", None) or self.default_secret)
)
if not self.secret_key and not self.access_key and not self.__auth_token:
raise MissingConfigError()
super(Session, self).__init__(
**self.__token_manager_kwargs,
token_expiration_threshold_sec=token_expiration_threshold_sec,
req_token_expiration_sec=req_token_expiration_sec
)
self.refresh_token()
local_logger = self._LocalLogger(self._logger)
# update api version from server response
try:
token_dict = TokenManager.get_decoded_token(self.token)
api_version = token_dict.get("api_version")
if not api_version:
api_version = "2.2" if token_dict.get("env", "") == "prod" else Session.api_version
Session.server_version = token_dict.get("server_version")
if Session.server_version:
self.add_client("clearml-server", Session.server_version)
Session.max_api_version = Session.api_version = str(api_version)
Session.feature_set = str(token_dict.get("feature_set", self.feature_set) or "basic")
except (jwt.DecodeError, ValueError):
local_logger().warning("Failed parsing server API level, defaulting to {}".format(Session.api_version))
        # now set up the session retry reporting, so consecutive retries will show a warning
# we do that here, so if we have problems authenticating, we see them immediately
# notice: this is across the board warning omission
urllib_log_warning_setup(
total_retries=self.__http_retries_config.get("total", 0),
display_warning_after=3,
)
if self.force_max_api_version and self.check_min_api_version(self.force_max_api_version):
Session.max_api_version = Session.api_version = str(self.force_max_api_version)
# update only after we have max_api
self.__class__._sessions_created += 1
if self._load_vaults():
from clearml.config import ConfigWrapper, ConfigSDKWrapper
ConfigWrapper.set_config_impl(self.config)
ConfigSDKWrapper.clear_config_impl()
self._apply_config_sections(local_logger)
self._update_default_api_method()
def _update_default_api_method(self) -> None:
if not ENV_API_DEFAULT_REQ_METHOD.get(default=None) and self.config.get("api.http.default_method", None):
def_method = str(self.config.get("api.http.default_method", None)).strip()
if def_method.upper() not in ("GET", "POST", "PUT"):
raise ValueError(
"api.http.default_method variable must be 'get', 'post' or 'put' (any case is allowed)."
)
Request.def_method = def_method
Request._method = Request.def_method
def _get_retry_codes(self) -> List[int]:
retry_codes = set(self._retry_codes)
extra = self.config.get("api.http.extra_retry_codes", [])
if ENV_API_EXTRA_RETRY_CODES.get():
extra = [s.strip() for s in ENV_API_EXTRA_RETRY_CODES.get().split(",") if s.strip()]
for code in extra or []:
try:
retry_codes.add(int(code))
except (ValueError, TypeError):
print("Warning: invalid extra HTTP retry code detected: {}".format(code))
if retry_codes.difference(self._retry_codes):
print("Using extra HTTP retry codes {}".format(sorted(retry_codes.difference(self._retry_codes))))
return list(retry_codes)
def _read_vaults(self) -> Optional[List[dict]]:
# () -> Optional[List[dict]]
if not self.check_min_api_version("2.15") or self.feature_set == "basic":
return
def parse(vault: dict) -> Optional[Union[ConfigTree, dict]]:
# noinspection PyBroadException
try:
d = vault.get("data", None)
if d:
r = ConfigFactory.parse_string(d)
if isinstance(r, (ConfigTree, dict)):
return r
except Exception as e:
(self._logger or get_logger()).warning(
"Failed parsing vault {}: {}".format(vault.get("description", "<unknown>"), e)
)
# noinspection PyBroadException
try:
# Use params and not data/json otherwise payload might be dropped if we're using GET with a strict firewall
res = self.send_request("users", "get_vaults", params="enabled=true&types=config&types=config")
if res.ok:
vaults = res.json().get("data", {}).get("vaults", [])
data = list(filter(None, map(parse, vaults)))
if data:
return data
elif res.status_code != 404:
raise Exception(res.json().get("meta", {}).get("result_msg", res.text))
except Exception as ex:
(self._logger or get_logger()).warning("Failed getting vaults: {}".format(ex))
def _load_vaults(self) -> Optional[bool]:
# () -> Optional[bool]
if ENV_DISABLE_VAULT_SUPPORT.get():
# (self._logger or get_logger()).debug("Vault support is disabled")
return
data = self._read_vaults()
if data:
self.config.set_overrides(*data)
return True
def _apply_config_sections(self, local_logger: Any) -> None:
# noqa: F821
default = self.config.get("sdk.apply_environment", False)
if ENV_ENABLE_ENV_CONFIG_SECTION.get(default=default):
try:
keys = apply_environment(self.config)
if keys:
print("Environment variables set from configuration: {}".format(keys))
except Exception as ex:
local_logger().warning("Failed applying environment from configuration: {}".format(ex))
default = self.config.get("sdk.apply_files", default=False)
if ENV_ENABLE_FILES_CONFIG_SECTION.get(default=default):
try:
apply_files(self.config)
except Exception as ex:
local_logger().warning("Failed applying files from configuration: {}".format(ex))
def _send_request(
self,
service: str,
action: str,
version: Optional[str] = None,
method: Optional[str] = None,
headers: Optional[dict] = None,
auth: Optional[requests.auth.AuthBase] = None,
data: Any = None,
json: Optional[Union[dict, list]] = None,
refresh_token_if_unauthorized: bool = True,
params: Optional[dict] = None,
) -> Optional[requests.Response]:
"""Internal implementation for making a raw API request.
- Constructs the api endpoint name
- Injects the worker id into the headers
- Allows custom authorization using a requests auth object
- Intercepts `Unauthorized` responses and automatically attempts to refresh the session token once in this
case (only once). This is done since permissions are embedded in the token, and addresses a case where
server-side permissions have changed but are not reflected in the current token. Refreshing the token will
generate a token with the updated permissions.
NOTE: This method does not handle authorization. Credentials or token should be provided using the auth or
headers arguments, otherwise a successful authorization depends on the session containing a valid cookie
set during the last login call (which may not be there if the server's cookie domain does not match the URL
we use to access the server)
"""
if self._offline_mode:
return None
if not method:
method = Request.def_method
res = None
host = self.host
headers = headers.copy() if headers else {}
for h in self._WORKER_HEADER:
headers[h] = self.worker
for h in self._CLIENT_HEADER:
headers[h] = self.client
token_refreshed_on_error = False
url = ("{host}/v{version}/{service}.{action}" if version else "{host}/{service}.{action}").format(**locals())
retry_counter = 0
while True:
if data and len(data) > self._write_session_data_size:
timeout = self._write_session_timeout
elif self._session_requests < 1:
timeout = self._session_initial_timeout
else:
timeout = self._session_timeout
try:
if self._verbose and self._logger:
size = len(data or "")
if json and self._logger.level == logging.DEBUG:
size += len(json_lib.dumps(json))
self._logger.debug(
"%s: %s [%d bytes, %d headers]",
method.upper(),
url,
size,
len(headers or {}),
)
res = self.__http_session.request(
method,
url,
headers=headers,
auth=auth,
data=data,
json=json,
timeout=timeout,
params=params,
)
if self._verbose and self._logger:
self._logger.debug("--> took %s", res.elapsed)
# except Exception as ex:
except SSLError as ex:
retry_counter += 1
# we should retry
if retry_counter >= self._ssl_error_count_verbosity:
(self._logger or get_logger()).warning("SSLError Retrying {}".format(ex))
sleep(0.1)
continue
except (
ChunkedEncodingError,
ContentDecodingError,
StreamConsumedError,
) as ex:
retry_counter += 1
# we should retry
if retry_counter >= self._ssl_error_count_verbosity:
(self._logger or get_logger()).warning("Network decoding error Retrying {}".format(ex))
sleep(0.1)
continue
if (
refresh_token_if_unauthorized
and res.status_code == requests.codes.unauthorized
and not token_refreshed_on_error
):
# it seems we're unauthorized, so we'll try to refresh our token once in case permissions changed since
# the last time we got the token, and try again
self.refresh_token()
token_refreshed_on_error = True
# try again
retry_counter += 1
continue
if res.status_code == requests.codes.service_unavailable and self.config.get(
"api.http.wait_on_maintenance_forever", True
):
(self._logger or get_logger()).warning(
"Service unavailable: {} is undergoing maintenance, retrying...".format(host)
)
retry_counter += 1
continue
break
self._session_requests += 1
return res
def add_auth_headers(self, headers: Dict[str, str]) -> Dict[str, str]:
headers[self._AUTHORIZATION_HEADER] = "Bearer {}".format(self.token)
return headers
def send_request(
self,
service: str,
action: str,
version: Optional[str] = None,
method: Optional[str] = None,
headers: Optional[dict] = None,
data: Optional[Union[dict, bytes, IOBase]] = None,
json: Optional[Any] = None,
async_enable: bool = False,
params: Optional[dict] = None,
) -> requests.Response:
"""
Send a raw API request.
:param service: service name
:param action: action name
:param version: version number (default is the preconfigured api version)
:param method: method type (default is 'get')
:param headers: request headers (authorization and content type headers will be automatically added)
:param json: JSON to send in the request body (a JSON-serializable object or builtin-types construct; if
used, the content type will be application/json)
:param data: Dictionary, bytes, or file-like object to send in the request body
:param async_enable: whether request is asynchronous
:param params: additional query parameters
:return: requests Response instance
"""
if not method:
method = Request.def_method
headers = self.add_auth_headers(headers.copy() if headers else {})
if async_enable:
for h in self._ASYNC_HEADER:
headers[h] = "1"
return self._send_request(
service=service,
action=action,
version=version,
method=method,
headers=headers,
data=data,
json=json,
params=params,
)
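# --- Hedged usage sketch (editor's addition, not part of the original source) ---
# Assuming a configured Session instance named `session`, a raw call could look like:
#
#     res = session.send_request(service="tasks", action="get_all", json={"page_size": 10})
#     if res is not None and res.ok:
#         payload = res.json()
#
# The service/action pair is formatted into `<host>[/v<version>]/<service>.<action>` by
# `_send_request`, and the bearer token is injected via `add_auth_headers` above.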
def send_request_batch(
self,
service: str,
action: str,
version: Optional[str] = None,
headers: Optional[dict] = None,
data: Optional[Iterable[bytes]] = None,
json: Optional[Iterable[Any]] = None,
method: Optional[str] = None,
) -> List[requests.Response]:
"""
Send a raw batch API request. Batch requests always use application/json-lines content type.
:param service: service name
:param action: action name
:param version: version number (default is the preconfigured api version)
:param headers: request headers (authorization and content type headers will be automatically added)
:param json: iterable of json items (batched items, jsonable objects or builtin types constructs). These will
be sent as a multi-line payload in the request body.
:param data: iterable of bytes objects (batched items). These will be sent as a multi-line payload in the
request body.
:param method: HTTP method
:return: list of requests Response instances (one per payload slice sent)
"""
if not all(isinstance(x, (list, tuple, type(None), types.GeneratorType)) for x in (data, json)):
raise ValueError("Expecting list, tuple or generator in 'data' or 'json'")
if not data and not json:
# Missing data (data or json), batch requests are meaningless without it.
return None
if not method:
method = Request.def_method
headers = headers.copy() if headers else {}
headers["Content-Type"] = "application/json-lines"
if data:
req_data = "\n".join(data)
else:
req_data = "\n".join(json_lib.dumps(x) for x in json)
cur = 0
results = []
while True:
size = self.__max_req_size
slice = req_data[cur : cur + size]
if not slice:
break
if len(slice) < size:
# this is the remainder, no need to search for newline
pass
elif slice[-1] != "\n":
# search for the last newline in order to send a coherent request
size = slice.rfind("\n") + 1
# readjust the slice
slice = req_data[cur : cur + size]
if not slice:
(self._logger or get_logger()).error(
"{}.{} request exceeds limit {} > {} bytes".format(
service, action, len(req_data), self.__max_req_size
)
)
# skip the payload that could not be sent
size = req_data[cur:].find("\n") + 1
if size == 0:
# the error occurred on the last chunk of the payload
break
if slice:
res = self.send_request(
method=method,
service=service,
action=action,
data=slice,
headers=headers,
version=version,
)
results.append(res)
cur += size
return results
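# --- Hedged usage sketch (editor's addition, not part of the original source) ---
# The batch path joins the items into a newline-delimited (json-lines) payload, e.g.:
#
#     responses = session.send_request_batch(
#         service="events", action="add_batch", json=[{"type": "log"}, {"type": "log"}]
#     )
#
# Each entry in the returned list corresponds to one payload slice of at most
# `__max_req_size` characters, split on newline boundaries as implemented above.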
def validate_request(self, req_obj: Request) -> None:
"""Validate an API request against the current version and the request's schema"""
try:
# make sure we're using a compatible version for this request
# validate the request (checks required fields and specific field version restrictions)
validate = req_obj.validate
except AttributeError:
raise TypeError('"req_obj" parameter must be a backend_api.session.Request object')
validate()
def send_async(self, req_obj: Request) -> CallResult:
"""
Asynchronously sends an API request using a request object.
:param req_obj: The request object
:type req_obj: Request
:return: CallResult object containing the raw response, response metadata and parsed response object.
"""
return self.send(req_obj=req_obj, async_enable=True)
def send(
self,
req_obj: Request,
async_enable: bool = False,
headers: Optional[dict] = None,
) -> Optional[CallResult]:
"""
Sends an API request using a request object.
:param req_obj: The request object
:type req_obj: Request
:param async_enable: Request this method be executed in an asynchronous manner
:param headers: Additional headers to send with request
:return: CallResult object containing the raw response, response metadata and parsed response object.
"""
self.validate_request(req_obj)
if self._offline_mode:
return None
if isinstance(req_obj, BatchRequest):
# TODO: support async for batch requests as well
if async_enable:
raise NotImplementedError("Async behavior is currently not implemented for batch requests")
json_data = req_obj.get_json()
res = self.send_request_batch(
service=req_obj._service,
action=req_obj._action,
version=req_obj._version,
json=json_data,
method=req_obj._method,
headers=headers,
)
# TODO: handle multiple results in this case
if res is not None:
try:
res = next(r for r in res if r.status_code != 200)
except StopIteration:
# all are 200
res = res[0]
else:
res = self.send_request(
service=req_obj._service,
action=req_obj._action,
version=req_obj._version,
json=req_obj.to_dict(),
method=req_obj._method,
async_enable=async_enable,
headers=headers,
)
call_result = CallResult.from_result(
res=res,
request_cls=req_obj.__class__,
logger=self._logger,
service=req_obj._service,
action=req_obj._action,
session=self,
)
return call_result
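# --- Hedged usage sketch (editor's addition, not part of the original source) ---
# With a generated request object (the import path and request class below are assumed
# for illustration), `send` validates the request and wraps the HTTP response:
#
#     from clearml.backend_api.services import tasks
#     result = session.send(tasks.GetAllRequest())
#     # per the docstring above, `result` is a CallResult holding the raw response,
#     # the response metadata, and the parsed response object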
@classmethod
def _make_all_sessions_go_online(cls) -> None:
for active_session in cls._get_all_active_sessions():
# noinspection PyProtectedMember
active_session._connect()
@classmethod
def _get_all_active_sessions(cls) -> List[Any]:
active_sessions = []
new_sessions_weakrefs = []
for session_weakref in cls._sessions_weakrefs:
session = session_weakref()
if session:
active_sessions.append(session)
new_sessions_weakrefs.append(session_weakref)
cls._sessions_weakrefs = new_sessions_weakrefs
return active_sessions
@classmethod
def get_api_server_host(cls, config: Optional[ConfigTree] = None) -> str:
if not config:
from ...config import config_obj
config = config_obj
return ENV_HOST.get(
default=(config.get("api.api_server", None) or config.get("api.host", None) or cls.default_host)
).rstrip("/")
@classmethod
def get_app_server_host(cls, config: Optional[ConfigTree] = None) -> str:
"""
Get the app server (webserver) host, either from the configuration or by inferring it from the API server host.
"""
if not config:
from ...config import config_obj
config = config_obj
# get from config/environment
web_host = ENV_WEB_HOST.get(default=config.get("api.web_server", "")).rstrip("/")
if web_host:
return web_host
# return default
host = cls.get_api_server_host(config)
if host == cls.default_host and cls.default_web:
return cls.default_web
# compose ourselves
if "://demoapi." in host:
return host.replace("://demoapi.", "://demoapp.", 1)
if "://api." in host:
return host.replace("://api.", "://app.", 1)
parsed = urlparse(host)
if parsed.port == 8008:
return host.replace(":8008", ":8080", 1)
raise ValueError("Could not detect ClearML web application server")
@classmethod
def get_files_server_host(cls, config: Optional[ConfigTree] = None) -> str:
"""
Get the files server host name for the current configuration, either from the configuration value or by
deriving it from the API/app server host names.
:param config: configuration object to use instead of the default configuration (ConfigTree)
:return: files server host name (str)
"""
if not config:
from ...config import config_obj
config = config_obj
# get from config/environment
files_host = ENV_FILES_HOST.get(default=(config.get("api.files_server", ""))).rstrip("/")
if files_host:
return files_host
# return default
host = cls.get_api_server_host(config)
if host == cls.default_host and cls.default_files:
return cls.default_files
# compose ourselves
app_host = cls.get_app_server_host(config)
parsed = urlparse(app_host)
if parsed.port:
parsed = parsed._replace(netloc=parsed.netloc.replace(":%d" % parsed.port, ":8081", 1))
elif parsed.netloc.startswith("demoapp."):
parsed = parsed._replace(netloc=parsed.netloc.replace("demoapp.", "demofiles.", 1))
elif parsed.netloc.startswith("app."):
parsed = parsed._replace(netloc=parsed.netloc.replace("app.", "files.", 1))
else:
parsed = parsed._replace(netloc=parsed.netloc + ":8081")
return urlunparse(parsed)
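# --- Editor's illustration (not part of the original source) of the inference above ---
#     https://demoapp.clear.ml     -> https://demofiles.clear.ml
#     https://app.example.com      -> https://files.example.com
#     http://my-server:8080        -> http://my-server:8081
# i.e. the app host prefix is rewritten, or port 8081 replaces (or is appended to) the netloc.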
@classmethod
def check_min_server_version(cls, min_server_version: str) -> bool:
"""
Return True if Session.server_version is greater than or equal to min_server_version.
"""
return cls._version_tuple(cls.server_version) >= cls._version_tuple(str(min_server_version))
@classmethod
def check_min_api_version(cls, min_api_version: str, raise_error: bool = False) -> bool:
"""
Return True if Session.api_version is greater than or equal to min_api_version.
"""
# If no session was created, create a default one, in order to get the backend api version.
if cls._sessions_created <= 0:
if cls._offline_mode:
# allow changing the offline-mode version by setting ENV_OFFLINE_MODE to the required API version
if cls.api_version != cls._offline_default_version:
offline_api = ENV_OFFLINE_MODE.get(converter=lambda x: x)
if offline_api:
try:
# check cast to float, but leave original str if we pass it.
# minimum version is 2.3
if float(offline_api) >= 2.3:
cls._offline_default_version = str(offline_api)
except ValueError:
pass
cls.max_api_version = cls.api_version = cls._offline_default_version
else:
# if the requested version is lower than the minimum we support,
# no need to actually check what the server has, we assume it must have at least our version.
if cls._version_tuple(cls.api_version) >= cls._version_tuple(str(min_api_version)):
return True
# noinspection PyBroadException
try:
cls()
except (MissingConfigError, ConfigurationError):
if raise_error and not ENV_IGNORE_MISSING_CONFIG.get():
raise
except LoginError:
if raise_error:
raise
except Exception:
pass
return cls._version_tuple(cls.api_version) >= cls._version_tuple(str(min_api_version))
@classmethod
def check_min_api_server_version(cls, min_api_version: str, raise_error: bool = False) -> bool:
"""
Return True if Session.max_api_version is greater than or equal to min_api_version.
Notice that this is the API version reported by the server, not the SDK's maximum supported API version.
"""
if cls.check_min_api_version(min_api_version, raise_error=raise_error):
return True
return cls._version_tuple(cls.max_api_version) >= cls._version_tuple(str(min_api_version))
@classmethod
def get_worker_host_name(cls) -> str:
from ...config import dev_worker_name
return dev_worker_name() or gethostname()
@classmethod
def get_clients(cls) -> List[Tuple[str, str]]:
return cls._client
@classmethod
def verify_feature_set(cls, feature_set: str) -> None:
if isinstance(feature_set, str):
feature_set = [feature_set]
if cls.feature_set not in feature_set:
raise ValueError("ClearML-server does not support requested feature set '{}'".format(feature_set))
@staticmethod
def _version_tuple(v: str) -> Tuple[int]:
v = tuple(map(int, (v.split("."))))
return v + (0,) * max(0, 3 - len(v))
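# Editor's note (not part of the original source): the padding above makes short version
# strings comparable as 3-tuples, e.g. _version_tuple("2.3") == (2, 3, 0) and
# _version_tuple("2.10") == (2, 10, 0), so "2.10" compares greater than "2.3".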
def _do_refresh_token(self, old_token: str, exp: Optional[int] = None) -> str:
"""TokenManager abstract method implementation.
Here we ignore the old token and simply obtain a new token.
"""
verbose = self._verbose and self._logger
if verbose:
self._logger.info(
"Refreshing token from {} (access_key={}, exp={})".format(self.host, self.access_key, exp)
)
auth = None
headers = None
token = None
if self.__auth_token: # try using initially provided token
token = self.__auth_token
elif self.access_key and self.secret_key: # otherwise, try using basic auth (if key/secret exists)
auth = HTTPBasicAuth(self.access_key, self.secret_key)
else: # otherwise, use the latest raw token
token = self.raw_token
if token:
headers = dict(Authorization="Bearer {}".format(token))
if not auth and not headers:
# No authorization info, something went wrong
self._logger.warning(
"refreshing token with no authorization info (no token or credentials, this might fail "
"if session does not have a valid cookie)"
)
res = None
try:
res = self._send_request(
method=Request.def_method,
service="auth",
action="login",
auth=auth,
headers=headers,
refresh_token_if_unauthorized=False,
params={"expiration_sec": exp} if exp else {},
)
try:
resp = res.json()
except ValueError:
resp = {}
if res.status_code != 200:
msg = resp.get("meta", {}).get("result_msg", res.reason)
raise LoginError("Failed getting token (error {} from {}): {}".format(res.status_code, self.host, msg))
if verbose:
self._logger.info("Received new token")
# make sure we keep the token updated on the OS environment, so that child processes will have access.
if ENV_AUTH_TOKEN.get():
ENV_AUTH_TOKEN.set(resp["data"]["token"])
# in any case, the initial token should only be used once (we clear it only after we actually managed
# to generate a new token, so a transient failure does not discard it)
self.__auth_token = None
return resp["data"]["token"]
except LoginError:
six.reraise(*sys.exc_info())
except KeyError as ex:
# check if this is a misconfigured api server (getting 200 without the data section)
if res and res.status_code == 200:
raise ValueError(
"It seems *api_server* is misconfigured. Is this the ClearML API server {} ?".format(self.host)
)
else:
raise LoginError(
"Response data mismatch: No 'token' in 'data' value from res, receive : {}, "
"exception: {}".format(res, ex)
)
except Exception as ex:
raise LoginError("Unrecognized Authentication Error: {} {}".format(type(ex), ex))
@staticmethod
def __get_browser_token(webserver: str) -> Optional[str]:
# try to get the token if we are running inside a browser session (e.g. Colab, Kaggle, etc.)
if not os.environ.get("JPY_PARENT_PID"):
return None
try:
from google.colab import output # noqa
from google.colab._message import MessageError # noqa
from IPython import display # noqa
# the server's session cookie must allow cross-site requests (SameSite=None) for this to work
display.display(
display.Javascript(
"""
window._ApiKey = new Promise((resolve, reject) => {
const timeout = setTimeout(() => reject("Failed authenticating existing browser session"), 5000)
fetch("%s/api/auth.login", {
method: 'GET',
credentials: 'include'
})
.then((response) => resolve(response.json()))
.then((json) => {
clearTimeout(timeout);
}).catch((err) => {
clearTimeout(timeout);
reject(err);
});
});
"""
% webserver.rstrip("/")
)
)
response = output.eval_js("_ApiKey")
if not response:
return None
result_code = response.get("meta", {}).get("result_code")
token = response.get("data", {}).get("token")
except: # noqa
return None
if result_code != 200:
raise ValueError("Automatic authentication failed, please login to {} and try again".format(webserver))
return token
def __str__(self) -> str:
return "{self.__class__.__name__}[{self.host}, {self.access_key}/{secret_key}]".format(
self=self, secret_key=self.secret_key[:5] + "*" * (len(self.secret_key) - 5)
)
class _LocalLogger:
def __init__(self, local_logger: logging.Logger) -> None:
self.logger = local_logger
def __call__(self) -> logging.Logger:
if not self.logger:
self.logger = get_logger()
return self.logger
def browser_login(clearml_server: Optional[str] = None) -> None:
"""
Alternative authentication / login method (instead of configuring ~/clearml.conf or environment variables).
** Only applicable when running inside a browser session,
for example Google Colab, Kaggle notebooks, Jupyter Notebooks, etc. **
Notice: if called inside a python script, or when running with an agent, this function is ignored.
:param clearml_server: Optional, set the clearml server address, default: https://app.clear.ml
"""
# check if we are running inside a Jupyter notebook of a sort
if not os.environ.get("JPY_PARENT_PID"):
return
# if we are running remotely or in offline mode, skip login
from clearml.config import running_remotely
# noinspection PyProtectedMember
if running_remotely() or Session._offline_mode:
return
# if we have working local configuration, nothing to do
try:
Session()
# make sure we set environment variables to point to our api/app/files hosts
ENV_WEB_HOST.set(Session.get_app_server_host())
ENV_HOST.set(Session.get_api_server_host())
ENV_FILES_HOST.set(Session.get_files_server_host())
return
except: # noqa
pass
# conform clearml_server address
if clearml_server:
if not clearml_server.lower().startswith("http"):
clearml_server = "http://{}".format(clearml_server)
parsed = urlparse(clearml_server)
if parsed.port:
parsed = parsed._replace(netloc=parsed.netloc.replace(":%d" % parsed.port, ":8008", 1))
if parsed.netloc.startswith("demoapp."):
parsed = parsed._replace(netloc=parsed.netloc.replace("demoapp.", "demoapi.", 1))
elif parsed.netloc.startswith("app."):
parsed = parsed._replace(netloc=parsed.netloc.replace("app.", "api.", 1))
elif parsed.netloc.startswith("api."):
pass
else:
parsed = parsed._replace(netloc="api." + parsed.netloc)
clearml_server = urlunparse(parsed)
# set for later usage
ENV_HOST.set(clearml_server)
token = None
counter = 0
clearml_app_server = Session.get_app_server_host()
while not token:
# try to get an authentication token
try:
# noinspection PyProtectedMember
token = Session._Session__get_browser_token(clearml_app_server)
except ValueError:
token = None
except Exception: # noqa
token = None
# if we could not get a token, instruct the user to login
if not token:
if not counter:
print(
"ClearML automatic browser login failed, please login or create a new account\n"
"To get started with ClearML: setup your own `clearml-server`, "
"or create a free account at {}\n".format(clearml_app_server)
)
print(
"Please login to {} , then press [Enter] to connect ".format(clearml_app_server),
end="",
)
input()
elif counter < 2:
print(
"Oh no we failed to connect \N{worried face}, "
"try to logout and login again - Press [Enter] to retry ",
end="",
)
input()
else:
print(
"\n"
"We cannot connect automatically (adblocker / incognito?) \N{worried face} \n"
"Please go to {}/settings/workspace-configuration \n"
"Then press \x1B[1m\x1B[48;2;26;30;44m\x1B[37m + Create new credentials \x1b[0m \n"
"And copy/paste your \x1B[1m\x1B[4mAccess Key\x1b[0m here: ".format(clearml_app_server.lstrip("/")),
end="",
)
creds = input()
if creds:
print(" Setting access key ")
ENV_ACCESS_KEY.set(creds.strip())
print("Now copy/paste your \x1B[1m\x1B[4mSecret Key\x1b[0m here: ", end="")
creds = input()
if creds:
print(" Setting secret key ")
ENV_SECRET_KEY.set(creds.strip())
if ENV_ACCESS_KEY.get() and ENV_SECRET_KEY.get():
# store in conf file for persistence in runtime
# noinspection PyBroadException
try:
with open(get_config_file(), "wt") as f:
f.write(
"api.credentials.access_key={}\napi.credentials.secret_key={}\n".format(
ENV_ACCESS_KEY.get(), ENV_SECRET_KEY.get()
)
)
except Exception:
pass
break
counter += 1
print("")
if counter:
# these emojis actually requires python 3.6+
# print("\nHurrah! \N{face with party horn and party hat} \N{confetti ball} \N{party popper}")
print("\nHurrah! \U0001f973 \U0001f38a \U0001f389")
if token:
# set Token
ENV_AUTH_TOKEN.set(token)
if token or (ENV_ACCESS_KEY.get() and ENV_SECRET_KEY.get()):
# make sure we set environment variables to point to our api/app/files hosts
ENV_WEB_HOST.set(Session.get_app_server_host())
ENV_HOST.set(Session.get_api_server_host())
ENV_FILES_HOST.set(Session.get_files_server_host())
# verify token
Session()
# success
print("\N{robot face} ClearML connected successfully - let's build something! \N{rocket}")
|
Session
|
python
|
realpython__materials
|
geoshops/nearbyshops/migrations/0001_initial.py
|
{
"start": 135,
"end": 837
}
|
class ____(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Shop',
fields=[
('id', models.AutoField(
auto_created=True, primary_key=True, serialize=False,
verbose_name='ID')),
('name', models.CharField(max_length=100)),
(
'location',
django.contrib.gis.db.models.fields.PointField(srid=4326)
),
('address', models.CharField(max_length=100)),
('city', models.CharField(max_length=50)),
],
),
]
|
Migration
|
python
|
getsentry__sentry
|
src/sentry/integrations/api/serializers/models/integration.py
|
{
"start": 7552,
"end": 8410
}
|
class ____(Serializer):
def serialize(
self,
obj: IntegrationProvider,
attrs: Mapping[str, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> IntegrationProviderResponse:
org_slug = kwargs.pop("organization").slug
metadata: Any = obj.metadata
metadata = metadata and metadata.asdict() or None
return {
"key": obj.key,
"slug": obj.key,
"name": obj.name,
"metadata": metadata,
"canAdd": obj.can_add,
"canDisable": obj.can_disable,
"features": [f.value for f in obj.features],
"setupDialog": dict(
url=f"/organizations/{org_slug}/integrations/{obj.key}/setup/",
**obj.setup_dialog_config,
),
}
|
IntegrationProviderSerializer
|
python
|
crytic__slither
|
slither/detectors/compiler_bugs/enum_conversion.py
|
{
"start": 1047,
"end": 2886
}
|
class ____(AbstractDetector):
"""
Detect dangerous conversion to enum
"""
ARGUMENT = "enum-conversion"
HELP = "Detect dangerous enum conversion"
IMPACT = DetectorClassification.MEDIUM
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#dangerous-enum-conversion"
WIKI_TITLE = "Dangerous enum conversion"
WIKI_DESCRIPTION = "Detect out-of-range `enum` conversion (`solc` < `0.4.5`)."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
pragma solidity 0.4.2;
contract Test{
enum E{a}
function bug(uint a) public returns(E){
return E(a);
}
}
```
Attackers can trigger unexpected behaviour by calling `bug(1)`."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = "Use a recent compiler version. If `solc` <`0.4.5` is required, check the `enum` conversion range."
VULNERABLE_SOLC_VERSIONS = make_solc_versions(4, 0, 4)
def _detect(self) -> List[Output]:
"""Detect dangerous conversion to enum"""
results = []
for c in self.compilation_unit.contracts:
ret = _detect_dangerous_enum_conversions(c)
for node, var in ret:
func_info: DETECTOR_INFO = [node, " has a dangerous enum conversion\n"]
# Output each node with the function info header as a separate result.
variable_info: DETECTOR_INFO = [
"\t- Variable: ",
var,
f" of type: {str(var.type)}\n",
]
node_info: DETECTOR_INFO = ["\t- Enum conversion: ", node, "\n"]
json = self.generate_result(func_info + variable_info + node_info)
results.append(json)
return results
|
EnumConversion
|
python
|
doocs__leetcode
|
solution/1100-1199/1155.Number of Dice Rolls With Target Sum/Solution.py
|
{
"start": 0,
"end": 414
}
|
class ____:
def numRollsToTarget(self, n: int, k: int, target: int) -> int:
f = [[0] * (target + 1) for _ in range(n + 1)]
f[0][0] = 1
mod = 10**9 + 7
for i in range(1, n + 1):
for j in range(1, min(i * k, target) + 1):
for h in range(1, min(j, k) + 1):
f[i][j] = (f[i][j] + f[i - 1][j - h]) % mod
return f[n][target]
|
Solution
|
python
|
keras-team__keras
|
keras/src/ops/numpy.py
|
{
"start": 123493,
"end": 124012
}
|
class ____(Operation):
def call(self, x):
return backend.numpy.isneginf(x)
def compute_output_spec(self, x):
return KerasTensor(x.shape, dtype="bool")
@keras_export(["keras.ops.isneginf", "keras.ops.numpy.isneginf"])
def isneginf(x):
"""Test element-wise for negative infinity.
Args:
x: Input tensor.
Returns:
Output boolean tensor.
"""
if any_symbolic_tensors((x,)):
return Isneginf().symbolic_call(x)
return backend.numpy.isneginf(x)
|
Isneginf
|
python
|
falconry__falcon
|
falcon/util/time.py
|
{
"start": 359,
"end": 1797
}
|
class ____(datetime.tzinfo):
"""GMT timezone class implementing the :class:`datetime.tzinfo` interface.
.. deprecated:: 4.0
:class:`TimezoneGMT` is deprecated, use :attr:`datetime.timezone.utc`
instead. (This class will be removed in Falcon 5.0.)
"""
GMT_ZERO = datetime.timedelta(hours=0)
@deprecated(
'TimezoneGMT is deprecated, use datetime.timezone.utc instead. '
'(TimezoneGMT will be removed in Falcon 5.0.)'
)
def __init__(self) -> None:
super().__init__()
def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta:
"""Get the offset from UTC.
Args:
dt(datetime.datetime): Ignored
Returns:
datetime.timedelta: GMT offset, which is equivalent to UTC and
so is always 0.
"""
return self.GMT_ZERO
def tzname(self, dt: datetime.datetime | None) -> str:
"""Get the name of this timezone.
Args:
dt(datetime.datetime): Ignored
Returns:
str: "GMT"
"""
return 'GMT'
def dst(self, dt: datetime.datetime | None) -> datetime.timedelta:
"""Return the daylight saving time (DST) adjustment.
Args:
dt(datetime.datetime): Ignored
Returns:
datetime.timedelta: DST adjustment for GMT, which is always 0.
"""
return self.GMT_ZERO
|
TimezoneGMT
|
python
|
google__pytype
|
pytype/compare_test.py
|
{
"start": 9004,
"end": 14257
}
|
class ____(CompareTestBase):
def setUp(self):
super().setUp()
self._d = abstract.Dict(self._ctx)
self._var = self._program.NewVariable()
self._var.AddBinding(abstract.Unknown(self._ctx), [], self._node)
def test_compatible_with__when_empty(self):
self.assertFalsy(self._d)
def test_compatible_with__after_setitem(self):
# Once a slot is added, dict is ambiguous.
self._d.setitem_slot(self._node, self._var, self._var)
self.assertAmbiguous(self._d)
def test_compatible_with__after_set_str_item(self):
self._d.set_str_item(self._node, "key", self._var)
self.assertTruthy(self._d)
def test_compatible_with__after_unknown_update(self):
# Updating an empty dict with an unknown value makes the former ambiguous.
self._d.update(self._node, abstract.Unknown(self._ctx))
self.assertAmbiguous(self._d)
def test_compatible_with__after_empty_update(self):
empty_dict = abstract.Dict(self._ctx)
self._d.update(self._node, empty_dict)
self.assertFalsy(self._d)
def test_compatible_with__after_unambiguous_update(self):
unambiguous_dict = abstract.Dict(self._ctx)
unambiguous_dict.set_str_item(
self._node, "a", self._ctx.new_unsolvable(self._node)
)
self._d.update(self._node, unambiguous_dict)
self.assertTruthy(self._d)
def test_compatible_with__after_ambiguous_update(self):
ambiguous_dict = abstract.Dict(self._ctx)
ambiguous_dict.merge_instance_type_parameter(
self._node, abstract_utils.K, self._ctx.new_unsolvable(self._node)
)
ambiguous_dict.is_concrete = False
self._d.update(self._node, ambiguous_dict)
self.assertAmbiguous(self._d)
def test_compatible_with__after_concrete_update(self):
self._d.update(self._node, {})
self.assertFalsy(self._d)
self._d.update(self._node, {"a": self._ctx.new_unsolvable(self._node)})
self.assertTruthy(self._d)
def test_pop(self):
self._d.set_str_item(self._node, "a", self._var)
node, ret = self._d.pop_slot(
self._node, self._convert.build_string(self._node, "a")
)
self.assertFalsy(self._d)
self.assertIs(node, self._node)
self.assertIs(ret, self._var)
def test_pop_with_default(self):
self._d.set_str_item(self._node, "a", self._var)
node, ret = self._d.pop_slot(
self._node,
self._convert.build_string(self._node, "a"),
self._convert.none.to_variable(self._node),
) # default is ignored
self.assertFalsy(self._d)
self.assertIs(node, self._node)
self.assertIs(ret, self._var)
def test_bad_pop(self):
self._d.set_str_item(self._node, "a", self._var)
self.assertRaises(
error_types.DictKeyMissing,
self._d.pop_slot,
self._node,
self._convert.build_string(self._node, "b"),
)
self.assertTruthy(self._d)
def test_bad_pop_with_default(self):
val = self._convert.primitive_instances[int]
self._d.set_str_item(self._node, "a", val.to_variable(self._node))
node, ret = self._d.pop_slot(
self._node,
self._convert.build_string(self._node, "b"),
self._convert.none.to_variable(self._node),
)
self.assertTruthy(self._d)
self.assertIs(node, self._node)
self.assertListEqual(ret.data, [self._convert.none])
def test_ambiguous_pop(self):
val = self._convert.primitive_instances[int]
self._d.set_str_item(self._node, "a", val.to_variable(self._node))
ambiguous_key = self._convert.primitive_instances[str]
node, ret = self._d.pop_slot(
self._node, ambiguous_key.to_variable(self._node)
)
self.assertAmbiguous(self._d)
self.assertIs(node, self._node)
self.assertListEqual(ret.data, [val])
def test_ambiguous_pop_with_default(self):
val = self._convert.primitive_instances[int]
self._d.set_str_item(self._node, "a", val.to_variable(self._node))
ambiguous_key = self._convert.primitive_instances[str]
default_var = self._convert.none.to_variable(self._node)
node, ret = self._d.pop_slot(
self._node, ambiguous_key.to_variable(self._node), default_var
)
self.assertAmbiguous(self._d)
self.assertIs(node, self._node)
self.assertSetEqual(set(ret.data), {val, self._convert.none})
def test_ambiguous_dict_after_pop(self):
ambiguous_key = self._convert.primitive_instances[str]
val = self._convert.primitive_instances[int]
node, _ = self._d.setitem_slot(
self._node,
ambiguous_key.to_variable(self._node),
val.to_variable(self._node),
)
_, ret = self._d.pop_slot(node, self._convert.build_string(node, "a"))
self.assertAmbiguous(self._d)
self.assertListEqual(ret.data, [val])
def test_ambiguous_dict_after_pop_with_default(self):
ambiguous_key = self._convert.primitive_instances[str]
val = self._convert.primitive_instances[int]
node, _ = self._d.setitem_slot(
self._node,
ambiguous_key.to_variable(self._node),
val.to_variable(self._node),
)
_, ret = self._d.pop_slot(
node,
self._convert.build_string(node, "a"),
self._convert.none.to_variable(node),
)
self.assertAmbiguous(self._d)
self.assertSetEqual(set(ret.data), {val, self._convert.none})
|
DictTest
|
python
|
encode__starlette
|
starlette/middleware/httpsredirect.py
|
{
"start": 150,
"end": 848
}
|
class ____:
def __init__(self, app: ASGIApp) -> None:
self.app = app
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
if scope["type"] in ("http", "websocket") and scope["scheme"] in ("http", "ws"):
url = URL(scope=scope)
redirect_scheme = {"http": "https", "ws": "wss"}[url.scheme]
netloc = url.hostname if url.port in (80, 443) else url.netloc
url = url.replace(scheme=redirect_scheme, netloc=netloc)
response = RedirectResponse(url, status_code=307)
await response(scope, receive, send)
else:
await self.app(scope, receive, send)
|
HTTPSRedirectMiddleware
|
python
|
pytorch__pytorch
|
torch/distributions/constraints.py
|
{
"start": 19758,
"end": 21629
}
|
class ____(Constraint):
"""
Constraint functor that applies a sequence of constraints
`cseq` at the submatrices at dimension `dim`,
in a way compatible with :func:`torch.stack`.
"""
def __init__(self, cseq, dim=0):
assert all(isinstance(c, Constraint) for c in cseq)
self.cseq = list(cseq)
self.dim = dim
super().__init__()
@property
def is_discrete(self) -> bool: # type: ignore[override]
return any(c.is_discrete for c in self.cseq)
@property
def event_dim(self) -> int: # type: ignore[override]
dim = max(c.event_dim for c in self.cseq)
if self.dim + dim < 0:
dim += 1
return dim
def check(self, value):
assert -value.dim() <= self.dim < value.dim()
vs = [value.select(self.dim, i) for i in range(value.size(self.dim))]
return torch.stack(
[constr.check(v) for v, constr in zip(vs, self.cseq)], self.dim
)
# Public interface.
dependent = _Dependent()
dependent_property = _DependentProperty
independent = _IndependentConstraint
boolean = _Boolean()
one_hot = _OneHot()
nonnegative_integer = _IntegerGreaterThan(0)
positive_integer = _IntegerGreaterThan(1)
integer_interval = _IntegerInterval
real = _Real()
real_vector = independent(real, 1)
positive = _GreaterThan(0.0)
nonnegative = _GreaterThanEq(0.0)
greater_than = _GreaterThan
greater_than_eq = _GreaterThanEq
less_than = _LessThan
multinomial = _Multinomial
unit_interval = _Interval(0.0, 1.0)
interval = _Interval
half_open_interval = _HalfOpenInterval
simplex = _Simplex()
lower_triangular = _LowerTriangular()
lower_cholesky = _LowerCholesky()
corr_cholesky = _CorrCholesky()
square = _Square()
symmetric = _Symmetric()
positive_semidefinite = _PositiveSemidefinite()
positive_definite = _PositiveDefinite()
cat = _Cat
stack = _Stack
|
_Stack
|
python
|
doocs__leetcode
|
solution/1700-1799/1765.Map of Highest Peak/Solution.py
|
{
"start": 0,
"end": 676
}
|
class ____:
def highestPeak(self, isWater: List[List[int]]) -> List[List[int]]:
m, n = len(isWater), len(isWater[0])
ans = [[-1] * n for _ in range(m)]
q = deque()
for i, row in enumerate(isWater):
for j, v in enumerate(row):
if v:
q.append((i, j))
ans[i][j] = 0
while q:
i, j = q.popleft()
for a, b in pairwise((-1, 0, 1, 0, -1)):
x, y = i + a, j + b
if 0 <= x < m and 0 <= y < n and ans[x][y] == -1:
ans[x][y] = ans[i][j] + 1
q.append((x, y))
return ans
|
Solution
|
python
|
wandb__wandb
|
tests/unit_tests/test_retry.py
|
{
"start": 3315,
"end": 4252
}
|
class ____:
def test_reraises_exc_failing_predicate(self):
wrapped = mock.Mock(spec=retry.Backoff)
filtered = retry.FilteredBackoff(
filter=lambda e: False,
wrapped=wrapped,
)
with pytest.raises(MyError):
filtered.next_sleep_or_reraise(MyError("don't retry me"))
wrapped.next_sleep_or_reraise.assert_not_called()
def test_delegates_exc_passing_predicate(self):
retriable_exc = MyError("retry me")
wrapped = mock.Mock(
spec=retry.Backoff,
next_sleep_or_reraise=mock.Mock(return_value=123 * SECOND),
)
filtered = retry.FilteredBackoff(
filter=lambda e: e == retriable_exc,
wrapped=wrapped,
)
assert filtered.next_sleep_or_reraise(retriable_exc) == 123 * SECOND
wrapped.next_sleep_or_reraise.assert_called_once_with(retriable_exc)
|
TestFilteredBackoff
|
python
|
pandas-dev__pandas
|
asv_bench/benchmarks/sparse.py
|
{
"start": 1844,
"end": 2626
}
|
class ____:
params = [True, False]
param_names = ["sort_labels"]
def setup(self, sort_labels):
s = Series([np.nan] * 10000)
s[0] = 3.0
s[100] = -1.0
s[999] = 12.1
s_mult_lvl = s.set_axis(MultiIndex.from_product([range(10)] * 4))
self.ss_mult_lvl = s_mult_lvl.astype("Sparse")
s_two_lvl = s.set_axis(MultiIndex.from_product([range(100)] * 2))
self.ss_two_lvl = s_two_lvl.astype("Sparse")
def time_sparse_series_to_coo(self, sort_labels):
self.ss_mult_lvl.sparse.to_coo(
row_levels=[0, 1], column_levels=[2, 3], sort_labels=sort_labels
)
def time_sparse_series_to_coo_single_level(self, sort_labels):
self.ss_two_lvl.sparse.to_coo(sort_labels=sort_labels)
|
ToCoo
|
python
|
readthedocs__readthedocs.org
|
readthedocs/redirects/migrations/0001_initial.py
|
{
"start": 100,
"end": 3449
}
|
class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0002_add_importedfile_model"),
]
operations = [
migrations.CreateModel(
name="Redirect",
fields=[
(
"id",
models.AutoField(
verbose_name="ID",
serialize=False,
auto_created=True,
primary_key=True,
),
),
(
"redirect_type",
models.CharField(
help_text="The type of redirect you wish to use.",
max_length=255,
verbose_name="Redirect Type",
choices=[
(b"prefix", "Prefix Redirect"),
(b"page", "Page Redirect"),
(b"exact", "Exact Redirect"),
(b"sphinx_html", "Sphinx HTMLDir -> HTML"),
(b"sphinx_htmldir", "Sphinx HTML -> HTMLDir"),
],
),
),
(
"from_url",
models.CharField(
help_text="Absolute path, excluding the domain. Example: <b>/docs/</b> or <b>/install.html</b>",
max_length=255,
verbose_name="From URL",
db_index=True,
blank=True,
),
),
(
"to_url",
models.CharField(
help_text="Absolute or relative url. Examples: <b>/tutorial/install.html</b>",
max_length=255,
verbose_name="To URL",
db_index=True,
blank=True,
),
),
(
"http_status",
models.SmallIntegerField(
default=301,
verbose_name="HTTP Status",
choices=[
(301, "301 - Permanent Redirect"),
(302, "302 - Temporary Redirect"),
],
),
),
(
"status",
models.BooleanField(
default=True, choices=[(True, "Active"), (False, "Inactive")]
),
),
("create_dt", models.DateTimeField(auto_now_add=True)),
("update_dt", models.DateTimeField(auto_now=True)),
(
"project",
models.ForeignKey(
related_name="redirects",
verbose_name="Project",
to="projects.Project",
on_delete=models.CASCADE,
),
),
],
options={
"ordering": ("-update_dt",),
"verbose_name": "redirect",
"verbose_name_plural": "redirects",
},
),
]
|
Migration
|
python
|
lepture__authlib
|
authlib/integrations/flask_client/integration.py
|
{
"start": 217,
"end": 806
}
|
class ____(FrameworkIntegration):
def update_token(self, token, refresh_token=None, access_token=None):
token_update.send(
current_app,
name=self.name,
token=token,
refresh_token=refresh_token,
access_token=access_token,
)
@staticmethod
def load_config(oauth, name, params):
rv = {}
for k in params:
conf_key = f"{name}_{k}".upper()
v = oauth.app.config.get(conf_key, None)
if v is not None:
rv[k] = v
return rv
|
FlaskIntegration
|
python
|
sqlalchemy__sqlalchemy
|
test/sql/test_resultset.py
|
{
"start": 104187,
"end": 120381
}
|
class ____(fixtures.TablesTest):
__requires__ = ("sqlite",)
@classmethod
def setup_bind(cls):
cls.engine = engine = engines.testing_engine(
"sqlite://", options={"scope": "class"}
)
return engine
@classmethod
def define_tables(cls, metadata):
Table(
"test",
metadata,
Column("x", Integer, primary_key=True),
Column("y", String(50)),
)
@classmethod
def insert_data(cls, connection):
connection.execute(
cls.tables.test.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(1, 12)],
)
@contextmanager
def _proxy_fixture(self, cls):
self.table = self.tables.test
class ExcCtx(default.DefaultExecutionContext):
def post_exec(self):
if cls is _cursor.CursorFetchStrategy:
pass
elif cls is _cursor.BufferedRowCursorFetchStrategy:
self.cursor_fetch_strategy = cls(
self.cursor, self.execution_options
)
elif cls is _cursor.FullyBufferedCursorFetchStrategy:
self.cursor_fetch_strategy = cls(
self.cursor,
self.cursor.description,
self.cursor.fetchall(),
)
else:
assert False
self.patcher = patch.object(
self.engine.dialect, "execution_ctx_cls", ExcCtx
)
with self.patcher:
yield
def _test_proxy(self, cls):
with self._proxy_fixture(cls):
rows = []
with self.engine.connect() as conn:
r = conn.execute(select(self.table))
assert isinstance(r.cursor_strategy, cls)
for i in range(5):
rows.append(r.fetchone())
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
rows = r.fetchmany(3)
eq_(rows, [(i, "t_%d" % i) for i in range(6, 9)])
rows = r.fetchall()
eq_(rows, [(i, "t_%d" % i) for i in range(9, 12)])
r = conn.execute(select(self.table))
rows = r.fetchmany(None)
eq_(rows[0], (1, "t_1"))
# number of rows here could be one, or the whole thing
assert len(rows) == 1 or len(rows) == 11
r = conn.execute(select(self.table).limit(1))
r.fetchone()
eq_(r.fetchone(), None)
r = conn.execute(select(self.table).limit(5))
rows = r.fetchmany(6)
eq_(rows, [(i, "t_%d" % i) for i in range(1, 6)])
# result keeps going just fine with blank results...
eq_(r.fetchmany(2), [])
eq_(r.fetchmany(2), [])
eq_(r.fetchall(), [])
eq_(r.fetchone(), None)
# until we close
r.close()
self._assert_result_closed(r)
r = conn.execute(select(self.table).limit(5))
eq_(r.first(), (1, "t_1"))
self._assert_result_closed(r)
r = conn.execute(select(self.table).limit(5))
eq_(r.scalar(), 1)
self._assert_result_closed(r)
def _assert_result_closed(self, r):
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchone
)
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchmany, 2
)
assert_raises_message(
sa_exc.ResourceClosedError, "object is closed", r.fetchall
)
def test_basic_plain(self):
self._test_proxy(_cursor.CursorFetchStrategy)
def test_basic_buffered_row_result_proxy(self):
self._test_proxy(_cursor.BufferedRowCursorFetchStrategy)
def test_basic_fully_buffered_result_proxy(self):
self._test_proxy(_cursor.FullyBufferedCursorFetchStrategy)
def test_basic_buffered_column_result_proxy(self):
self._test_proxy(_cursor.CursorFetchStrategy)
def test_resultprocessor_plain(self):
self._test_result_processor(_cursor.CursorFetchStrategy, False)
def test_resultprocessor_plain_cached(self):
self._test_result_processor(_cursor.CursorFetchStrategy, True)
def test_resultprocessor_buffered_row(self):
self._test_result_processor(
_cursor.BufferedRowCursorFetchStrategy, False
)
def test_resultprocessor_buffered_row_cached(self):
self._test_result_processor(
_cursor.BufferedRowCursorFetchStrategy, True
)
def test_resultprocessor_fully_buffered(self):
self._test_result_processor(
_cursor.FullyBufferedCursorFetchStrategy, False
)
def test_resultprocessor_fully_buffered_cached(self):
self._test_result_processor(
_cursor.FullyBufferedCursorFetchStrategy, True
)
def _test_result_processor(self, cls, use_cache):
class MyType(TypeDecorator):
impl = String()
cache_ok = True
def process_result_value(self, value, dialect):
return "HI " + value
with self._proxy_fixture(cls):
with self.engine.connect() as conn:
if use_cache:
cache = {}
conn = conn.execution_options(compiled_cache=cache)
stmt = select(literal("THERE", type_=MyType()))
for i in range(2):
r = conn.execute(stmt)
eq_(r.scalar(), "HI THERE")
@testing.fixture
def row_growth_fixture(self):
with self._proxy_fixture(_cursor.BufferedRowCursorFetchStrategy):
with self.engine.begin() as conn:
conn.execute(
self.table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 3000)],
)
yield conn
@testing.combinations(
("no option", None, {0: 5, 1: 25, 9: 125, 135: 625, 274: 1000}),
("lt 1000", 27, {0: 5, 16: 27, 70: 27, 150: 27, 250: 27}),
(
"gt 1000",
1500,
{0: 5, 1: 25, 9: 125, 135: 625, 274: 1500, 1351: 1500},
),
(
"gt 1500",
2000,
{0: 5, 1: 25, 9: 125, 135: 625, 274: 2000, 1351: 2000},
),
id_="iaa",
argnames="max_row_buffer,checks",
)
def test_buffered_row_growth(
self, row_growth_fixture, max_row_buffer, checks
):
if max_row_buffer:
result = row_growth_fixture.execution_options(
max_row_buffer=max_row_buffer
).execute(self.table.select())
else:
result = row_growth_fixture.execute(self.table.select())
assertion = {}
max_size = max(checks.values())
for idx, row in enumerate(result, 0):
if idx in checks:
assertion[idx] = result.cursor_strategy._bufsize
le_(len(result.cursor_strategy._rowbuffer), max_size)
def test_buffered_fetchmany_fixed(self, row_growth_fixture):
"""The BufferedRow cursor strategy will defer to the fetchmany
size passed when given rather than using the buffer growth
heuristic.
"""
result = row_growth_fixture.execute(self.table.select())
eq_(len(result.cursor_strategy._rowbuffer), 1)
rows = result.fetchmany(300)
eq_(len(rows), 300)
eq_(len(result.cursor_strategy._rowbuffer), 0)
rows = result.fetchmany(300)
eq_(len(rows), 300)
eq_(len(result.cursor_strategy._rowbuffer), 0)
bufsize = result.cursor_strategy._bufsize
result.fetchone()
# the fetchone() caused it to buffer a full set of rows
eq_(len(result.cursor_strategy._rowbuffer), bufsize - 1)
# assert partitions uses fetchmany(), therefore controlling
# how the buffer is used
lens = []
for partition in result.partitions(180):
lens.append(len(partition))
eq_(len(result.cursor_strategy._rowbuffer), 0)
for lp in lens[0:-1]:
eq_(lp, 180)
def test_buffered_fetchmany_yield_per(self, connection):
table = self.tables.test
connection.execute(
table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 3000)],
)
result = connection.execute(table.select())
assert isinstance(result.cursor_strategy, _cursor.CursorFetchStrategy)
result.fetchmany(5)
result = result.yield_per(100)
assert isinstance(
result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy
)
eq_(result.cursor_strategy._bufsize, 100)
eq_(result.cursor_strategy._growth_factor, 0)
eq_(len(result.cursor_strategy._rowbuffer), 0)
result.fetchone()
eq_(len(result.cursor_strategy._rowbuffer), 99)
for i, row in enumerate(result):
if i == 188:
break
# buffer of 98, plus buffer of 99 - 89, 10 rows
eq_(len(result.cursor_strategy._rowbuffer), 10)
for i, row in enumerate(result):
if i == 206:
break
eq_(i, 206)
def test_iterator_remains_unbroken(self, connection):
"""test related to #8710.
demonstrate that we can't close the cursor by catching
GeneratorExit inside of our iteration. Leaving the iterable
block using break, then picking up again, would be directly
impacted by this. So this provides a clear rationale for
providing context manager support for result objects.
"""
table = self.tables.test
connection.execute(
table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 250)],
)
result = connection.execute(table.select())
result = result.yield_per(100)
for i, row in enumerate(result):
if i == 188:
# this will raise GeneratorExit inside the iterator.
# so we can't close the DBAPI cursor here, we have plenty
# more rows to yield
break
eq_(i, 188)
# demonstrate getting more rows
for i, row in enumerate(result, 188):
if i == 206:
break
eq_(i, 206)
@testing.combinations(True, False, argnames="close_on_init")
@testing.combinations(
"fetchone", "fetchmany", "fetchall", argnames="fetch_style"
)
def test_buffered_fetch_auto_soft_close(
self, connection, close_on_init, fetch_style
):
"""test #7274"""
table = self.tables.test
connection.execute(
table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 30)],
)
result = connection.execute(table.select().limit(15))
assert isinstance(result.cursor_strategy, _cursor.CursorFetchStrategy)
if close_on_init:
# close_on_init - the initial buffering will exhaust the cursor,
# should soft close immediately
result = result.yield_per(30)
else:
# not close_on_init - soft close will occur after fetching an
# empty buffer
result = result.yield_per(5)
assert isinstance(
result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy
)
with mock.patch.object(result, "_soft_close") as soft_close:
if fetch_style == "fetchone":
while True:
row = result.fetchone()
if row:
eq_(soft_close.mock_calls, [])
else:
# fetchone() is also used by first(), scalar()
# and one() which want to embed a hard close in one
# step
eq_(soft_close.mock_calls, [mock.call(hard=False)])
break
elif fetch_style == "fetchmany":
while True:
rows = result.fetchmany(5)
if rows:
eq_(soft_close.mock_calls, [])
else:
eq_(soft_close.mock_calls, [mock.call()])
break
elif fetch_style == "fetchall":
rows = result.fetchall()
eq_(soft_close.mock_calls, [mock.call()])
else:
assert False
result.close()
def test_buffered_fetchmany_yield_per_all(self, connection):
table = self.tables.test
connection.execute(
table.insert(),
[{"x": i, "y": "t_%d" % i} for i in range(15, 500)],
)
result = connection.execute(table.select())
assert isinstance(result.cursor_strategy, _cursor.CursorFetchStrategy)
result.fetchmany(5)
result = result.yield_per(0)
assert isinstance(
result.cursor_strategy, _cursor.BufferedRowCursorFetchStrategy
)
eq_(result.cursor_strategy._bufsize, 0)
eq_(result.cursor_strategy._growth_factor, 0)
eq_(len(result.cursor_strategy._rowbuffer), 0)
result.fetchone()
eq_(len(result.cursor_strategy._rowbuffer), 490)
for i, row in enumerate(result):
if i == 188:
break
eq_(len(result.cursor_strategy._rowbuffer), 301)
# already buffered, so this doesn't change things
result.yield_per(10)
result.fetchmany(5)
eq_(len(result.cursor_strategy._rowbuffer), 296)
self._test_result_processor(
_cursor.BufferedRowCursorFetchStrategy, False
)
@testing.combinations(
_cursor.CursorFetchStrategy,
_cursor.BufferedRowCursorFetchStrategy,
# does not handle error in fetch
# _cursor.FullyBufferedCursorFetchStrategy,
argnames="strategy_cls",
)
@testing.combinations(
"fetchone",
"fetchmany",
"fetchmany_w_num",
"fetchall",
argnames="method_name",
)
def test_handle_error_in_fetch(self, strategy_cls, method_name):
class cursor:
def raise_(self):
raise OSError("random non-DBAPI error during cursor operation")
def fetchone(self):
self.raise_()
def fetchmany(self, num=None):
self.raise_()
def fetchall(self):
self.raise_()
def close(self):
self.raise_()
with self._proxy_fixture(strategy_cls):
with self.engine.connect() as conn:
r = conn.execute(select(self.table))
assert isinstance(r.cursor_strategy, strategy_cls)
with mock.patch.object(r, "cursor", cursor()):
with testing.expect_raises_message(
IOError, "random non-DBAPI"
):
if method_name == "fetchmany_w_num":
r.fetchmany(10)
else:
getattr(r, method_name)()
getattr(r, method_name)()
r.close()
def test_buffered_row_close_error_during_fetchone(self):
def raise_(**kw):
raise OSError("random non-DBAPI error during cursor operation")
with self._proxy_fixture(_cursor.BufferedRowCursorFetchStrategy):
with self.engine.connect() as conn:
r = conn.execute(select(self.table).limit(1))
r.fetchone()
with (
mock.patch.object(r, "_soft_close", raise_),
testing.expect_raises_message(IOError, "random non-DBAPI"),
):
r.first()
r.close()
|
AlternateCursorResultTest
|
python
|
realpython__materials
|
python-maze-solver/source_code_final/src/maze_solver/graphs/converter.py
|
{
"start": 276,
"end": 2459
}
|
class ____(NamedTuple):
node1: Node
node2: Node
@property
def flip(self) -> "Edge":
return Edge(self.node2, self.node1)
@property
def distance(self) -> float:
return math.dist(
(self.node1.row, self.node1.column),
(self.node2.row, self.node2.column),
)
def weight(self, bonus=1, penalty=2) -> float:
match self.node2.role:
case Role.REWARD:
return self.distance - bonus
case Role.ENEMY:
return self.distance + penalty
case _:
return self.distance
def make_graph(maze: Maze) -> nx.DiGraph:
return nx.DiGraph(
(edge.node1, edge.node2, {"weight": edge.weight()})
for edge in get_directed_edges(maze, get_nodes(maze))
)
def get_directed_edges(maze: Maze, nodes: set[Node]) -> set[Edge]:
return (edges := get_edges(maze, nodes)) | {edge.flip for edge in edges}
def get_nodes(maze: Maze) -> set[Node]:
nodes: set[Node] = set()
for square in maze:
if square.role in (Role.EXTERIOR, Role.WALL):
continue
if square.role is not Role.NONE:
nodes.add(square)
if (
square.border.intersection
or square.border.dead_end
or square.border.corner
):
nodes.add(square)
return nodes
def get_edges(maze: Maze, nodes: set[Node]) -> set[Edge]:
edges: set[Edge] = set()
for source_node in nodes:
# Follow right:
node = source_node
for x in range(node.column + 1, maze.width):
if node.border & Border.RIGHT:
break
node = maze.squares[node.row * maze.width + x]
if node in nodes:
edges.add(Edge(source_node, node))
break
# Follow down:
node = source_node
for y in range(node.row + 1, maze.height):
if node.border & Border.BOTTOM:
break
node = maze.squares[y * maze.width + node.column]
if node in nodes:
edges.add(Edge(source_node, node))
break
return edges
|
Edge
|
python
|
pandas-dev__pandas
|
pandas/tests/api/test_api.py
|
{
"start": 450,
"end": 937
}
|
class ____:
def check(self, namespace, expected, ignored=None):
# see which names are in the namespace, minus optional
# ignored ones
# compare vs the expected
result = sorted(
f for f in dir(namespace) if not f.startswith("__") and f != "annotations"
)
if ignored is not None:
result = sorted(set(result) - set(ignored))
expected = sorted(expected)
tm.assert_almost_equal(result, expected)
|
Base
|
python
|
dagster-io__dagster
|
python_modules/dagster/dagster/_core/storage/file_manager.py
|
{
"start": 1507,
"end": 1965
}
|
class ____(FileHandle):
"""A reference to a file on a local filesystem."""
def __init__(self, path: str):
self._path = check.str_param(path, "path")
@public
@property
def path(self) -> str:
"""The file's path."""
return self._path
@public
@property
def path_desc(self) -> str:
"""A representation of the file path for display purposes only."""
return self._path
@public
|
LocalFileHandle
|