| language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6-201) | class_span (dict) | source (stringlengths, 21-2.38M) | target (stringlengths, 1-96) |
|---|---|---|---|---|---|
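Each row below pairs a Python class whose name has been masked as `____` (the `source` cell, cut from the file at `path` at the character offsets in `class_span`) with the masked-out name (the `target` cell). Before the rows, a minimal sketch of how one might consume a row, assuming only the schema above: the `row` literal abridges the django-oauth-toolkit row further down the table, and the unmask step is an illustration of how the columns relate, not documented dataset tooling.

```python
# Hypothetical row dict mirroring the columns of this preview; the values are
# abridged from the jazzband__django-oauth-toolkit row shown later in the table.
row = {
    "language": "python",
    "repo": "jazzband__django-oauth-toolkit",
    "path": "oauth2_provider/views/application.py",
    "class_span": {"start": 281, "end": 581},  # character span of the class in the original file
    "source": 'class ____(LoginRequiredMixin):\n    """Filter Applications by request.user."""\n',
    "target": "ApplicationOwnerIsUserMixin",
}

# Restore the masked class name to recover the original snippet.
restored = row["source"].replace("____", row["target"], 1)
assert restored.startswith(f"class {row['target']}(")
```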
| python | pytorch__pytorch | test/inductor/test_ordered_set.py | {"start": 31852, "end": 32262} |
class ____(TestBasicOps, TestCase):
def setUp(self):
super().setUp()
self.case = "triple OrderedSet"
self.values = [0, "zero", operator.add]
self.OrderedSet = OrderedSet(self.values)
self.dup = OrderedSet(self.values)
self.length = 3
self.repr = None
# ------------------------------------------------------------------------------
| TestBasicOpsTriple |
| python | dagster-io__dagster | python_modules/libraries/dagster-dbt/dagster_dbt/cloud_v2/run_handler.py | {"start": 2449, "end": 9301} |
class ____:
"""Represents the run results of a dbt Cloud job run."""
run_id: int
run_results: Mapping[str, Any]
@classmethod
def from_run_results_json(cls, run_results_json: Mapping[str, Any]) -> "DbtCloudJobRunResults":
return cls(
run_id=int(run_results_json["metadata"]["env"]["DBT_CLOUD_RUN_ID"]),
run_results=run_results_json,
)
def to_default_asset_events(
self,
client: DbtCloudWorkspaceClient,
manifest: Mapping[str, Any],
dagster_dbt_translator: Optional[DagsterDbtTranslator] = None,
context: Optional[AssetExecutionContext] = None,
) -> Iterator[Union[AssetCheckEvaluation, AssetCheckResult, AssetMaterialization, Output]]:
"""Convert the run results of a dbt Cloud job run to a set of corresponding Dagster events.
Args:
client (DbtCloudWorkspaceClient): The client for the dbt Cloud workspace.
manifest (Mapping[str, Any]): The dbt manifest blob.
dagster_dbt_translator (DagsterDbtTranslator): Optionally, a custom translator for
linking dbt nodes to Dagster assets.
context (Optional[AssetExecutionContext]): The execution context.
Returns:
Iterator[Union[AssetCheckEvaluation, AssetCheckResult, AssetMaterialization, Output]]:
A set of corresponding Dagster events.
In a Dagster asset definition, the following are yielded:
- Output for refables (e.g. models, seeds, snapshots.)
- AssetCheckResult for dbt tests.
For ad hoc usage, the following are yielded:
- AssetMaterialization for refables (e.g. models, seeds, snapshots.)
- AssetCheckEvaluation for dbt tests.
"""
dagster_dbt_translator = dagster_dbt_translator or DagsterDbtTranslator()
has_asset_def: bool = bool(context and context.has_assets_def)
run = DbtCloudRun.from_run_details(run_details=client.get_run_details(run_id=self.run_id))
invocation_id: str = self.run_results["metadata"]["invocation_id"]
for result in self.run_results["results"]:
unique_id: str = result["unique_id"]
dbt_resource_props: Mapping[str, Any] = manifest["nodes"].get(unique_id)
if not dbt_resource_props:
logger.warning(
f"Unique ID {unique_id} not found in manifest. "
f"This can happen if you are parsing old runs fetched via the sensor, "
f"or if your manifest is out of date. "
f"Reloading your code location will fix the latter."
)
continue
select: str = ".".join(dbt_resource_props["fqn"])
default_metadata = {
"unique_id": unique_id,
"invocation_id": invocation_id,
"execution_duration": result["execution_time"],
}
if run.url:
default_metadata["run_url"] = MetadataValue.url(run.url)
resource_type: str = dbt_resource_props["resource_type"]
result_status: str = result["status"]
materialization: str = dbt_resource_props["config"]["materialized"]
is_ephemeral = materialization == "ephemeral"
# Build the specs for the given unique ID
asset_specs, _ = build_dbt_specs(
manifest=manifest,
translator=dagster_dbt_translator,
select=select,
exclude="",
selector="",
io_manager_key=None,
project=None,
)
if (
resource_type in REFABLE_NODE_TYPES
and result_status == NodeStatus.Success
and not is_ephemeral
):
spec = asset_specs[0]
metadata = {
**default_metadata,
COMPLETED_AT_TIMESTAMP_METADATA_KEY: MetadataValue.timestamp(
get_completed_at_timestamp(result=result)
),
}
if context and has_asset_def:
yield Output(
value=None,
output_name=spec.key.to_python_identifier(),
metadata=metadata,
)
else:
yield AssetMaterialization(
asset_key=spec.key,
metadata=metadata,
)
elif resource_type == NodeType.Test:
metadata = {
**default_metadata,
"status": result_status,
COMPLETED_AT_TIMESTAMP_METADATA_KEY: MetadataValue.timestamp(
get_completed_at_timestamp(result=result)
),
}
if result["failures"] is not None:
metadata["dagster_dbt/failed_row_count"] = result["failures"]
asset_check_key = get_asset_check_key_for_test(
manifest=manifest,
dagster_dbt_translator=dagster_dbt_translator,
test_unique_id=unique_id,
project=None,
)
if (
context
and has_asset_def
and asset_check_key is not None
and asset_check_key in context.selected_asset_check_keys
):
# The test is an asset check in an asset, so yield an `AssetCheckResult`.
yield AssetCheckResult(
passed=result_status == TestStatus.Pass,
asset_key=asset_check_key.asset_key,
check_name=asset_check_key.name,
metadata=metadata,
severity=(
AssetCheckSeverity.WARN
if result_status == TestStatus.Warn
else AssetCheckSeverity.ERROR
),
)
elif not has_asset_def and asset_check_key is not None:
yield AssetCheckEvaluation(
passed=result_status == TestStatus.Pass,
asset_key=asset_check_key.asset_key,
check_name=asset_check_key.name,
metadata=metadata,
severity=(
AssetCheckSeverity.WARN
if result_status == TestStatus.Warn
else AssetCheckSeverity.ERROR
),
)
| DbtCloudJobRunResults |
| python | great-expectations__great_expectations | great_expectations/expectations/expectation.py | {"start": 85637, "end": 97348} |
class ____(BatchExpectation, ABC):
"""Base class for ColumnMapExpectations.
ColumnMapExpectations are evaluated for a column and ask a yes/no question about every row in the column.
Based on the result, they then calculate the percentage of rows that gave a positive answer.
If the percentage is high enough, the Expectation considers that data valid.
ColumnMapExpectations must implement a `_validate(...)` method containing logic
for determining whether the Expectation is successfully validated.
Raises:
InvalidExpectationConfigurationError: If `column` is missing from configuration.
Args:
domain_keys (tuple): A tuple of the keys used to determine the domain of the
expectation.
success_keys (tuple): A tuple of the keys used to determine the success of
the expectation.
""" # noqa: E501 # FIXME CoP
column: StrictStr = Field(min_length=1, description=COLUMN_DESCRIPTION)
mostly: MostlyField = 1
row_condition: RowConditionType = None
condition_parser: Union[ConditionParser, None] = None
catch_exceptions: bool = True
map_metric: ClassVar[Optional[str]] = None
domain_keys: ClassVar[Tuple[str, ...]] = (
"batch_id",
"column",
"row_condition",
"condition_parser",
)
domain_type: ClassVar[MetricDomainTypes] = MetricDomainTypes.COLUMN
success_keys: ClassVar[Tuple[str, ...]] = ("mostly",)
class Config:
@staticmethod
def schema_extra(schema: Dict[str, Any], model: Type[ColumnMapExpectation]) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"domain_type": {
"title": "Domain Type",
"type": "string",
"const": model.domain_type,
"description": "Column Map",
}
}
)
@classmethod
@override
def is_abstract(cls) -> bool:
return not cls.map_metric or super().is_abstract()
@override
def get_validation_dependencies(
self,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
**kwargs: dict,
) -> ValidationDependencies:
validation_dependencies: ValidationDependencies = super().get_validation_dependencies(
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
assert isinstance(self.map_metric, str), (
"ColumnMapExpectation must override get_validation_dependencies "
"or declare exactly one map_metric"
)
assert self.metric_dependencies == tuple(), (
"ColumnMapExpectation must be configured using map_metric, "
"and cannot have metric_dependencies declared."
)
metric_kwargs: dict
metric_kwargs = get_metric_kwargs(
metric_name=f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_configuration=MetricConfiguration(
metric_name=f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
metric_kwargs = get_metric_kwargs(
metric_name="table.row_count",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name="table.row_count",
metric_configuration=MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
result_format_str: Optional[str] = validation_dependencies.result_format.get(
"result_format"
)
include_unexpected_rows: Optional[bool] = validation_dependencies.result_format.get(
"include_unexpected_rows"
)
if result_format_str == ResultFormat.BOOLEAN_ONLY:
return validation_dependencies
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
if include_unexpected_rows:
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
if result_format_str == ResultFormat.BASIC:
return validation_dependencies
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
metric_kwargs = get_metric_kwargs(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
configuration=self.configuration,
runtime_configuration=runtime_configuration,
)
validation_dependencies.set_metric_configuration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_configuration=MetricConfiguration(
metric_name=f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}",
metric_domain_kwargs=metric_kwargs["metric_domain_kwargs"],
metric_value_kwargs=metric_kwargs["metric_value_kwargs"],
),
)
return validation_dependencies
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
result_format: str | dict[str, Any] = self._get_result_format(
runtime_configuration=runtime_configuration
)
include_unexpected_rows: bool
unexpected_index_column_names: int | str | list[str] | None
if isinstance(result_format, dict):
include_unexpected_rows = result_format.get("include_unexpected_rows", False)
unexpected_index_column_names = result_format.get("unexpected_index_column_names", None)
else:
include_unexpected_rows = False
unexpected_index_column_names = None
total_count: Optional[int] = metrics.get("table.row_count")
null_count: Optional[int] = metrics.get(
f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
)
unexpected_count: Optional[int] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
)
unexpected_values: Optional[List[Any]] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_VALUES.value}"
)
unexpected_index_list: Optional[List[int]] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_LIST.value}"
)
unexpected_index_query: Optional[str] = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_INDEX_QUERY.value}"
)
unexpected_rows: pd.DataFrame | None = None
if include_unexpected_rows:
unexpected_rows = metrics.get(
f"{self.map_metric}.{SummarizationMetricNameSuffixes.UNEXPECTED_ROWS.value}"
)
if total_count is None or null_count is None:
total_count = nonnull_count = 0
else:
nonnull_count = total_count - null_count
if unexpected_count is None or total_count == 0 or nonnull_count == 0:
# Vacuously true
success = True
else:
success = _mostly_success(
nonnull_count,
unexpected_count,
self._get_success_kwargs()["mostly"],
)
return _format_map_output(
result_format=parse_result_format(result_format),
success=success,
element_count=total_count,
nonnull_count=nonnull_count,
unexpected_count=unexpected_count,
unexpected_list=unexpected_values,
unexpected_index_list=unexpected_index_list,
unexpected_rows=unexpected_rows,
unexpected_index_query=unexpected_index_query,
unexpected_index_column_names=unexpected_index_column_names,
)
| ColumnMapExpectation |
| python | jazzband__django-oauth-toolkit | oauth2_provider/views/application.py | {"start": 281, "end": 581} |
class ____(LoginRequiredMixin):
"""
This mixin is used to provide an Application queryset filtered by the current request.user.
"""
fields = "__all__"
def get_queryset(self):
return get_application_model().objects.filter(user=self.request.user)
| ApplicationOwnerIsUserMixin |
| python | numpy__numpy | numpy/random/tests/test_direct.py | {"start": 5190, "end": 12280} |
class ____:
dtype = np.uint64
data2 = data1 = {}
@classmethod
def setup_class(cls):
cls.bit_generator = PCG64
cls.bits = 64
cls.dtype = np.uint64
cls.seed_error_type = TypeError
cls.invalid_init_types = []
cls.invalid_init_values = []
@classmethod
def _read_csv(cls, filename):
with open(filename) as csv:
seed = csv.readline()
seed = seed.split(',')
seed = [int(s.strip(), 0) for s in seed[1:]]
data = []
for line in csv:
data.append(int(line.split(',')[-1].strip(), 0))
return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
def test_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data1['data'])
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw()
assert_equal(uints, self.data1['data'][0])
bit_generator = self.bit_generator(*self.data2['seed'])
uints = bit_generator.random_raw(1000)
assert_equal(uints, self.data2['data'])
def test_random_raw(self):
bit_generator = self.bit_generator(*self.data1['seed'])
uints = bit_generator.random_raw(output=False)
assert uints is None
uints = bit_generator.random_raw(1000, output=False)
assert uints is None
def test_gauss_inv(self):
n = 25
rs = RandomState(self.bit_generator(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, self.bits))
rs = RandomState(self.bit_generator(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, self.bits))
def test_uniform_double(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals))
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float64)
def test_uniform_float(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
vals = uniform32_from_uint(self.data1['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
rs = Generator(self.bit_generator(*self.data2['seed']))
vals = uniform32_from_uint(self.data2['data'], self.bits)
uniforms = rs.random(len(vals), dtype=np.float32)
assert_allclose(uniforms, vals)
assert_equal(uniforms.dtype, np.float32)
def test_repr(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in repr(rs)
assert f'{id(rs):#x}'.upper().replace('X', 'x') in repr(rs)
def test_str(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
assert 'Generator' in str(rs)
assert str(self.bit_generator.__name__) in str(rs)
assert f'{id(rs):#x}'.upper().replace('X', 'x') not in str(rs)
def test_pickle(self):
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
bitgen_pkl = pickle.dumps(bit_generator)
reloaded = pickle.loads(bitgen_pkl)
reloaded_state = reloaded.state
assert_array_equal(Generator(bit_generator).standard_normal(1000),
Generator(reloaded).standard_normal(1000))
assert bit_generator is not reloaded
assert_state_equal(reloaded_state, state)
ss = SeedSequence(100)
aa = pickle.loads(pickle.dumps(ss))
assert_equal(ss.state, aa.state)
def test_pickle_preserves_seed_sequence(self):
# GH 26234
# Add explicit test that bit generators preserve seed sequences
import pickle
bit_generator = self.bit_generator(*self.data1['seed'])
ss = bit_generator.seed_seq
bg_plk = pickle.loads(pickle.dumps(bit_generator))
ss_plk = bg_plk.seed_seq
assert_equal(ss.state, ss_plk.state)
assert_equal(ss.pool, ss_plk.pool)
bit_generator.seed_seq.spawn(10)
bg_plk = pickle.loads(pickle.dumps(bit_generator))
ss_plk = bg_plk.seed_seq
assert_equal(ss.state, ss_plk.state)
assert_equal(ss.n_children_spawned, ss_plk.n_children_spawned)
def test_invalid_state_type(self):
bit_generator = self.bit_generator(*self.data1['seed'])
with pytest.raises(TypeError):
bit_generator.state = {'1'}
def test_invalid_state_value(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
state['bit_generator'] = 'otherBitGenerator'
with pytest.raises(ValueError):
bit_generator.state = state
def test_invalid_init_type(self):
bit_generator = self.bit_generator
for st in self.invalid_init_types:
with pytest.raises(TypeError):
bit_generator(*st)
def test_invalid_init_values(self):
bit_generator = self.bit_generator
for st in self.invalid_init_values:
with pytest.raises((ValueError, OverflowError)):
bit_generator(*st)
def test_benchmark(self):
bit_generator = self.bit_generator(*self.data1['seed'])
bit_generator._benchmark(1)
bit_generator._benchmark(1, 'double')
with pytest.raises(ValueError):
bit_generator._benchmark(1, 'int32')
@pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
def test_cffi(self):
bit_generator = self.bit_generator(*self.data1['seed'])
cffi_interface = bit_generator.cffi
assert isinstance(cffi_interface, interface)
other_cffi_interface = bit_generator.cffi
assert other_cffi_interface is cffi_interface
@pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
def test_ctypes(self):
bit_generator = self.bit_generator(*self.data1['seed'])
ctypes_interface = bit_generator.ctypes
assert isinstance(ctypes_interface, interface)
other_ctypes_interface = bit_generator.ctypes
assert other_ctypes_interface is ctypes_interface
def test_getstate(self):
bit_generator = self.bit_generator(*self.data1['seed'])
state = bit_generator.state
alt_state = bit_generator.__getstate__()
assert isinstance(alt_state, tuple)
assert_state_equal(state, alt_state[0])
assert isinstance(alt_state[1], SeedSequence)
| Base |
| python | huggingface__transformers | tests/models/eomt/test_image_processing_eomt.py | {"start": 1433, "end": 4024} |
class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
min_resolution=30,
max_resolution=400,
size=None,
do_resize=True,
do_pad=True,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
num_labels=10,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.do_pad = do_pad
self.size = size if size is not None else {"shortest_edge": 18, "longest_edge": 18}
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
# for the post_process_functions
self.batch_size = 2
self.num_queries = 3
self.num_classes = 2
self.height = 18
self.width = 18
self.num_labels = num_labels
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
"num_labels": self.num_labels,
}
def prepare_fake_eomt_outputs(self, batch_size, patch_offsets=None):
return EomtForUniversalSegmentationOutput(
masks_queries_logits=torch.randn((batch_size, self.num_queries, self.height, self.width)),
class_queries_logits=torch.randn((batch_size, self.num_queries, self.num_classes + 1)),
patch_offsets=patch_offsets,
)
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
def prepare_semantic_single_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
example = ds[0]
return example["image"], example["map"]
def prepare_semantic_batch_inputs():
ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
return list(ds["image"][:2]), list(ds["map"][:2])
@require_torch
@require_vision
| EomtImageProcessingTester |
| python | apache__airflow | providers/microsoft/azure/src/airflow/providers/microsoft/azure/operators/asb.py | {"start": 8166, "end": 9402} |
class ____(BaseOperator):
"""
Delete the Queue in the Azure Service Bus namespace.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureServiceBusDeleteQueueOperator`
:param queue_name: The name of the queue in Service Bus namespace.
:param azure_service_bus_conn_id: Reference to the
:ref: `Azure Service Bus connection <howto/connection:azure_service_bus>`.
"""
template_fields: Sequence[str] = ("queue_name",)
ui_color = "#e4f0e8"
def __init__(
self,
*,
queue_name: str,
azure_service_bus_conn_id: str = "azure_service_bus_default",
**kwargs,
) -> None:
super().__init__(**kwargs)
self.queue_name = queue_name
self.azure_service_bus_conn_id = azure_service_bus_conn_id
def execute(self, context: Context) -> None:
"""Delete Queue in Service Bus namespace, by connecting to Service Bus Admin client."""
# Create the hook
hook = AdminClientHook(azure_service_bus_conn_id=self.azure_service_bus_conn_id)
# delete queue with name
hook.delete_queue(self.queue_name)
| AzureServiceBusDeleteQueueOperator |
| python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {"start": 415183, "end": 416379} |
class ____(sgqlc.types.Interface):
"""Common fields across different project field value types"""
__schema__ = github_schema
__field_names__ = ("created_at", "creator", "database_id", "field", "id", "item", "updated_at")
created_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="createdAt")
"""Identifies the date and time when the object was created."""
creator = sgqlc.types.Field(Actor, graphql_name="creator")
"""The actor who created the item."""
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
"""Identifies the primary key from the database."""
field = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2FieldConfiguration"), graphql_name="field")
"""The project field that contains this value."""
id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="id")
item = sgqlc.types.Field(sgqlc.types.non_null("ProjectV2Item"), graphql_name="item")
"""The project item that contains this value."""
updated_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="updatedAt")
"""Identifies the date and time when the object was last updated."""
| ProjectV2ItemFieldValueCommon |
| python | great-expectations__great_expectations | contrib/great_expectations_ethical_ai_expectations/great_expectations_ethical_ai_expectations/expectations/expect_table_linear_feature_importances_to_be.py | {"start": 2745, "end": 8324} |
class ____(BatchExpectation):
"""Expect Feature Importances of specified columns in table for Linear Regression to meet threshold."""
# These examples will be shown in the public gallery, and also executed as unit tests for your Expectation
examples = [
{
"data": {
"x_1": [3, 5, 7],
"x_2": [1, 1, 1],
"x_3": [0.01, 0.02, 0.01],
"y": [0.031, 0.052, 0.071],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {
"n_features": 1,
"important_columns": ["x_1"],
"y_column": "y",
"threshold": 0.35,
},
"out": {
"success": True,
},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"n_features": 2, "y_column": "y", "threshold": 0.35},
"out": {"success": False},
},
],
}
]
# This dictionary contains metadata for display in the public gallery
library_metadata = {
"tags": ["ai/ml", "fair-ai", "hackathon-22"],
"contributors": ["@austiezr"],
"requirements": ["scikit-learn"],
}
metric_dependencies = ("table.modeling.linear.feature_importances",)
success_keys = (
"n_features",
"important_columns",
"y_column",
"threshold",
)
default_kwarg_values = {
"n_features": None,
"important_columns": None,
"y_column": None,
"threshold": None,
"result_format": "BASIC",
"catch_exceptions": False,
"meta": None,
}
def validate_configuration(self, configuration: Optional[ExpectationConfiguration]) -> None:
"""
Validates that a configuration has been set, and sets a configuration if it has yet to be set. Ensures that
necessary configuration arguments have been provided for the validation of the expectation.
Args:
configuration (OPTIONAL[ExpectationConfiguration]): \
An optional Expectation Configuration entry that will be used to configure the expectation
Returns:
None. Raises InvalidExpectationConfigurationError if the config is not validated successfully
"""
super().validate_configuration(configuration)
configuration = configuration or self.configuration
n_features = configuration.kwargs.get("n_features")
columns = configuration.kwargs.get("important_columns")
threshold = configuration.kwargs.get("threshold")
y_column = configuration.kwargs.get("y_column")
try:
assert columns is not None or threshold is not None, (
"at least one of important_columns or threshold is required"
)
assert isinstance(n_features, int) or n_features is None, (
"n_features must be an integer"
)
if columns is not None:
assert (isinstance(columns, (tuple, list))) and all(
isinstance(i, str) for i in columns
), "columns must be a tuple or list of string column names"
assert (isinstance(threshold, float) and (0 <= threshold <= 1)) or threshold is None, (
"threshold must be a float between 0 and 1"
)
assert y_column is not None, "target y_column must be specified"
assert isinstance(y_column, str), "y_column must be a string column name"
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
super().validate_configuration(configuration)
def _validate(
self,
metrics,
runtime_configuration=None,
execution_engine=None,
):
importances = dict(
sorted(
metrics["table.modeling.linear.feature_importances"].items(),
key=lambda item: item[1],
reverse=True,
)
)
configuration = self.configuration
n_features = configuration["kwargs"].get("n_features")
columns = configuration["kwargs"].get("important_columns")
threshold = configuration["kwargs"].get("threshold")
if columns:
column_success = []
for i in columns:
if importances[i] >= threshold:
column_success.append(True)
else:
column_success.append(False)
column_success = all(column_success)
else:
column_success = True
if n_features:
n_features_success = []
for i in importances:
if importances[i] >= threshold:
n_features_success.append(True)
n_features_success = len(n_features_success) == int(n_features)
else:
n_features_success = True
success = column_success and n_features_success
return {"success": success, "result": {"observed_value": importances}}
if __name__ == "__main__":
ExpectTableLinearFeatureImportancesToBe().print_diagnostic_checklist()
| ExpectTableLinearFeatureImportancesToBe |
| python | huggingface__transformers | tests/models/roc_bert/test_modeling_roc_bert.py | {"start": 19096, "end": 32386} |
class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
RoCBertModel,
RoCBertForMaskedLM,
RoCBertForCausalLM,
RoCBertForMultipleChoice,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertForPreTraining,
)
if is_torch_available()
else ()
)
# Doesn't run generation tests. There are interface mismatches when using `generate` -- TODO @gante
all_generative_model_classes = ()
pipeline_model_mapping = (
{
"feature-extraction": RoCBertModel,
"fill-mask": RoCBertForMaskedLM,
"question-answering": RoCBertForQuestionAnswering,
"text-classification": RoCBertForSequenceClassification,
"text-generation": RoCBertForCausalLM,
"token-classification": RoCBertForTokenClassification,
"zero-shot": RoCBertForSequenceClassification,
}
if is_torch_available()
else {}
)
# TODO: Fix the failed tests when this model gets more usage
def is_pipeline_test_to_skip(
self,
pipeline_test_case_name,
config_class,
model_architecture,
tokenizer_name,
image_processor_name,
feature_extractor_name,
processor_name,
):
if pipeline_test_case_name in [
"FillMaskPipelineTests",
"FeatureExtractionPipelineTests",
"TextClassificationPipelineTests",
"TokenClassificationPipelineTests",
]:
# Get error: IndexError: index out of range in self.
# `word_shape_file` and `word_pronunciation_file` should be shrunk during tiny model creation,
# otherwise `IndexError` could occur in some embedding layers. Skip for now until this model has
# more usage.
return True
return False
# Overwriting to add `is_decoder` flag
def prepare_config_and_inputs_for_generate(self, batch_size=2):
config, inputs = super().prepare_config_and_inputs_for_generate(batch_size)
config.is_decoder = True
return config, inputs
# special case for ForPreTraining model
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
inputs_dict["labels_input_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["labels_input_shape_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["labels_input_pronunciation_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["attack_input_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["attack_input_shape_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
inputs_dict["attack_input_pronunciation_ids"] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
)
return inputs_dict
def setUp(self):
self.model_tester = RoCBertModelTester(self)
self.config_tester = ConfigTester(self, config_class=RoCBertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_for_masked_lm(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)
def test_for_multiple_choice(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_for_question_answering(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def test_for_sequence_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def test_for_token_classification(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def test_for_pretraining(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
def test_model_as_decoder(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)
def test_model_as_decoder_with_default_input_mask(self):
(
config,
input_ids,
input_shape_ids,
input_pronunciation_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
input_mask = None
self.model_tester.create_and_check_model_as_decoder(
config,
input_ids,
input_shape_ids,
input_pronunciation_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
@slow
def test_model_from_pretrained(self):
model_name = "weiweishi/roc-bert-base-zh"
model = RoCBertModel.from_pretrained(model_name)
self.assertIsNotNone(model)
def attention_mask_padding_matches_padding_free_with_position_ids(
self, attn_implementation: str, fa_kwargs: bool = False
):
"""
Overwritten to account for the embeddings that rely on position ids.
"""
if not self.has_attentions:
self.skipTest(reason="Model architecture does not support attentions")
max_new_tokens = 30
support_flag = {
"sdpa": "_supports_sdpa",
"flash_attention_2": "_supports_flash_attn",
"flash_attention_3": "_supports_flash_attn",
}
for model_class in self.all_generative_model_classes:
if attn_implementation != "eager" and not getattr(model_class, support_flag[attn_implementation]):
self.skipTest(f"{model_class.__name__} does not support {attn_implementation}")
            # can't infer whether the new attn mask API is supported, so assume that only models with attention backend support it
if not model_class._supports_attention_backend:
self.skipTest(f"{model_class.__name__} does not support new attention mask API")
if model_class._is_stateful: # non-transformer models most probably have no packing support
self.skipTest(f"{model_class.__name__} doesn't support packing!")
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
if config.is_encoder_decoder:
self.skipTest("Model is an encoder-decoder")
if 0 not in inputs_dict.get("attention_mask", []) or "attention_mask" not in inputs_dict:
self.skipTest("Model dummy inputs should contain padding in their attention mask")
if "input_ids" not in inputs_dict or inputs_dict["input_ids"].ndim != 2:
self.skipTest("Model dummy inputs should contain text input ids")
# make sure that all models have enough positions for generation
dummy_input_ids = inputs_dict["input_ids"]
if hasattr(config, "max_position_embeddings"):
config.max_position_embeddings = max_new_tokens + dummy_input_ids.shape[1] + 1
model = model_class(config)
if "position_ids" not in inspect.signature(model.forward).parameters:
self.skipTest("Model does not support position_ids")
if (not fa_kwargs) and "position_ids" not in inspect.signature(model.forward).parameters:
continue # this model doesn't accept position ids as input
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
# Drop all keys except for the minimal set. Hard to manipulate with multimodals etc
inputs_dict = {k: v for k, v in inputs_dict.items() if k in ["input_ids", "attention_mask"]}
# Ensure left padding, to adapt for some models
if 0 in inputs_dict["attention_mask"][:, -1]:
inputs_dict["attention_mask"] = inputs_dict["attention_mask"].flip(1)
dummy_attention_mask = inputs_dict["attention_mask"]
dummy_input_ids[~dummy_attention_mask.bool()] = config.get_text_config().pad_token_id
                # Main difference to other models: we need to prepare position ids according to the attention mask,
                # as we use them to extract embeddings that rely on the correct position. Naively increasing sequences
                # no longer suffice here; the solution below computes an increasing sequence over the 1s and puts 0s elsewhere.
inputs_dict["position_ids"] = ((inputs_dict["attention_mask"] == 1).long().cumsum(dim=1) - 1) * (
inputs_dict["attention_mask"] == 1
).long()
model = (
model_class.from_pretrained(
tmpdirname,
dtype=torch.bfloat16,
attn_implementation=attn_implementation,
)
.to(torch_device)
.eval()
)
if fa_kwargs:
# flatten
features = [
{"input_ids": i[a.bool()].tolist()} for i, a in zip(dummy_input_ids, dummy_attention_mask)
]
# add position_ids + fa_kwargs
data_collator = DataCollatorWithFlattening(return_tensors="pt", return_flash_attn_kwargs=True)
batch = data_collator(features)
padfree_inputs_dict = {
k: t.to(torch_device) if torch.is_tensor(t) else t for k, t in batch.items()
}
else:
# create packed position_ids
position_ids = (
torch.cat([torch.arange(length) for length in dummy_attention_mask.sum(1).tolist()])
.long()
.unsqueeze(0)
.to(torch_device)
)
padfree_inputs_dict = {
"input_ids": dummy_input_ids[dummy_attention_mask.bool()].unsqueeze(0),
"position_ids": position_ids,
}
# We need to do simple forward without cache in order to trigger packed SDPA/flex/eager attention path
res_padded = model(**inputs_dict, use_cache=False)
res_padfree = model(**padfree_inputs_dict, use_cache=False)
logits_padded = res_padded.logits[dummy_attention_mask.bool()]
logits_padfree = res_padfree.logits[0]
# acceptable numerical instability
tol = torch.finfo(torch.bfloat16).eps
torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)
def flash_attn_inference_equivalence(
self, attn_implementation: str, padding_side: str, atol: float = 4e-2, rtol: float = 4e-2
):
super().flash_attn_inference_equivalence(
attn_implementation,
padding_side,
# relaxing the tolerance here
atol=6e-2,
rtol=4e-2,
)
@require_torch
| RoCBertModelTest |
| python | getsentry__sentry | tests/acceptance/test_organization_monitors.py | {"start": 377, "end": 6049} |
class ____(AcceptanceTestCase):
def setUp(self) -> None:
super().setUp()
self.path = f"/organizations/{self.organization.slug}/insights/crons/"
self.team = self.create_team(organization=self.organization, name="Mariachi Band")
self.project = self.create_project(
organization=self.organization, teams=[self.team], name="Bengal"
)
self.create_team_membership(self.team, user=self.user)
self.login_as(self.user)
def test_empty_crons_page(self) -> None:
self.browser.get(self.path)
self.browser.wait_until(xpath="//h3[text()='Monitor Your Cron Jobs']")
def test_quick_start_flow(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.click_when_visible("[aria-label='Create php Monitor']")
self.browser.click_when_visible(xpath="//li[@role='tab']//*[text()='Manual']")
self.browser.wait_until('[name="name"]')
name_input = self.browser.find_element_by_name("name")
name_input.send_keys("My Monitor")
schedule_input = self.browser.find_element_by_name("config.schedule")
schedule_input.clear()
schedule_input.send_keys("10 0 * * *")
self.browser.click_when_visible("#project")
self.browser.click_when_visible(f'[data-test-id="{self.project.slug}"]')
self.browser.click_when_visible('button[aria-label="Create"]')
self.browser.wait_until(xpath="//h1[text()='My Monitor']")
def test_create_cron_monitor(self) -> None:
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.click_when_visible("a[aria-label='Add Cron Monitor']")
self.browser.wait_until('[name="name"]')
name_input = self.browser.find_element_by_name("name")
name_input.send_keys("My Monitor")
self.browser.click_when_visible("#project")
self.browser.click_when_visible(f'[data-test-id="{self.project.slug}"]')
schedule_input = self.browser.find_element_by_name("config.schedule")
schedule_input.clear()
schedule_input.send_keys("10 0 * * *")
margin = self.browser.find_element_by_name("config.checkinMargin")
margin.send_keys("5")
max_runtime = self.browser.find_element_by_name("config.maxRuntime")
max_runtime.send_keys("10")
self.browser.click_when_visible('button[aria-label="Create"]')
self.browser.wait_until(xpath="//h1[text()='My Monitor']")
self.browser.element_exists(xpath="//*[text()='At 12:10 AM']")
self.browser.element_exists(xpath="//*[text()='Check-ins missed after 5 mins']")
self.browser.element_exists(xpath="//*[text()='Check-ins longer than 10 mins or errors']")
def test_list_monitors(self) -> None:
monitor = Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
name="My Monitor",
config={
"schedule": "0 0 * * *",
"schedule_type": ScheduleType.CRONTAB,
"max_runtime": None,
"checkin_margin": None,
},
)
with mock.patch(
"django.utils.timezone.now",
return_value=(datetime.now(tz=UTC)),
):
ts = timezone.now() - timedelta(days=1)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
last_checkin=ts,
)
MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.OK,
date_added=ts,
)
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until(xpath="//a//*[text()='My Monitor']")
self.browser.wait_until('[data-test-id="monitor-checkin-tick"]')
def test_edit_monitor(self) -> None:
Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
name="My Monitor",
config={
"schedule": "0 0 * * *",
"schedule_type": ScheduleType.CRONTAB,
"max_runtime": None,
"checkin_margin": None,
},
)
self.browser.get(self.path)
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.click_when_visible(xpath="//a//*[text()='My Monitor']")
self.browser.click_when_visible('a[aria-label="Edit Monitor"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
name_input = self.browser.find_element_by_name("name")
name_input.clear()
name_input.send_keys("My Edited Monitor")
slug_input = self.browser.find_element_by_name("slug")
slug_input.clear()
slug_input.send_keys("my-monitor-edited-slug")
schedule_input = self.browser.find_element_by_name("config.schedule")
schedule_input.clear()
schedule_input.send_keys("5 0 * * *")
self.browser.click_when_visible('button[aria-label="Save Changes"]')
self.browser.wait_until(xpath="//h1[text()='My Edited Monitor']")
assert self.browser.element_exists(xpath="//*[text()='At 12:05 AM']")
assert "my-monitor-edited-slug" in self.browser.current_url
| OrganizationMonitorsTest |
| python | google__pytype | pytype/tools/traces/traces.py | {"start": 9180, "end": 9449} |
class ____(visitor.BaseVisitor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.line = 0
def generic_visit(self, node):
lineno = getattr(node, "lineno", 0)
if lineno > self.line:
self.line = lineno
| _LineNumberVisitor |
| python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/extra_trees_preproc_for_regression.py | {"start": 579, "end": 5839} |
class ____(AutoSklearnPreprocessingAlgorithm):
def __init__(
self,
n_estimators,
criterion,
min_samples_leaf,
min_samples_split,
max_features,
bootstrap=False,
max_leaf_nodes=None,
max_depth="None",
min_weight_fraction_leaf=0.0,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
):
self.n_estimators = n_estimators
self.estimator_increment = 10
if criterion not in ("mse", "friedman_mse", "mae"):
raise ValueError(
"'criterion' is not in ('mse', 'friedman_mse', "
"'mae'): %s" % criterion
)
self.criterion = criterion
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.bootstrap = bootstrap
self.max_leaf_nodes = max_leaf_nodes
self.max_depth = max_depth
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.preprocessor = None
def fit(self, X, Y):
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.feature_selection import SelectFromModel
self.n_estimators = int(self.n_estimators)
self.min_samples_leaf = int(self.min_samples_leaf)
self.min_samples_split = int(self.min_samples_split)
self.max_features = float(self.max_features)
self.bootstrap = check_for_bool(self.bootstrap)
self.n_jobs = int(self.n_jobs)
self.verbose = int(self.verbose)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
num_features = X.shape[1]
max_features = int(float(self.max_features) * (np.log(num_features) + 1))
# Use at most half of the features
max_features = max(1, min(int(X.shape[1] / 2), max_features))
estimator = ExtraTreesRegressor(
n_estimators=self.n_estimators,
criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
bootstrap=self.bootstrap,
max_features=max_features,
max_leaf_nodes=self.max_leaf_nodes,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
verbose=self.verbose,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
random_state=self.random_state,
)
estimator.fit(X, Y)
self.preprocessor = SelectFromModel(
estimator=estimator, threshold="mean", prefit=True
)
return self
def transform(self, X):
if self.preprocessor is None:
raise NotImplementedError
return self.preprocessor.transform(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "ETR",
"name": "Extra Trees Regressor Preprocessing",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": True,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (INPUT,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
n_estimators = Constant("n_estimators", 100)
criterion = CategoricalHyperparameter(
"criterion", ["mse", "friedman_mse", "mae"]
)
max_features = UniformFloatHyperparameter(
"max_features", 0.1, 1.0, default_value=1.0
)
max_depth = UnParametrizedHyperparameter(name="max_depth", value="None")
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1
)
min_weight_fraction_leaf = Constant("min_weight_fraction_leaf", 0.0)
bootstrap = CategoricalHyperparameter(
"bootstrap", ["True", "False"], default_value="False"
)
cs.add_hyperparameters(
[
n_estimators,
criterion,
max_features,
max_depth,
max_leaf_nodes,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
bootstrap,
]
)
return cs
| ExtraTreesPreprocessorRegression |
| python | scikit-learn__scikit-learn | sklearn/model_selection/_search.py | {"start": 16353, "end": 48085} |
class ____(MetaEstimatorMixin, BaseEstimator, metaclass=ABCMeta):
"""Abstract base class for hyper parameter search with cross-validation."""
_parameter_constraints: dict = {
"estimator": [HasMethods(["fit"])],
"scoring": [
StrOptions(set(get_scorer_names())),
callable,
list,
tuple,
dict,
None,
],
"n_jobs": [numbers.Integral, None],
"refit": ["boolean", str, callable],
"cv": ["cv_object"],
"verbose": ["verbose"],
"pre_dispatch": [numbers.Integral, str],
"error_score": [StrOptions({"raise"}), numbers.Real],
"return_train_score": ["boolean"],
}
@abstractmethod
def __init__(
self,
estimator,
*,
scoring=None,
n_jobs=None,
refit=True,
cv=None,
verbose=0,
pre_dispatch="2*n_jobs",
error_score=np.nan,
return_train_score=True,
):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
self.return_train_score = return_train_score
def __sklearn_tags__(self):
tags = super().__sklearn_tags__()
sub_estimator_tags = get_tags(self.estimator)
tags.estimator_type = sub_estimator_tags.estimator_type
tags.classifier_tags = deepcopy(sub_estimator_tags.classifier_tags)
tags.regressor_tags = deepcopy(sub_estimator_tags.regressor_tags)
# allows cross-validation to see 'precomputed' metrics
tags.input_tags.pairwise = sub_estimator_tags.input_tags.pairwise
tags.input_tags.sparse = sub_estimator_tags.input_tags.sparse
tags.array_api_support = sub_estimator_tags.array_api_support
return tags
def score(self, X, y=None, **params):
"""Return the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict
Parameters to be passed to the underlying scorer(s).
.. versionadded:: 1.4
Only available if `enable_metadata_routing=True`. See
:ref:`Metadata Routing User Guide <metadata_routing>` for more
details.
Returns
-------
score : float
The score defined by ``scoring`` if provided, and the
``best_estimator_.score`` method otherwise.
"""
_check_refit(self, "score")
check_is_fitted(self)
_raise_for_params(params, self, "score")
if _routing_enabled():
score_params = process_routing(self, "score", **params).scorer["score"]
else:
score_params = dict()
if self.scorer_ is None:
raise ValueError(
"No score function explicitly defined, "
"and the estimator doesn't provide one %s" % self.best_estimator_
)
if isinstance(self.scorer_, dict):
if self.multimetric_:
scorer = self.scorer_[self.refit]
else:
scorer = self.scorer_
return scorer(self.best_estimator_, X, y, **score_params)
# callable
score = self.scorer_(self.best_estimator_, X, y, **score_params)
if self.multimetric_:
score = score[self.refit]
return score
@available_if(_search_estimator_has("score_samples"))
def score_samples(self, X):
"""Call score_samples on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``score_samples``.
.. versionadded:: 0.24
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements
of the underlying estimator.
Returns
-------
y_score : ndarray of shape (n_samples,)
The ``best_estimator_.score_samples`` method.
"""
check_is_fitted(self)
return self.best_estimator_.score_samples(X)
@available_if(_search_estimator_has("predict"))
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,)
The predicted labels or values for `X` based on the estimator with
the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.predict(X)
@available_if(_search_estimator_has("predict_proba"))
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
Predicted class probabilities for `X` based on the estimator with
the best found parameters. The order of the classes corresponds
to that in the fitted attribute :term:`classes_`.
"""
check_is_fitted(self)
return self.best_estimator_.predict_proba(X)
@available_if(_search_estimator_has("predict_log_proba"))
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_pred : ndarray of shape (n_samples,) or (n_samples, n_classes)
Predicted class log-probabilities for `X` based on the estimator
with the best found parameters. The order of the classes
corresponds to that in the fitted attribute :term:`classes_`.
"""
check_is_fitted(self)
return self.best_estimator_.predict_log_proba(X)
@available_if(_search_estimator_has("decision_function"))
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
y_score : ndarray of shape (n_samples,) or (n_samples, n_classes) \
or (n_samples, n_classes * (n_classes-1) / 2)
Result of the decision function for `X` based on the estimator with
the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.decision_function(X)
@available_if(_search_estimator_has("transform"))
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
Xt : {ndarray, sparse matrix} of shape (n_samples, n_features)
`X` transformed in the new space based on the estimator with
the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.transform(X)
@available_if(_search_estimator_has("inverse_transform"))
def inverse_transform(self, X):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
Returns
-------
X_original : {ndarray, sparse matrix} of shape (n_samples, n_features)
Result of the `inverse_transform` function for `X` based on the
estimator with the best found parameters.
"""
check_is_fitted(self)
return self.best_estimator_.inverse_transform(X)
@property
def n_features_in_(self):
"""Number of features seen during :term:`fit`.
Only available when `refit=True`.
"""
# For consistency with other estimators we raise an AttributeError so
# that hasattr() fails if the search estimator isn't fitted.
try:
check_is_fitted(self)
except NotFittedError as nfe:
raise AttributeError(
"{} object has no n_features_in_ attribute.".format(
self.__class__.__name__
)
) from nfe
return self.best_estimator_.n_features_in_
@property
def classes_(self):
"""Class labels.
Only available when `refit=True` and the estimator is a classifier.
"""
_search_estimator_has("classes_")(self)
return self.best_estimator_.classes_
def _run_search(self, evaluate_candidates):
"""Repeatedly calls `evaluate_candidates` to conduct a search.
This method, implemented in sub-classes, makes it possible to
customize the scheduling of evaluations: GridSearchCV and
RandomizedSearchCV schedule evaluations for their whole parameter
search space at once but other more sequential approaches are also
        possible: for instance, it is possible to iteratively schedule evaluations
for new regions of the parameter search space based on previously
collected evaluation results. This makes it possible to implement
Bayesian optimization or more generally sequential model-based
optimization by deriving from the BaseSearchCV abstract base class.
For example, Successive Halving is implemented by calling
        `evaluate_candidates` multiple times (once per iteration of the SH
process), each time passing a different set of candidates with `X`
and `y` of increasing sizes.
Parameters
----------
evaluate_candidates : callable
This callback accepts:
- a list of candidates, where each candidate is a dict of
parameter settings.
- an optional `cv` parameter which can be used to e.g.
evaluate candidates on different dataset splits, or
evaluate candidates on subsampled data (as done in the
Successive Halving estimators). By default, the original
`cv` parameter is used, and it is available as a private
`_checked_cv_orig` attribute.
- an optional `more_results` dict. Each key will be added to
the `cv_results_` attribute. Values should be lists of
                  length `n_candidates`.
It returns a dict of all results so far, formatted like
``cv_results_``.
Important note (relevant whether the default cv is used or not):
in randomized splitters, and unless the random_state parameter of
cv was set to an int, calling cv.split() multiple times will
yield different splits. Since cv.split() is called in
evaluate_candidates, this means that candidates will be evaluated
on different splits each time evaluate_candidates is called. This
might be a methodological issue depending on the search strategy
that you're implementing. To prevent randomized splitters from
being used, you may use _split._yields_constant_splits()
Examples
--------
::
def _run_search(self, evaluate_candidates):
'Try C=0.1 only if C=1 is better than C=10'
all_results = evaluate_candidates([{'C': 1}, {'C': 10}])
score = all_results['mean_test_score']
if score[0] < score[1]:
evaluate_candidates([{'C': 0.1}])
"""
raise NotImplementedError("_run_search not implemented.")
def _check_refit_for_multimetric(self, scores):
"""Check `refit` is compatible with `scores` is valid"""
multimetric_refit_msg = (
"For multi-metric scoring, the parameter refit must be set to a "
"scorer key or a callable to refit an estimator with the best "
"parameter setting on the whole data and make the best_* "
"attributes available for that metric. If this is not needed, "
f"refit should be set to False explicitly. {self.refit!r} was "
"passed."
)
valid_refit_dict = isinstance(self.refit, str) and self.refit in scores
if (
self.refit is not False
and not valid_refit_dict
and not callable(self.refit)
):
raise ValueError(multimetric_refit_msg)
@staticmethod
def _select_best_index(refit, refit_metric, results):
"""Select index of the best combination of hyperparemeters."""
if callable(refit):
# If callable, refit is expected to return the index of the best
# parameter set.
best_index = refit(results)
if not isinstance(best_index, numbers.Integral):
raise TypeError("best_index_ returned is not an integer")
if best_index < 0 or best_index >= len(results["params"]):
raise IndexError("best_index_ index out of range")
else:
best_index = results[f"rank_test_{refit_metric}"].argmin()
return best_index
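    # Illustrative sketch (comment-only, not scikit-learn code): ``refit`` may
    # itself be a callable receiving ``cv_results_`` and returning the index of
    # the chosen candidate, e.g. the fastest model within one standard
    # deviation of the best mean test score:
    #
    #     import numpy as np
    #     def refit_fast_within_one_std(cv_results):
    #         means = cv_results["mean_test_score"]
    #         cutoff = means.max() - cv_results["std_test_score"][means.argmax()]
    #         ok = np.flatnonzero(means >= cutoff)
    #         return int(ok[np.argmin(cv_results["mean_fit_time"][ok])])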
def _get_scorers(self):
"""Get the scorer(s) to be used.
This is used in ``fit`` and ``get_metadata_routing``.
Returns
-------
scorers, refit_metric
"""
refit_metric = "score"
if callable(self.scoring):
scorers = self.scoring
elif self.scoring is None or isinstance(self.scoring, str):
scorers = check_scoring(self.estimator, self.scoring)
else:
scorers = _check_multimetric_scoring(self.estimator, self.scoring)
self._check_refit_for_multimetric(scorers)
refit_metric = self.refit
scorers = _MultimetricScorer(
scorers=scorers, raise_exc=(self.error_score == "raise")
)
return scorers, refit_metric
def _check_scorers_accept_sample_weight(self):
# TODO(slep006): remove when metadata routing is the only way
scorers, _ = self._get_scorers()
# In the multimetric case, warn the user for each scorer separately
if isinstance(scorers, _MultimetricScorer):
for name, scorer in scorers._scorers.items():
if not scorer._accept_sample_weight():
warnings.warn(
f"The scoring {name}={scorer} does not support sample_weight, "
"which may lead to statistically incorrect results when "
f"fitting {self} with sample_weight. "
)
return scorers._accept_sample_weight()
# In most cases, scorers is a Scorer object
# But it's a function when user passes scoring=function
if hasattr(scorers, "_accept_sample_weight"):
accept = scorers._accept_sample_weight()
else:
accept = "sample_weight" in signature(scorers).parameters
if not accept:
warnings.warn(
f"The scoring {scorers} does not support sample_weight, "
"which may lead to statistically incorrect results when "
f"fitting {self} with sample_weight. "
)
return accept
def _get_routed_params_for_fit(self, params):
"""Get the parameters to be used for routing.
This is a method instead of a snippet in ``fit`` since it's used twice,
here in ``fit``, and in ``HalvingRandomSearchCV.fit``.
"""
if _routing_enabled():
routed_params = process_routing(self, "fit", **params)
else:
params = params.copy()
groups = params.pop("groups", None)
routed_params = Bunch(
estimator=Bunch(fit=params),
splitter=Bunch(split={"groups": groups}),
scorer=Bunch(score={}),
)
# NOTE: sample_weight is forwarded to the scorer if sample_weight
# is not None and scorers accept sample_weight. For _MultimetricScorer,
# sample_weight is forwarded if any scorer accepts sample_weight
if (
params.get("sample_weight") is not None
and self._check_scorers_accept_sample_weight()
):
routed_params.scorer.score["sample_weight"] = params["sample_weight"]
return routed_params
@_fit_context(
# *SearchCV.estimator is not validated yet
prefer_skip_nested_validation=False
)
def fit(self, X, y=None, **params):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like of shape (n_samples, n_features) or (n_samples, n_samples)
Training vectors, where `n_samples` is the number of samples and
`n_features` is the number of features. For precomputed kernel or
distance matrix, the expected shape of X is (n_samples, n_samples).
y : array-like of shape (n_samples, n_output) \
or (n_samples,), default=None
Target relative to X for classification or regression;
None for unsupervised learning.
**params : dict of str -> object
Parameters passed to the ``fit`` method of the estimator, the scorer,
and the CV splitter.
If a fit parameter is an array-like whose length is equal to
`num_samples` then it will be split by cross-validation along with
`X` and `y`. For example, the :term:`sample_weight` parameter is
            split because `len(sample_weight) = len(X)`. However, this behavior
does not apply to `groups` which is passed to the splitter configured
via the `cv` parameter of the constructor. Thus, `groups` is used
*to perform the split* and determines which samples are
            assigned to each side of a split.
Returns
-------
self : object
Instance of fitted estimator.
"""
estimator = self.estimator
scorers, refit_metric = self._get_scorers()
X, y = indexable(X, y)
params = _check_method_params(X, params=params)
routed_params = self._get_routed_params_for_fit(params)
cv_orig = check_cv(self.cv, y, classifier=is_classifier(estimator))
n_splits = cv_orig.get_n_splits(X, y, **routed_params.splitter.split)
base_estimator = clone(self.estimator)
parallel = Parallel(n_jobs=self.n_jobs, pre_dispatch=self.pre_dispatch)
fit_and_score_kwargs = dict(
scorer=scorers,
fit_params=routed_params.estimator.fit,
score_params=routed_params.scorer.score,
return_train_score=self.return_train_score,
return_n_test_samples=True,
return_times=True,
return_parameters=False,
error_score=self.error_score,
verbose=self.verbose,
)
results = {}
with parallel:
all_candidate_params = []
all_out = []
all_more_results = defaultdict(list)
def evaluate_candidates(candidate_params, cv=None, more_results=None):
cv = cv or cv_orig
candidate_params = list(candidate_params)
n_candidates = len(candidate_params)
if self.verbose > 0:
print(
"Fitting {0} folds for each of {1} candidates,"
" totalling {2} fits".format(
n_splits, n_candidates, n_candidates * n_splits
)
)
out = parallel(
delayed(_fit_and_score)(
clone(base_estimator),
X,
y,
train=train,
test=test,
parameters=parameters,
split_progress=(split_idx, n_splits),
candidate_progress=(cand_idx, n_candidates),
**fit_and_score_kwargs,
)
for (cand_idx, parameters), (split_idx, (train, test)) in product(
enumerate(candidate_params),
enumerate(cv.split(X, y, **routed_params.splitter.split)),
)
)
if len(out) < 1:
raise ValueError(
"No fits were performed. "
"Was the CV iterator empty? "
"Were there no candidates?"
)
elif len(out) != n_candidates * n_splits:
raise ValueError(
"cv.split and cv.get_n_splits returned "
"inconsistent results. Expected {} "
"splits, got {}".format(n_splits, len(out) // n_candidates)
)
_warn_or_raise_about_fit_failures(out, self.error_score)
                # For callable self.scoring, the return type is only known after
# calling. If the return type is a dictionary, the error scores
# can now be inserted with the correct key. The type checking
# of out will be done in `_insert_error_scores`.
if callable(self.scoring):
_insert_error_scores(out, self.error_score)
all_candidate_params.extend(candidate_params)
all_out.extend(out)
if more_results is not None:
for key, value in more_results.items():
all_more_results[key].extend(value)
nonlocal results
results = self._format_results(
all_candidate_params, n_splits, all_out, all_more_results
)
return results
self._run_search(evaluate_candidates)
# multimetric is determined here because in the case of a callable
# self.scoring the return type is only known after calling
first_test_score = all_out[0]["test_scores"]
self.multimetric_ = isinstance(first_test_score, dict)
# check refit_metric now for a callable scorer that is multimetric
if callable(self.scoring) and self.multimetric_:
self._check_refit_for_multimetric(first_test_score)
refit_metric = self.refit
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
self.best_index_ = self._select_best_index(
self.refit, refit_metric, results
)
if not callable(self.refit):
                # When refit is not a callable, we can select the best score
                # based on the best index
self.best_score_ = results[f"mean_test_{refit_metric}"][
self.best_index_
]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
# here we clone the estimator as well as the parameters, since
# sometimes the parameters themselves might be estimators, e.g.
# when we search over different estimators in a pipeline.
# ref: https://github.com/scikit-learn/scikit-learn/pull/26786
self.best_estimator_ = clone(base_estimator).set_params(
**clone(self.best_params_, safe=False)
)
refit_start_time = time.time()
if y is not None:
self.best_estimator_.fit(X, y, **routed_params.estimator.fit)
else:
self.best_estimator_.fit(X, **routed_params.estimator.fit)
refit_end_time = time.time()
self.refit_time_ = refit_end_time - refit_start_time
if hasattr(self.best_estimator_, "feature_names_in_"):
self.feature_names_in_ = self.best_estimator_.feature_names_in_
# Store the only scorer not as a dict for single metric evaluation
if isinstance(scorers, _MultimetricScorer):
self.scorer_ = scorers._scorers
else:
self.scorer_ = scorers
self.cv_results_ = results
self.n_splits_ = n_splits
return self
def _format_results(self, candidate_params, n_splits, out, more_results=None):
n_candidates = len(candidate_params)
out = _aggregate_score_dicts(out)
results = dict(more_results or {})
for key, val in results.items():
            # each value is a list (as per the `evaluate_candidates` convention)
# we convert it to an array for consistency with the other keys
results[key] = np.asarray(val)
def _store(key_name, array, weights=None, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
            # `out` iterates candidates first, then splits, so reshape `array`
            # to `n_candidates` rows and `n_splits` cols.
array = np.array(array, dtype=np.float64).reshape(n_candidates, n_splits)
if splits:
for split_idx in range(n_splits):
# Uses closure to alter the results
results["split%d_%s" % (split_idx, key_name)] = array[:, split_idx]
array_means = np.average(array, axis=1, weights=weights)
results["mean_%s" % key_name] = array_means
if key_name.startswith(("train_", "test_")) and np.any(
~np.isfinite(array_means)
):
warnings.warn(
(
f"One or more of the {key_name.split('_')[0]} scores "
f"are non-finite: {array_means}"
),
category=UserWarning,
)
# Weighted std is not directly available in numpy
array_stds = np.sqrt(
np.average(
(array - array_means[:, np.newaxis]) ** 2, axis=1, weights=weights
)
)
results["std_%s" % key_name] = array_stds
if rank:
# When the fit/scoring fails `array_means` contains NaNs, we
# will exclude them from the ranking process and consider them
# as tied with the worst performers.
if np.isnan(array_means).all():
# All fit/scoring routines failed.
rank_result = np.ones_like(array_means, dtype=np.int32)
else:
min_array_means = np.nanmin(array_means) - 1
array_means = xpx.nan_to_num(
array_means, fill_value=min_array_means
)
rank_result = rankdata(-array_means, method="min").astype(
np.int32, copy=False
)
results["rank_%s" % key_name] = rank_result
_store("fit_time", out["fit_time"])
_store("score_time", out["score_time"])
# Store a list of param dicts at the key 'params'
for param, ma in _yield_masked_array_for_each_param(candidate_params):
results[param] = ma
results["params"] = candidate_params
test_scores_dict = _normalize_score_results(out["test_scores"])
if self.return_train_score:
train_scores_dict = _normalize_score_results(out["train_scores"])
for scorer_name in test_scores_dict:
            # Compute the (weighted) mean and std for the test scores alone
_store(
"test_%s" % scorer_name,
test_scores_dict[scorer_name],
splits=True,
rank=True,
weights=None,
)
if self.return_train_score:
_store(
"train_%s" % scorer_name,
train_scores_dict[scorer_name],
splits=True,
)
return results
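    # Shape sketch (comment-only): with the default single metric, a grid over
    # ``C`` and ``cv=3``, the resulting ``cv_results_`` contains keys such as
    # ``mean_fit_time``, ``std_fit_time``, ``mean_score_time``,
    # ``std_score_time``, ``param_C``, ``params``,
    # ``split0_test_score`` .. ``split2_test_score``, ``mean_test_score``,
    # ``std_test_score`` and ``rank_test_score``.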
def get_metadata_routing(self):
"""Get metadata routing of this object.
Please check :ref:`User Guide <metadata_routing>` on how the routing
mechanism works.
.. versionadded:: 1.4
Returns
-------
routing : MetadataRouter
A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating
routing information.
"""
router = MetadataRouter(owner=self)
router.add(
estimator=self.estimator,
method_mapping=MethodMapping().add(caller="fit", callee="fit"),
)
scorer, _ = self._get_scorers()
router.add(
scorer=scorer,
method_mapping=MethodMapping()
.add(caller="score", callee="score")
.add(caller="fit", callee="score"),
)
router.add(
splitter=self.cv,
method_mapping=MethodMapping().add(caller="fit", callee="split"),
)
return router
def _sk_visual_block_(self):
if hasattr(self, "best_estimator_"):
key, estimator = "best_estimator_", self.best_estimator_
else:
key, estimator = "estimator", self.estimator
return _VisualBlock(
"parallel",
[estimator],
names=[f"{key}: {estimator.__class__.__name__}"],
name_details=[str(estimator)],
)
|
BaseSearchCV
|
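A minimal usage sketch (not part of the source above): GridSearchCV inherits
BaseSearchCV, so fit() drives _run_search/evaluate_candidates, and with
refit=True the delegating methods forward to best_estimator_. The dataset and
grid below are illustrative choices.

from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV

X, y = load_iris(return_X_y=True)
search = GridSearchCV(LogisticRegression(max_iter=1000), {"C": [0.1, 1.0, 10.0]}, cv=3)
search.fit(X, y)                    # _run_search -> evaluate_candidates
print(search.best_params_)          # chosen via _select_best_index
print(search.predict_proba(X[:2]))  # delegated to best_estimator_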
python
|
encode__starlette
|
starlette/formparsers.py
|
{
"start": 4157,
"end": 11086
}
|
class ____:
spool_max_size = 1024 * 1024 # 1MB
"""The maximum size of the spooled temporary file used to store file data."""
max_part_size = 1024 * 1024 # 1MB
"""The maximum size of a part in the multipart request."""
def __init__(
self,
headers: Headers,
stream: AsyncGenerator[bytes, None],
*,
max_files: int | float = 1000,
max_fields: int | float = 1000,
max_part_size: int = 1024 * 1024, # 1MB
) -> None:
assert multipart is not None, "The `python-multipart` library must be installed to use form parsing."
self.headers = headers
self.stream = stream
self.max_files = max_files
self.max_fields = max_fields
self.items: list[tuple[str, str | UploadFile]] = []
self._current_files = 0
self._current_fields = 0
self._current_partial_header_name: bytes = b""
self._current_partial_header_value: bytes = b""
self._current_part = MultipartPart()
self._charset = ""
self._file_parts_to_write: list[tuple[MultipartPart, bytes]] = []
self._file_parts_to_finish: list[MultipartPart] = []
self._files_to_close_on_error: list[SpooledTemporaryFile[bytes]] = []
self.max_part_size = max_part_size
def on_part_begin(self) -> None:
self._current_part = MultipartPart()
def on_part_data(self, data: bytes, start: int, end: int) -> None:
message_bytes = data[start:end]
if self._current_part.file is None:
if len(self._current_part.data) + len(message_bytes) > self.max_part_size:
raise MultiPartException(f"Part exceeded maximum size of {int(self.max_part_size / 1024)}KB.")
self._current_part.data.extend(message_bytes)
else:
self._file_parts_to_write.append((self._current_part, message_bytes))
def on_part_end(self) -> None:
if self._current_part.file is None:
self.items.append(
(
self._current_part.field_name,
_user_safe_decode(self._current_part.data, self._charset),
)
)
else:
self._file_parts_to_finish.append(self._current_part)
# The file can be added to the items right now even though it's not
# finished yet, because it will be finished in the `parse()` method, before
# self.items is used in the return value.
self.items.append((self._current_part.field_name, self._current_part.file))
def on_header_field(self, data: bytes, start: int, end: int) -> None:
self._current_partial_header_name += data[start:end]
def on_header_value(self, data: bytes, start: int, end: int) -> None:
self._current_partial_header_value += data[start:end]
def on_header_end(self) -> None:
field = self._current_partial_header_name.lower()
if field == b"content-disposition":
self._current_part.content_disposition = self._current_partial_header_value
self._current_part.item_headers.append((field, self._current_partial_header_value))
self._current_partial_header_name = b""
self._current_partial_header_value = b""
def on_headers_finished(self) -> None:
disposition, options = parse_options_header(self._current_part.content_disposition)
try:
self._current_part.field_name = _user_safe_decode(options[b"name"], self._charset)
except KeyError:
raise MultiPartException('The Content-Disposition header field "name" must be provided.')
if b"filename" in options:
self._current_files += 1
if self._current_files > self.max_files:
raise MultiPartException(f"Too many files. Maximum number of files is {self.max_files}.")
filename = _user_safe_decode(options[b"filename"], self._charset)
tempfile = SpooledTemporaryFile(max_size=self.spool_max_size)
self._files_to_close_on_error.append(tempfile)
self._current_part.file = UploadFile(
file=tempfile, # type: ignore[arg-type]
size=0,
filename=filename,
headers=Headers(raw=self._current_part.item_headers),
)
else:
self._current_fields += 1
if self._current_fields > self.max_fields:
raise MultiPartException(f"Too many fields. Maximum number of fields is {self.max_fields}.")
self._current_part.file = None
def on_end(self) -> None:
pass
async def parse(self) -> FormData:
# Parse the Content-Type header to get the multipart boundary.
_, params = parse_options_header(self.headers["Content-Type"])
charset = params.get(b"charset", "utf-8")
if isinstance(charset, bytes):
charset = charset.decode("latin-1")
self._charset = charset
try:
boundary = params[b"boundary"]
except KeyError:
raise MultiPartException("Missing boundary in multipart.")
# Callbacks dictionary.
callbacks: MultipartCallbacks = {
"on_part_begin": self.on_part_begin,
"on_part_data": self.on_part_data,
"on_part_end": self.on_part_end,
"on_header_field": self.on_header_field,
"on_header_value": self.on_header_value,
"on_header_end": self.on_header_end,
"on_headers_finished": self.on_headers_finished,
"on_end": self.on_end,
}
# Create the parser.
parser = multipart.MultipartParser(boundary, callbacks)
try:
# Feed the parser with data from the request.
async for chunk in self.stream:
parser.write(chunk)
# Write file data, it needs to use await with the UploadFile methods
# that call the corresponding file methods *in a threadpool*,
# otherwise, if they were called directly in the callback methods above
# (regular, non-async functions), that would block the event loop in
# the main thread.
for part, data in self._file_parts_to_write:
assert part.file # for type checkers
await part.file.write(data)
for part in self._file_parts_to_finish:
assert part.file # for type checkers
await part.file.seek(0)
self._file_parts_to_write.clear()
self._file_parts_to_finish.clear()
except MultiPartException as exc:
# Close all the files if there was an error.
for file in self._files_to_close_on_error:
file.close()
raise exc
parser.finalize()
return FormData(self.items)
|
MultiPartParser
|
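A hedged driver sketch (not part of starlette): feed a hand-built multipart
body through MultiPartParser. The boundary and field layout are made up;
anyio (a starlette dependency) runs the coroutine.

import anyio
from starlette.datastructures import Headers
from starlette.formparsers import MultiPartParser

async def main() -> None:
    body = (
        b"--boundary\r\n"
        b'Content-Disposition: form-data; name="field"\r\n'
        b"\r\n"
        b"value\r\n"
        b"--boundary--\r\n"
    )

    async def stream():
        yield body  # a single chunk; real requests arrive in many chunks

    headers = Headers({"content-type": "multipart/form-data; boundary=boundary"})
    form = await MultiPartParser(headers, stream()).parse()
    print(form["field"])  # "value"

anyio.run(main)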
python
|
eventlet__eventlet
|
eventlet/db_pool.py
|
{
"start": 10243,
"end": 10855
}
|
class ____(BaseConnectionPool):
"""A pool which gives out plain database connections.
"""
def create(self):
now = time.time()
return now, now, self.connect(
self._db_module, self.connect_timeout, *self._args, **self._kwargs)
@classmethod
def connect(cls, db_module, connect_timeout, *args, **kw):
t = timeout.Timeout(connect_timeout, ConnectTimeout())
try:
return db_module.connect(*args, **kw)
finally:
t.cancel()
# default connection pool is the tpool one
ConnectionPool = TpooledConnectionPool
|
RawConnectionPool
|
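A hedged usage sketch: RawConnectionPool hands out plain DB-API connections
(contrast the tpool-wrapped default above). sqlite3 stands in for a real
networked driver; the pool-sizing keyword and get/put interface follow
BaseConnectionPool's conventional API and may differ across eventlet versions.

import sqlite3
from eventlet.db_pool import RawConnectionPool

# Connection kwargs (here `database`) are forwarded to sqlite3.connect.
pool = RawConnectionPool(sqlite3, max_size=2, database=":memory:")
conn = pool.get()
try:
    cur = conn.cursor()
    cur.execute("SELECT 1")
    print(cur.fetchone())
finally:
    pool.put(conn)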
python
|
kamyu104__LeetCode-Solutions
|
Python/find-k-pairs-with-smallest-sums.py
|
{
"start": 154,
"end": 1121
}
|
class ____(object):
def kSmallestPairs(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[List[int]]
"""
pairs = []
if len(nums1) > len(nums2):
tmp = self.kSmallestPairs(nums2, nums1, k)
for pair in tmp:
pairs.append([pair[1], pair[0]])
return pairs
min_heap = []
def push(i, j):
if i < len(nums1) and j < len(nums2):
heappush(min_heap, [nums1[i] + nums2[j], i, j])
push(0, 0)
while min_heap and len(pairs) < k:
_, i, j = heappop(min_heap)
pairs.append([nums1[i], nums2[j]])
push(i, j + 1)
if j == 0:
push(i + 1, 0) # at most queue min(n, m) space
return pairs
# time: O(mn * log k)
# space: O(k)
from heapq import nsmallest
from itertools import product
|
Solution
|
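A quick check of the heap solution above. Note that heappush/heappop come from
heapq; the trailing nsmallest/product imports in the span belong to a
different variant in the same file.

from heapq import heappush, heappop

print(Solution().kSmallestPairs([1, 7, 11], [2, 4, 6], 3))
# -> [[1, 2], [1, 4], [1, 6]]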
python
|
allegroai__clearml
|
clearml/model.py
|
{
"start": 59703,
"end": 76727
}
|
class ____(Model):
"""
Load an existing model in the system, search by model ID.
The Model will be read-only and can be used to pre initialize a network.
We can connect the model to a task as input model, then when running remotely override it with the UI.
"""
# noinspection PyProtectedMember
_EMPTY_MODEL_ID = _Model._EMPTY_MODEL_ID
_WARNING_CONNECTED_NAMES = {}
@classmethod
def import_model(
cls,
weights_url: str,
config_text: Optional[str] = None,
config_dict: Optional[dict] = None,
label_enumeration: Optional[Mapping[str, int]] = None,
name: Optional[str] = None,
project: Optional[str] = None,
tags: Optional[List[str]] = None,
comment: Optional[str] = None,
is_package: bool = False,
create_as_published: bool = False,
framework: Optional[str] = None,
) -> "InputModel":
"""
Create an InputModel object from a pre-trained model by specifying the URL of an initial weight file.
Optionally, input a configuration, label enumeration, name for the model, tags describing the model,
comment as a description of the model, indicate whether the model is a package, specify the model's
framework, and indicate whether to immediately set the model's status to ``Published``.
The model is read-only.
The **ClearML Server** (backend) may already store the model's URL. If the input model's URL is not
stored, meaning the model is new, then it is imported and ClearML stores its metadata.
If the URL is already stored, the import process stops, ClearML issues a warning message, and ClearML
reuses the model.
In your Python experiment script, after importing the model, you can connect it to the main execution
Task as an input model using :meth:`InputModel.connect` or :meth:`.Task.connect`. That initializes the
network.
.. note::
Using the **ClearML Web-App** (user interface), you can reuse imported models and switch models in
experiments.
:param str weights_url: A valid URL for the initial weights file. If the **ClearML Web-App** (backend)
already stores the metadata of a model with the same URL, that existing model is returned
and ClearML ignores all other parameters. For example:
- ``https://domain.com/file.bin``
- ``s3://bucket/file.bin``
- ``file:///home/user/file.bin``
:param str config_text: The configuration as a string. This is usually the content of a configuration
dictionary file. Specify ``config_text`` or ``config_dict``, but not both.
:type config_text: unconstrained text string
:param dict config_dict: The configuration as a dictionary. Specify ``config_text`` or ``config_dict``,
but not both.
:param dict label_enumeration: Optional label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
"background": 0,
"person": 1
}
:param str name: The name of the newly imported model. (Optional)
:param str project: The project name to add the model into. (Optional)
:param tags: The list of tags which describe the model. (Optional)
:type tags: list(str)
:param str comment: A comment / description for the model. (Optional)
:type comment: str
        :param is_package: Whether the imported weights file is a package (Optional)
- ``True`` - Is a package. Add a package tag to the model.
- ``False`` - Is not a package. Do not add a package tag. (Default)
:type is_package: bool
:param bool create_as_published: Set the model's status to Published (Optional)
- ``True`` - Set the status to Published.
- ``False`` - Do not set the status to Published. The status will be Draft. (Default)
:param str framework: The framework of the model. (Optional)
:type framework: str or Framework object
:return: The imported model or existing model (see above).
"""
config_text = cls._resolve_config(config_text=config_text, config_dict=config_dict)
weights_url = StorageHelper.conform_url(weights_url)
if not weights_url:
raise ValueError("Please provide a valid weights_url parameter")
# convert local to file to remote one
weights_url = CacheManager.get_remote_url(weights_url)
extra = (
{"system_tags": ["-" + cls._archived_tag]}
if Session.check_min_api_version("2.3")
else {"tags": ["-" + cls._archived_tag]}
)
# noinspection PyProtectedMember
result = _Model._get_default_session().send(
models.GetAllRequest(uri=[weights_url], only_fields=["id", "name", "created"], **extra)
)
if result.response.models:
logger = get_logger()
logger.debug('A model with uri "{}" already exists. Selecting it'.format(weights_url))
model = get_single_result(
entity="model",
query=weights_url,
results=result.response.models,
log=logger,
raise_on_error=False,
)
logger.info("Selected model id: {}".format(model.id))
return InputModel(model_id=model.id)
base_model = _Model(
upload_storage_uri=None,
cache_dir=get_cache_dir(),
)
from .task import Task
task = Task.current_task()
if task:
comment = "Imported by task id: {}".format(task.id) + ("\n" + comment if comment else "")
project_id = task.project
name = name or "Imported by {}".format(task.name or "")
# do not register the Task, because we do not want it listed after as "output model",
# the Task never actually created the Model
task_id = None
else:
project_id = None
task_id = None
if project:
project_id = get_or_create_project(
session=task.session if task else Task._get_default_session(),
project_name=project,
)
if not framework:
# noinspection PyProtectedMember
framework, file_ext = Framework._get_file_ext(framework=framework, filename=weights_url)
base_model.update(
design=config_text,
labels=label_enumeration,
name=name,
comment=comment,
tags=tags,
uri=weights_url,
framework=framework,
project_id=project_id,
task_id=task_id,
)
this_model = InputModel(model_id=base_model.id)
this_model._base_model = base_model
if is_package:
this_model._set_package_tag()
if create_as_published:
this_model.publish()
return this_model
@classmethod
def load_model(cls, weights_url: str, load_archived: bool = False) -> "InputModel":
"""
        Load an already registered model based on a pre-existing model file (link must be valid). If a model
        registered with that weights URL exists, the returned object represents the loaded Model. If no
        registered model with the specified url is found, ``None`` is returned.
:param weights_url: The valid url for the weights file (string).
Examples:
.. code-block:: py
"https://domain.com/file.bin" or "s3://bucket/file.bin" or "file:///home/user/file.bin".
.. note::
If a model with the exact same URL exists, it will be used, and all other arguments will be ignored.
:param bool load_archived: Load archived models
- ``True`` - Load the registered Model, if it is archived.
- ``False`` - Ignore archive models.
:return: The InputModel object, or None if no model could be found.
"""
weights_url = StorageHelper.conform_url(weights_url)
if not weights_url:
raise ValueError("Please provide a valid weights_url parameter")
# convert local to file to remote one
weights_url = CacheManager.get_remote_url(weights_url)
if not load_archived:
# noinspection PyTypeChecker
extra = (
{"system_tags": ["-" + _Task.archived_tag]}
if Session.check_min_api_version("2.3")
else {"tags": ["-" + cls._archived_tag]}
)
else:
extra = {}
# noinspection PyProtectedMember
result = _Model._get_default_session().send(
models.GetAllRequest(uri=[weights_url], only_fields=["id", "name", "created"], **extra)
)
if not result or not result.response or not result.response.models:
return None
logger = get_logger()
model = get_single_result(
entity="model",
query=weights_url,
results=result.response.models,
log=logger,
raise_on_error=False,
)
return InputModel(model_id=model.id)
@classmethod
def empty(
cls,
config_text: Optional[str] = None,
config_dict: Optional[dict] = None,
label_enumeration: Optional[Mapping[str, int]] = None,
) -> "InputModel":
"""
Create an empty model object. Later, you can assign a model to the empty model object.
:param config_text: The model configuration as a string. This is usually the content of a configuration
dictionary file. Specify ``config_text`` or ``config_dict``, but not both.
:type config_text: unconstrained text string
:param dict config_dict: The model configuration as a dictionary. Specify ``config_text`` or ``config_dict``,
but not both.
:param dict label_enumeration: The label enumeration dictionary of string (label) to integer (value) pairs.
(Optional)
For example:
.. code-block:: javascript
{
"background": 0,
"person": 1
}
:return: An empty model object.
"""
design = cls._resolve_config(config_text=config_text, config_dict=config_dict)
this_model = InputModel(model_id=cls._EMPTY_MODEL_ID)
this_model._base_model = m = _Model(
cache_dir=None,
upload_storage_uri=None,
model_id=cls._EMPTY_MODEL_ID,
)
# noinspection PyProtectedMember
m._data.design = _Model._wrap_design(design)
# noinspection PyProtectedMember
m._data.labels = label_enumeration
return this_model
def __init__(
self,
model_id: Optional[str] = None,
name: Optional[str] = None,
project: Optional[str] = None,
tags: Optional[Sequence[str]] = None,
only_published: bool = False,
) -> None:
"""
Load a model from the Model artifactory,
based on model_id (uuid) or a model name/projects/tags combination.
:param model_id: The ClearML ID (system UUID) of the input model whose metadata the **ClearML Server**
            (backend) stores. If provided, all other arguments are ignored
:param name: Model name to search and load
:param project: Model project name to search model in
:param tags: Model tags list to filter by
:param only_published: If True, filter out non-published (draft) models
"""
if not model_id:
found_models = self.query_models(
project_name=project,
model_name=name,
tags=tags,
only_published=only_published,
)
if not found_models:
raise ValueError(
"Could not locate model with project={} name={} tags={} published={}".format(
project, name, tags, only_published
)
)
model_id = found_models[0].id
super(InputModel, self).__init__(model_id)
@property
def id(self) -> str:
return self._base_model_id
def connect(
self,
task: "Task",
name: Optional[str] = None,
ignore_remote_overrides: bool = False,
) -> None:
"""
Connect the current model to a Task object, if the model is preexisting. Preexisting models include:
        - Imported models (InputModel objects created using the :meth:`InputModel.import_model` method).
- Models whose metadata is already in the ClearML platform, meaning the InputModel object is instantiated
from the ``InputModel`` class specifying the model's ClearML ID as an argument.
- Models whose origin is not ClearML that are used to create an InputModel object. For example,
models created using TensorFlow models.
When the experiment is executed remotely in a worker, the input model specified in the experiment UI/backend
is used, unless `ignore_remote_overrides` is set to True.
.. note::
The **ClearML Web-App** allows you to switch one input model for another and then enqueue the experiment
to execute in a worker.
:param object task: A Task object.
:param ignore_remote_overrides: If True, changing the model in the UI/backend will have no
effect when running remotely.
Default is False, meaning that any changes made in the UI/backend will be applied in remote execution.
:param str name: The model name to be stored on the Task
(default to filename of the model weights, without the file extension, or to `Input Model`
if that is not found)
"""
self._set_task(task)
name = name or InputModel._get_connect_name(self)
InputModel._warn_on_same_name_connect(name)
ignore_remote_overrides = task._handle_ignore_remote_overrides(
name + "/_ignore_remote_overrides_input_model_", ignore_remote_overrides
)
model_id = None
# noinspection PyProtectedMember
if running_remotely() and (task.is_main_task() or task._is_remote_main_task()) and not ignore_remote_overrides:
input_models = task.input_models_id
# noinspection PyBroadException
try:
# TODO: (temp fix) At the moment, the UI changes the key of the model hparam
# when modifying its value... There is no way to tell which model was changed
# so just take the first one in case `name` is not in `input_models`
model_id = input_models.get(name, next(iter(input_models.values())))
self._base_model_id = model_id
self._base_model = InputModel(model_id=model_id)._get_base_model()
except Exception:
model_id = None
if not model_id:
# we should set the task input model to point to us
model = self._get_base_model()
# try to store the input model id, if it is not empty
# (Empty Should not happen)
if model.id != self._EMPTY_MODEL_ID:
task.set_input_model(model_id=model.id, name=name)
# only copy the model design if the task has no design to begin with
# noinspection PyProtectedMember
if not self._task._get_model_config_text() and model.model_design:
# noinspection PyProtectedMember
task._set_model_config(config_text=model.model_design)
if not self._task.get_labels_enumeration() and model.data.labels:
task.set_model_label_enumeration(model.data.labels)
@classmethod
def _warn_on_same_name_connect(cls, name: str) -> None:
if name not in cls._WARNING_CONNECTED_NAMES:
cls._WARNING_CONNECTED_NAMES[name] = False
return
if cls._WARNING_CONNECTED_NAMES[name]:
return
get_logger().warning(
"Connecting multiple input models with the same name: `{}`. This might result in the wrong model being used when executing remotely".format(
name
)
)
cls._WARNING_CONNECTED_NAMES[name] = True
@staticmethod
def _get_connect_name(model: Optional[Any]) -> str:
default_name = "Input Model"
if model is None:
return default_name
# noinspection PyBroadException
try:
model_uri = getattr(model, "url", getattr(model, "uri", None))
return Path(model_uri).stem
except Exception:
return default_name
|
InputModel
|
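A hedged sketch tying import_model and connect together; the URL, project and
names below are placeholders, not values from the source above.

from clearml import Task
from clearml.model import InputModel

task = Task.init(project_name="examples", task_name="use-pretrained")
model = InputModel.import_model(
    weights_url="s3://bucket/weights.bin",  # reused if already registered
    name="pretrained-backbone",
)
model.connect(task, name="Input Model")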
python
|
pypa__installer
|
tests/test_records.py
|
{
"start": 1912,
"end": 6741
}
|
class ____:
@pytest.mark.parametrize(
"path, hash_, size, caused_by",
[
("", "", "", ["path"]),
("", "", "non-int", ["path", "size"]),
("a.py", "", "non-int", ["size"]),
# Notice that we're explicitly allowing non-compliant hash values
("a.py", "some-random-value", "non-int", ["size"]),
],
)
def test_invalid_elements(self, path, hash_, size, caused_by):
with pytest.raises(InvalidRecordEntry) as exc_info:
RecordEntry.from_elements(path, hash_, size)
assert exc_info.value.elements == (path, hash_, size)
for word in caused_by:
assert word in str(exc_info.value)
@pytest.mark.parametrize(
"path, hash_, size",
[
("a.py", "", ""),
("a.py", "", "3144"),
("a.py", "sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI", ""),
("a.py", "sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI", "3144"),
],
)
def test_valid_elements(self, path, hash_, size):
RecordEntry.from_elements(path, hash_, size)
@pytest.mark.parametrize(
("scheme", "elements", "data", "passes_validation"), SAMPLE_RECORDS
)
def test_populates_attributes_correctly(
self, scheme, elements, data, passes_validation
):
path, hash_string, size = elements
record = RecordEntry.from_elements(path, hash_string, size)
assert record.path == path
assert record.size == size
if record.hash_ is not None:
assert isinstance(record.hash_, Hash)
assert record.hash_.name == "sha256"
assert record.hash_.value == hash_string[len("sha256=") :]
@pytest.mark.parametrize(
("scheme", "elements", "data", "passes_validation"), SAMPLE_RECORDS
)
def test_validation(self, scheme, elements, data, passes_validation):
record = RecordEntry.from_elements(*elements)
assert record.validate(data) == passes_validation
@pytest.mark.parametrize(
("scheme", "elements", "data", "passes_validation"), SAMPLE_RECORDS
)
def test_validate_stream(self, scheme, elements, data, passes_validation):
record = RecordEntry.from_elements(*elements)
assert record.validate_stream(BytesIO(data)) == passes_validation
@pytest.mark.parametrize(
("scheme", "elements", "data", "passes_validation"), SAMPLE_RECORDS
)
def test_string_representation(self, scheme, elements, data, passes_validation):
record = RecordEntry.from_elements(*elements)
expected_row = tuple(
[(str(elem) if elem is not None else "") for elem in elements]
)
assert record.to_row() == expected_row
@pytest.mark.parametrize(
("scheme", "elements", "data", "passes_validation"), SAMPLE_RECORDS
)
def test_string_representation_with_prefix(
self, scheme, elements, data, passes_validation
):
record = RecordEntry.from_elements(*elements)
expected_row = tuple(
[
(str(elem) if elem is not None else "")
for elem in ("prefix/" + elements[0], elements[1], elements[2])
]
)
assert record.to_row("prefix/") == expected_row
def test_equality(self):
record = RecordEntry.from_elements(
"file.py",
"sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"3144",
)
record_same = RecordEntry.from_elements(
"file.py",
"sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"3144",
)
record_different_name = RecordEntry.from_elements(
"file2.py",
"sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"3144",
)
record_different_hash_name = RecordEntry.from_elements(
"file.py",
"md5=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"3144",
)
record_different_hash_value = RecordEntry.from_elements(
"file.py",
"sha256=qwertyuiodfdsflkgshdlkjghrefawrwerwffsdfflk29",
"3144",
)
record_different_size = RecordEntry.from_elements(
"file.py",
"sha256=AVTFPZpEKzuHr7OvQZmhaU3LvwKz06AJw8mT\\_pNh2yI",
"10",
)
assert record == record_same
assert record != "random string"
assert record != record_different_name
assert record != record_different_hash_name
assert record != record_different_hash_value
assert record != record_different_size
# Ensure equality is based on current state
record_same.hash_ = None
assert record != record_same
|
TestRecordEntry
|
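The behavior these tests pin down, in miniature: wheel RECORD hashes are
urlsafe-base64 SHA-256 digests without padding, and a matching entry
validates the bytes it describes.

import base64
import hashlib

from installer.records import RecordEntry

data = b"print('hi')\n"
# Encode the digest the way RECORD files do: urlsafe base64, padding stripped.
digest = base64.urlsafe_b64encode(hashlib.sha256(data).digest()).rstrip(b"=").decode()
entry = RecordEntry.from_elements("a.py", f"sha256={digest}", str(len(data)))
assert entry.validate(data)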
python
|
weaviate__weaviate-python-client
|
weaviate/collections/classes/config.py
|
{
"start": 15941,
"end": 16173
}
|
class ____(_RerankerProvider):
module_config: Optional[Dict[str, Any]]
def _to_dict(self) -> Dict[str, Any]:
if self.module_config is None:
return {}
return self.module_config
|
_RerankerCustomConfig
|
python
|
encode__django-rest-framework
|
tests/test_generics.py
|
{
"start": 13237,
"end": 13364
}
|
class ____(serializers.ModelSerializer):
class Meta:
model = Comment
exclude = ('created',)
|
CommentSerializer
|
python
|
pytest-dev__pytest
|
testing/test_collection.py
|
{
"start": 70791,
"end": 94539
}
|
class ____:
"""Test that overlapping collection arguments (e.g. `pytest a/b a
    a/c::TestIt`) are handled correctly (#12083)."""
@pytest.mark.parametrize("args", [("a", "a/b"), ("a/b", "a")])
def test_parent_child(self, pytester: Pytester, args: tuple[str, ...]) -> None:
"""Test that 'pytest a a/b' and `pytest a/b a` collects all tests from 'a'."""
pytester.makepyfile(
**{
"a/test_a.py": """
def test_a1(): pass
def test_a2(): pass
""",
"a/b/test_b.py": """
def test_b1(): pass
def test_b2(): pass
""",
}
)
result = pytester.runpytest("--collect-only", *args)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir a>",
" <Dir b>",
" <Module test_b.py>",
" <Function test_b1>",
" <Function test_b2>",
" <Module test_a.py>",
" <Function test_a1>",
" <Function test_a2>",
"",
],
consecutive=True,
)
def test_multiple_nested_paths(self, pytester: Pytester) -> None:
"""Test that 'pytest a/b a a/b/c' collects all tests from 'a'."""
pytester.makepyfile(
**{
"a/test_a.py": """
def test_a(): pass
""",
"a/b/test_b.py": """
def test_b(): pass
""",
"a/b/c/test_c.py": """
def test_c(): pass
""",
}
)
result = pytester.runpytest("--collect-only", "a/b", "a", "a/b/c")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir a>",
" <Dir b>",
" <Dir c>",
" <Module test_c.py>",
" <Function test_c>",
" <Module test_b.py>",
" <Function test_b>",
" <Module test_a.py>",
" <Function test_a>",
"",
],
consecutive=True,
)
def test_same_path_twice(self, pytester: Pytester) -> None:
"""Test that 'pytest a a' doesn't duplicate tests."""
pytester.makepyfile(
**{
"a/test_a.py": """
def test_a(): pass
""",
}
)
result = pytester.runpytest("--collect-only", "a", "a")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir a>",
" <Module test_a.py>",
" <Function test_a>",
"",
],
consecutive=True,
)
def test_keep_duplicates_flag(self, pytester: Pytester) -> None:
"""Test that --keep-duplicates allows duplication."""
pytester.makepyfile(
**{
"a/test_a.py": """
def test_a(): pass
""",
"a/b/test_b.py": """
def test_b(): pass
""",
}
)
result = pytester.runpytest("--collect-only", "--keep-duplicates", "a", "a/b")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir a>",
" <Dir b>",
" <Module test_b.py>",
" <Function test_b>",
" <Module test_a.py>",
" <Function test_a>",
" <Dir b>",
" <Module test_b.py>",
" <Function test_b>",
"",
],
consecutive=True,
)
def test_specific_file_then_parent_dir(self, pytester: Pytester) -> None:
"""Test that 'pytest a/test_a.py a' collects all tests from 'a'."""
pytester.makepyfile(
**{
"a/test_a.py": """
def test_a(): pass
""",
"a/test_other.py": """
def test_other(): pass
""",
}
)
result = pytester.runpytest("--collect-only", "a/test_a.py", "a")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir a>",
" <Module test_a.py>",
" <Function test_a>",
" <Module test_other.py>",
" <Function test_other>",
"",
],
consecutive=True,
)
def test_package_scope_fixture_with_overlapping_paths(
self, pytester: Pytester
) -> None:
"""Test that package-scoped fixtures work correctly with overlapping paths."""
pytester.makepyfile(
**{
"pkg/__init__.py": "",
"pkg/test_pkg.py": """
import pytest
counter = {"value": 0}
@pytest.fixture(scope="package")
def pkg_fixture():
counter["value"] += 1
return counter["value"]
def test_pkg1(pkg_fixture):
assert pkg_fixture == 1
def test_pkg2(pkg_fixture):
assert pkg_fixture == 1
""",
"pkg/sub/__init__.py": "",
"pkg/sub/test_sub.py": """
def test_sub(): pass
""",
}
)
# Package fixture should run only once even with overlapping paths.
result = pytester.runpytest("pkg", "pkg/sub", "pkg", "-v")
result.assert_outcomes(passed=3)
def test_execution_order_preserved(self, pytester: Pytester) -> None:
"""Test that test execution order follows argument order."""
pytester.makepyfile(
**{
"a/test_a.py": """
def test_a(): pass
""",
"b/test_b.py": """
def test_b(): pass
""",
}
)
result = pytester.runpytest("--collect-only", "b", "a", "b/test_b.py::test_b")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir b>",
" <Module test_b.py>",
" <Function test_b>",
" <Dir a>",
" <Module test_a.py>",
" <Function test_a>",
"",
],
consecutive=True,
)
def test_overlapping_node_ids_class_and_method(self, pytester: Pytester) -> None:
"""Test that overlapping node IDs are handled correctly."""
pytester.makepyfile(
test_nodeids="""
class TestClass:
def test_method1(self): pass
def test_method2(self): pass
def test_method3(self): pass
def test_function(): pass
"""
)
# Class then specific method.
result = pytester.runpytest(
"--collect-only",
"test_nodeids.py::TestClass",
"test_nodeids.py::TestClass::test_method2",
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_nodeids.py>",
" <Class TestClass>",
" <Function test_method1>",
" <Function test_method2>",
" <Function test_method3>",
"",
],
consecutive=True,
)
# Specific method then class.
result = pytester.runpytest(
"--collect-only",
"test_nodeids.py::TestClass::test_method3",
"test_nodeids.py::TestClass",
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_nodeids.py>",
" <Class TestClass>",
" <Function test_method1>",
" <Function test_method2>",
" <Function test_method3>",
"",
],
consecutive=True,
)
def test_overlapping_node_ids_file_and_class(self, pytester: Pytester) -> None:
"""Test that file-level and class-level selections work correctly."""
pytester.makepyfile(
test_file="""
class TestClass:
def test_method(self): pass
class TestOther:
def test_other(self): pass
def test_function(): pass
"""
)
# File then class.
result = pytester.runpytest(
"--collect-only", "test_file.py", "test_file.py::TestClass"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_file.py>",
" <Class TestClass>",
" <Function test_method>",
" <Class TestOther>",
" <Function test_other>",
" <Function test_function>",
"",
],
consecutive=True,
)
# Class then file.
result = pytester.runpytest(
"--collect-only", "test_file.py::TestClass", "test_file.py"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_file.py>",
" <Class TestClass>",
" <Function test_method>",
" <Class TestOther>",
" <Function test_other>",
" <Function test_function>",
"",
],
consecutive=True,
)
def test_same_node_id_twice(self, pytester: Pytester) -> None:
"""Test that the same node ID specified twice is collected only once."""
pytester.makepyfile(
test_dup="""
def test_one(): pass
def test_two(): pass
"""
)
result = pytester.runpytest(
"--collect-only",
"test_dup.py::test_one",
"test_dup.py::test_one",
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_dup.py>",
" <Function test_one>",
"",
],
consecutive=True,
)
def test_overlapping_with_parametrization(self, pytester: Pytester) -> None:
"""Test overlapping with parametrized tests."""
pytester.makepyfile(
test_param="""
import pytest
@pytest.mark.parametrize("n", [1, 2])
def test_param(n): pass
class TestClass:
@pytest.mark.parametrize("x", ["a", "b"])
def test_method(self, x): pass
"""
)
result = pytester.runpytest(
"--collect-only",
"test_param.py::test_param[2]",
"test_param.py::TestClass::test_method[a]",
"test_param.py",
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_param.py>",
" <Function test_param[1]>",
" <Function test_param[2]>",
" <Class TestClass>",
" <Function test_method[a]>",
" <Function test_method[b]>",
"",
],
consecutive=True,
)
result = pytester.runpytest(
"--collect-only",
"test_param.py::test_param[2]",
"test_param.py::test_param",
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Module test_param.py>",
" <Function test_param[1]>",
" <Function test_param[2]>",
"",
],
consecutive=True,
)
@pytest.mark.parametrize("order", [(".", "a"), ("a", ".")])
def test_root_and_subdir(self, pytester: Pytester, order: tuple[str, ...]) -> None:
"""Test that '. a' and 'a .' both collect all tests."""
pytester.makepyfile(
test_root="""
def test_root(): pass
""",
**{
"a/test_a.py": """
def test_a(): pass
""",
},
)
result = pytester.runpytest("--collect-only", *order)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Dir a>",
" <Module test_a.py>",
" <Function test_a>",
" <Module test_root.py>",
" <Function test_root>",
"",
],
consecutive=True,
)
def test_complex_combined_handling(self, pytester: Pytester) -> None:
"""Test some scenarios in a complex hierarchy."""
pytester.makepyfile(
**{
"top1/__init__.py": "",
"top1/test_1.py": (
"""
def test_1(): pass
class TestIt:
def test_2(): pass
def test_3(): pass
"""
),
"top1/test_2.py": (
"""
def test_1(): pass
"""
),
"top2/__init__.py": "",
"top2/test_1.py": (
"""
def test_1(): pass
"""
),
},
)
result = pytester.runpytest_inprocess("--collect-only", ".")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
" <Module test_2.py>",
" <Function test_1>",
" <Package top2>",
" <Module test_1.py>",
" <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess("--collect-only", "top2", "top1")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top2>",
" <Module test_1.py>",
" <Function test_1>",
" <Package top1>",
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
" <Module test_2.py>",
" <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "top1", "top1/test_2.py"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
" <Module test_2.py>",
" <Function test_1>",
# NOTE: Also sensible arguably even without --keep-duplicates.
# " <Module test_2.py>",
# " <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "top1/test_2.py", "top1"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
# NOTE: Ideally test_2 would come before test_1 here.
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
" <Module test_2.py>",
" <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "--keep-duplicates", "top1/test_2.py", "top1"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
" <Module test_2.py>",
" <Function test_1>",
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
" <Module test_2.py>",
" <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "top1/test_2.py", "top1/test_2.py"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
" <Module test_2.py>",
" <Function test_1>",
# NOTE: Also sensible arguably even without --keep-duplicates.
# " <Module test_2.py>",
# " <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess("--collect-only", "top2/", "top2/")
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top2>",
" <Module test_1.py>",
" <Function test_1>",
# NOTE: Also sensible arguably even without --keep-duplicates.
# " <Package top2>",
# " <Module test_1.py>",
# " <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "top2/", "top2/", "top2/test_1.py"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top2>",
" <Module test_1.py>",
" <Function test_1>",
# NOTE: Also sensible arguably even without --keep-duplicates.
# " <Package top2>",
# " <Module test_1.py>",
# " <Function test_1>",
# " <Module test_1.py>",
# " <Function test_1>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "top1/test_1.py", "top1/test_1.py::test_3"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
# NOTE: Also sensible arguably even without --keep-duplicates.
# " <Function test_3>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only", "top1/test_1.py::test_3", "top1/test_1.py"
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
" <Module test_1.py>",
# NOTE: Ideally test_3 would come before the others here.
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
"",
],
consecutive=True,
)
result = pytester.runpytest_inprocess(
"--collect-only",
"--keep-duplicates",
"top1/test_1.py::test_3",
"top1/test_1.py",
)
result.stdout.fnmatch_lines(
[
"<Dir *>",
" <Package top1>",
# NOTE: That <Module test_1.py> is duplicated here is not great.
" <Module test_1.py>",
" <Function test_3>",
" <Module test_1.py>",
" <Function test_1>",
" <Class TestIt>",
" <Function test_2>",
" <Function test_3>",
"",
],
consecutive=True,
)
@pytest.mark.parametrize(
["x_y", "expected_duplicates"],
[
(
[(1, 1), (1, 1)],
["1-1"],
),
(
[(1, 1), (1, 2), (1, 1)],
["1-1"],
),
(
[(1, 1), (2, 2), (1, 1)],
["1-1"],
),
(
[(1, 1), (2, 2), (1, 2), (2, 1), (1, 1), (2, 1)],
["1-1", "2-1"],
),
],
)
@pytest.mark.parametrize("option_name", ["strict_parametrization_ids", "strict"])
def test_strict_parametrization_ids(
pytester: Pytester,
x_y: Sequence[tuple[int, int]],
expected_duplicates: Sequence[str],
option_name: str,
) -> None:
pytester.makeini(
f"""
[pytest]
{option_name} = true
"""
)
pytester.makepyfile(
f"""
import pytest
@pytest.mark.parametrize(["x", "y"], {x_y})
def test1(x, y):
pass
"""
)
result = pytester.runpytest()
assert result.ret == ExitCode.INTERRUPTED
expected_parametersets = ", ".join(str(list(p)) for p in x_y)
expected_ids = ", ".join(f"{x}-{y}" for x, y in x_y)
result.stdout.fnmatch_lines(
[
"Duplicate parametrization IDs detected*",
"",
"Test name: *::test1",
"Parameters: x, y",
f"Parameter sets: {expected_parametersets}",
f"IDs: {expected_ids}",
f"Duplicates: {', '.join(expected_duplicates)}",
"",
"You can fix this problem using *",
]
)
def test_strict_parametrization_ids_with_hidden_param(pytester: Pytester) -> None:
pytester.makeini(
"""
[pytest]
strict_parametrization_ids = true
"""
)
pytester.makepyfile(
"""
import pytest
@pytest.mark.parametrize(["x"], ["a", pytest.param("a", id=pytest.HIDDEN_PARAM), "a"])
def test1(x):
pass
"""
)
result = pytester.runpytest()
assert result.ret == ExitCode.INTERRUPTED
result.stdout.fnmatch_lines(
[
"Duplicate parametrization IDs detected*",
"IDs: a, <hidden>, a",
"Duplicates: a",
]
)
|
TestOverlappingCollectionArguments
|
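A hedged end-to-end reproduction of the dedup behavior outside Pytester; run
it from an empty scratch directory. The file layout mirrors test_parent_child.

import pathlib
import subprocess
import sys

pathlib.Path("a/b").mkdir(parents=True, exist_ok=True)
pathlib.Path("a/test_a.py").write_text("def test_a(): pass\n")
pathlib.Path("a/b/test_b.py").write_text("def test_b(): pass\n")
out = subprocess.run(
    [sys.executable, "-m", "pytest", "--collect-only", "-q", "a", "a/b"],
    capture_output=True,
    text=True,
).stdout
# Each test id appears exactly once despite the overlapping path arguments.
assert out.count("::test_a") == 1 and out.count("::test_b") == 1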
python
|
google__jax
|
tests/mutable_array_test.py
|
{
"start": 28725,
"end": 35419
}
|
class ____(jtu.JaxTestCase):
def test_return_from_jit(self):
with self.assertRaisesRegex(
ValueError,
r"traced for jit returned a mutable array reference.*\n\n"
r".*was created on line"):
jax.jit(core.new_ref)(jnp.arange(3))
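  # Illustrative contrast (comment-only, not part of the test): returning the
  # dereferenced *value* instead of the mutable array reference is fine:
  #
  #     @jax.jit
  #     def ok(x_ref):
  #       return x_ref[...] + 1
  #     ok(core.new_ref(jnp.arange(3)))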
def test_return_from_jit_arg(self):
with self.assertRaisesRegex(
ValueError,
r"traced for jit returned a mutable array reference.*\n\n"
r".*was passed in as the argument x_ref"):
jax.jit(lambda x_ref: x_ref)(core.new_ref(jnp.arange(3)))
def test_return_from_jit_pytree(self):
with self.assertRaisesRegex(
ValueError,
r"tree path result\['hi'\]"):
jax.jit(lambda x_ref: {'hi': x_ref})(core.new_ref(jnp.arange(3)))
def test_return_from_jit_closure(self):
with self.assertRaisesRegex(
ValueError,
r"tree path result\['hi'\]"):
x_ref = core.new_ref(jnp.arange(3))
jax.jit(lambda: {'hi': x_ref})()
def test_argument_aliases_jit(self):
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, "appeared at both x_ref and y_ref"):
jax.jit(lambda x_ref, y_ref: x_ref[...] + y_ref[...])(x_ref, x_ref)
def test_closure_and_argument_aliases_jit(self):
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, "closed over and passed as the argument y_ref"):
jax.jit(lambda y_ref: x_ref[...] + y_ref[...])(x_ref)
def test_return_from_scan(self):
with self.assertRaisesRegex(
ValueError, "traced for scan returned a mutable array reference of type"):
jax.lax.scan(lambda c, x: (core.new_ref(c), x), 0, jnp.arange(3))
def test_argument_aliases_scan(self):
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, r"appeared at both c\[0\] and c\[1\]"):
jax.lax.scan(lambda c, _: (None, None), (x_ref, x_ref), None, length=1)
def test_closure_and_argument_aliases_scan(self):
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, r"closed over and passed as the argument y_ref"):
jax.lax.scan(lambda y_ref, _: (x_ref[...] + y_ref[...], None), x_ref,
None, length=1)
def test_return_from_cond(self):
with self.assertRaisesRegex(
ValueError, "traced for cond returned a mutable array reference of type"):
jax.lax.cond(True, lambda: core.new_ref(1.0), lambda: core.new_ref(2.0))
def test_argument_aliases_cond(self):
x_ref = core.new_ref(0.)
    with self.assertRaisesRegex(ValueError, r"for cond.*at both x1 and x2"):
jax.lax.cond(True, lambda x1, x2: ..., lambda x1, x2: ..., x_ref, x_ref)
def test_closure_and_argument_aliases_cond(self):
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, r"closed over and passed as the argument y_ref"):
jax.lax.cond(True,
lambda y_ref: x_ref[...] + y_ref[...],
lambda y_ref: x_ref[...] + y_ref[...],
x_ref)
@parameterized.parameters([False, True])
def test_return_from_custom_vjp_primal(self, jit):
@jax.custom_vjp
def f(ref):
return ref
f.defvjp(lambda ref: ..., lambda *_: ...)
if jit:
f = jax.jit(f)
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, "custom_vjp primal function"):
f(x_ref)
@parameterized.parameters([False, True])
def test_return_from_custom_vjp_primal_nondiff_argnum(self, jit):
@partial(jax.custom_vjp, nondiff_argnums=(0,))
def f(_, ref):
return ref
f.defvjp(lambda _, ref: ..., lambda *_: ...)
if jit:
f = jax.jit(f, static_argnums=0)
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, "custom_vjp primal function"):
f('hi', x_ref)
@parameterized.parameters([False, True])
def test_return_from_custom_vjp_fwd(self, jit):
@jax.custom_vjp
def f(x, ref):
return x
f.defvjp(lambda x, ref: (x, ref), lambda ref, g: g)
if jit:
f = jax.jit(f)
x_ref = core.new_ref(0.)
jax.vjp(f, 3., x_ref) # returning input ref, okay
@jax.custom_vjp
def g(x, ref):
return x
def g_fwd(x, _):
y_ref = core.new_ref(0)
return x, y_ref
g.defvjp(g_fwd, lambda ref, g: g)
if jit:
g = jax.jit(g)
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(
ValueError, "custom_vjp fwd function"):
jax.vjp(g, 3., x_ref)
@parameterized.parameters([False, True])
def test_argument_aliases_custom_vjp_primal(self, jit):
@jax.custom_vjp
def f(x_ref, y_ref):
...
f.defvjp(lambda x_ref, y_ref: (None, None), lambda _, g: (None, None))
if jit:
f = jax.jit(f)
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(ValueError, "x_ref and y_ref"):
f(x_ref, x_ref)
@parameterized.parameters([False, True])
def test_argument_aliases_custom_vjp_fwd(self, jit):
@jax.custom_vjp
def f(x_ref, y_ref):
...
f.defvjp(lambda x_ref, y_ref: (None, None), lambda _, g: (None, None))
if jit:
f = jax.jit(f)
x_ref = core.new_ref(0.)
with self.assertRaisesRegex(ValueError, "x_ref and y_ref"):
jax.vjp(f, x_ref, x_ref)
# TODO(mattjj): add test test_closure_and_argument_aliases_custom_vjp
@parameterized.parameters([False, True])
def test_cond_both_branches_close_over_same_mutable_array(self, jit):
# see also test_cond_with_ref_reuse in state_test.py
x_ref = core.new_ref(0.)
def f(pred):
def true_fun():
x_ref[()] = 1.
def false_fun():
x_ref[()] = 2.
jax.lax.cond(pred, true_fun, false_fun)
if jit:
f = jax.jit(f)
out_true = f(True)
self.assertAllClose(x_ref[...], 1.)
out_false = f(False)
self.assertAllClose(x_ref[...], 2.)
def test_vmap_closed_over_ref_write(self):
x_ref = core.new_ref(jnp.zeros((), 'int32'))
def f(val):
x_ref[...] += val
vals = jnp.arange(3, dtype='int32')
with self.assertRaisesRegex(Exception, "unbatched array reference"):
jax.vmap(f)(vals)
def test_vmap_aliased_arguments(self):
def f(ref_1, ref_2):
pass
x_ref = core.new_ref(jnp.zeros((3, 3)))
with self.assertRaisesRegex(
ValueError,
"only one reference to a mutable array may be passed as an argument"):
jax.vmap(f)(x_ref, x_ref)
def test_jvp_closed_over_ref_error(self):
ref = core.new_ref(0.)
def f(x):
ref[...] = x
return x
with self.assertRaisesRegex(
Exception, "Move the array reference"):
jax.jvp(f, [1.], [1.])
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
|
MutableArrayErrorsTest
|
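The MutableArrayErrorsTest cases above are all negative; for contrast, a minimal sketch of the pattern they leave open, creating a ref and passing it as an argument so nothing ref-typed is returned (this reuses the `core.new_ref` and `jnp` names exactly as the test file does, and assumes the same imports):

import jax
import jax.numpy as jnp
from jax import core  # assumption: the same module that provides new_ref above

x_ref = core.new_ref(jnp.zeros((), 'float32'))

@jax.jit
def accumulate(ref, val):
    # In-place update through the reference; no ref is returned, so none of
    # the "returned a mutable array reference" errors above can trigger.
    ref[...] += val

accumulate(x_ref, 1.0)
assert x_ref[...] == 1.0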
python
|
getsentry__sentry
|
src/sentry/core/endpoints/scim/members.py
|
{
"start": 3996,
"end": 5671
}
|
class ____(serializers.Serializer):
    # we don't actually use "schemas" for anything atm, but it's part of the spec
schemas = serializers.ListField(child=serializers.CharField(), required=False)
Operations = serializers.ListField(
child=SCIMPatchOperationSerializer(),
required=True,
source="operations",
max_length=100,
help_text="""A list of operations to perform. Currently, the only valid operation is setting
a member's `active` attribute to false, after which the member will be permanently deleted.
```json
{
"Operations": [{
"op": "replace",
"path": "active",
"value": False
}]
}
```
""",
)
def _scim_member_serializer_with_expansion(organization):
"""
For our Azure SCIM integration, we don't want to return the `active`
flag since we don't support soft deletes. Other integrations don't
care about this and rely on the behavior of setting "active" to false
to delete a member.
"""
auth_provider = auth_service.get_auth_provider(organization_id=organization.id)
expand = ["active"]
if auth_provider and auth_provider.provider == ACTIVE_DIRECTORY_PROVIDER_NAME:
expand = []
return OrganizationMemberSCIMSerializer(expand=expand)
def resolve_maybe_bool_value(value):
if isinstance(value, str):
value = value.lower()
# Some IdP vendors such as Azure send boolean values as actual strings.
if value == "true":
return True
elif value == "false":
return False
if isinstance(value, bool):
return value
return None
@region_silo_endpoint
|
SCIMPatchRequestSerializer
|
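A quick illustration of resolve_maybe_bool_value from the snippet above, which normalizes the stringly-typed booleans some IdP vendors send:

assert resolve_maybe_bool_value("True") is True    # Azure-style string boolean
assert resolve_maybe_bool_value("FALSE") is False  # comparison is case-insensitive
assert resolve_maybe_bool_value(True) is True      # real booleans pass through
assert resolve_maybe_bool_value("yes") is None     # anything else is rejected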
python
|
run-llama__llama_index
|
llama-index-packs/llama-index-packs-nebulagraph-query-engine/llama_index/packs/nebulagraph_query_engine/base.py
|
{
"start": 910,
"end": 6068
}
|
class ____(BaseLlamaPack):
"""NebulaGraph Query Engine pack."""
def __init__(
self,
username: str,
password: str,
ip_and_port: str,
space_name: str,
edge_types: str,
rel_prop_names: str,
tags: str,
max_triplets_per_chunk: int,
docs: List[Document],
query_engine_type: Optional[NebulaGraphQueryEngineType] = None,
**kwargs: Any,
) -> None:
"""Init params."""
os.environ["GRAPHD_HOST"] = "127.0.0.1"
os.environ["NEBULA_USER"] = username
os.environ["NEBULA_PASSWORD"] = password
os.environ["NEBULA_ADDRESS"] = (
ip_and_port # such as "127.0.0.1:9669" for local instance
)
nebulagraph_graph_store = NebulaGraphStore(
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
)
nebulagraph_storage_context = StorageContext.from_defaults(
graph_store=nebulagraph_graph_store
)
# define LLM
self.llm = OpenAI(temperature=0.1, model="gpt-3.5-turbo")
Settings.llm = self.llm
nebulagraph_index = KnowledgeGraphIndex.from_documents(
documents=docs,
storage_context=nebulagraph_storage_context,
max_triplets_per_chunk=max_triplets_per_chunk,
space_name=space_name,
edge_types=edge_types,
rel_prop_names=rel_prop_names,
tags=tags,
include_embeddings=True,
)
# create index
vector_index = VectorStoreIndex.from_documents(docs)
if query_engine_type == NebulaGraphQueryEngineType.KG_KEYWORD:
# KG keyword-based entity retrieval
self.query_engine = nebulagraph_index.as_query_engine(
# setting to false uses the raw triplets instead of adding the text from the corresponding nodes
include_text=False,
retriever_mode="keyword",
response_mode="tree_summarize",
)
elif query_engine_type == NebulaGraphQueryEngineType.KG_HYBRID:
# KG hybrid entity retrieval
self.query_engine = nebulagraph_index.as_query_engine(
include_text=True,
response_mode="tree_summarize",
embedding_mode="hybrid",
similarity_top_k=3,
explore_global_knowledge=True,
)
elif query_engine_type == NebulaGraphQueryEngineType.RAW_VECTOR:
# Raw vector index retrieval
self.query_engine = vector_index.as_query_engine()
elif query_engine_type == NebulaGraphQueryEngineType.RAW_VECTOR_KG_COMBO:
from llama_index.core.query_engine import RetrieverQueryEngine
# create custom retriever
nebulagraph_vector_retriever = VectorIndexRetriever(index=vector_index)
nebulagraph_kg_retriever = KGTableRetriever(
index=nebulagraph_index, retriever_mode="keyword", include_text=False
)
nebulagraph_custom_retriever = CustomRetriever(
nebulagraph_vector_retriever, nebulagraph_kg_retriever
)
# create response synthesizer
nebulagraph_response_synthesizer = get_response_synthesizer(
response_mode="tree_summarize"
)
# Custom combo query engine
self.query_engine = RetrieverQueryEngine(
retriever=nebulagraph_custom_retriever,
response_synthesizer=nebulagraph_response_synthesizer,
)
elif query_engine_type == NebulaGraphQueryEngineType.KG_QE:
# using KnowledgeGraphQueryEngine
from llama_index.core.query_engine import KnowledgeGraphQueryEngine
self.query_engine = KnowledgeGraphQueryEngine(
storage_context=nebulagraph_storage_context,
llm=self.llm,
verbose=True,
)
elif query_engine_type == NebulaGraphQueryEngineType.KG_RAG_RETRIEVER:
# using KnowledgeGraphRAGRetriever
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core.retrievers import KnowledgeGraphRAGRetriever
nebulagraph_graph_rag_retriever = KnowledgeGraphRAGRetriever(
storage_context=nebulagraph_storage_context,
llm=self.llm,
verbose=True,
)
self.query_engine = RetrieverQueryEngine.from_args(
nebulagraph_graph_rag_retriever
)
else:
# KG vector-based entity retrieval
self.query_engine = nebulagraph_index.as_query_engine()
def get_modules(self) -> Dict[str, Any]:
"""Get modules."""
return {
"llm": self.llm,
"query_engine": self.query_engine,
}
def run(self, *args: Any, **kwargs: Any) -> Any:
"""Run the pipeline."""
return self.query_engine.query(*args, **kwargs)
|
NebulaGraphQueryEnginePack
|
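A minimal usage sketch for the pack above. Every connection value and the docs list are placeholders; note that although the __init__ annotations say str for edge_types, rel_prop_names, and tags, NebulaGraphStore conventionally receives lists, so list values are shown here as an assumption:

# Hypothetical wiring; every credential and value below is a placeholder.
pack = NebulaGraphQueryEnginePack(
    username="root",
    password="nebula",
    ip_and_port="127.0.0.1:9669",
    space_name="llamaindex",
    edge_types=["relationship"],
    rel_prop_names=["relationship"],
    tags=["entity"],
    max_triplets_per_chunk=10,
    docs=docs,  # a List[Document] prepared elsewhere
    query_engine_type=NebulaGraphQueryEngineType.KG_HYBRID,
)
response = pack.run("Tell me about the documents.")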
python
|
automl__auto-sklearn
|
autosklearn/metalearning/metafeatures/metafeatures.py
|
{
"start": 17717,
"end": 18129
}
|
class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
# TODO: categorical attributes without a symbol don't count towards this
# measure
values = [val for val in helper_functions.get_value("NumSymbols") if val > 0]
mean = np.nanmean(values)
return mean if np.isfinite(mean) else 0
@metafeatures.define("SymbolsSTD", dependency="NumSymbols")
|
SymbolsMean
|
python
|
walkccc__LeetCode
|
solutions/1770. Maximum Score from Performing Multiplication Operations/1770.py
|
{
"start": 0,
"end": 639
}
|
class ____:
def maximumScore(self, nums: list[int], multipliers: list[int]) -> int:
@functools.lru_cache(2000)
def dp(s: int, i: int) -> int:
"""Returns the maximum score of nums[s..e] and multipliers[i]."""
if i == len(multipliers):
return 0
# The number of nums picked on the start side is s.
# The number of nums picked on the end side is i - s.
# So, e = n - (i - s) - 1.
e = len(nums) - (i - s) - 1
pickStart = nums[s] * multipliers[i] + dp(s + 1, i + 1)
pickEnd = nums[e] * multipliers[i] + dp(s, i + 1)
return max(pickStart, pickEnd)
return dp(0, 0)
|
Solution
|
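A worked check of the DP above, using LeetCode's first sample case (the import is added here because the decorator needs it):

import functools  # required by @functools.lru_cache in the class above

sol = Solution()
# Take 3, then 2, then 1 from the end: 3*3 + 2*2 + 1*1 = 14.
assert sol.maximumScore([1, 2, 3], [3, 2, 1]) == 14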
python
|
python-openxml__python-docx
|
src/docx/parts/story.py
|
{
"start": 498,
"end": 3857
}
|
class ____(XmlPart):
"""Base class for story parts.
A story part is one that can contain textual content, such as the document-part and
header or footer parts. These all share content behaviors like `.paragraphs`,
`.add_paragraph()`, `.add_table()` etc.
"""
def get_or_add_image(self, image_descriptor: str | IO[bytes]) -> Tuple[str, Image]:
"""Return (rId, image) pair for image identified by `image_descriptor`.
`rId` is the str key (often like "rId7") for the relationship between this story
part and the image part, reused if already present, newly created if not.
`image` is an |Image| instance providing access to the properties of the image,
such as dimensions and image type.
"""
package = self._package
assert package is not None
image_part = package.get_or_add_image_part(image_descriptor)
rId = self.relate_to(image_part, RT.IMAGE)
return rId, image_part.image
def get_style(self, style_id: str | None, style_type: WD_STYLE_TYPE) -> BaseStyle:
"""Return the style in this document matching `style_id`.
Returns the default style for `style_type` if `style_id` is |None| or does not
match a defined style of `style_type`.
"""
return self._document_part.get_style(style_id, style_type)
def get_style_id(
self, style_or_name: BaseStyle | str | None, style_type: WD_STYLE_TYPE
) -> str | None:
"""Return str style_id for `style_or_name` of `style_type`.
Returns |None| if the style resolves to the default style for `style_type` or if
`style_or_name` is itself |None|. Raises if `style_or_name` is a style of the
wrong type or names a style not present in the document.
"""
return self._document_part.get_style_id(style_or_name, style_type)
def new_pic_inline(
self,
image_descriptor: str | IO[bytes],
width: int | Length | None = None,
height: int | Length | None = None,
) -> CT_Inline:
"""Return a newly-created `w:inline` element.
The element contains the image specified by `image_descriptor` and is scaled
based on the values of `width` and `height`.
"""
rId, image = self.get_or_add_image(image_descriptor)
cx, cy = image.scaled_dimensions(width, height)
shape_id, filename = self.next_id, image.filename
return CT_Inline.new_pic_inline(shape_id, rId, filename, cx, cy)
@property
def next_id(self) -> int:
"""Next available positive integer id value in this story XML document.
The value is determined by incrementing the maximum existing id value. Gaps in
the existing id sequence are not filled. The id attribute value is unique in the
document, without regard to the element type it appears on.
"""
id_str_lst = self._element.xpath("//@id")
used_ids = [int(id_str) for id_str in id_str_lst if id_str.isdigit()]
if not used_ids:
return 1
return max(used_ids) + 1
@lazyproperty
def _document_part(self) -> DocumentPart:
"""|DocumentPart| object for this package."""
package = self.package
assert package is not None
return cast("DocumentPart", package.main_document_part)
|
StoryPart
|
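StoryPart.next_id above takes the maximum numeric id and adds one, skipping non-numeric ids and never reusing gaps; the same rule as a standalone sketch:

# Mirrors the next_id logic: non-numeric ids are ignored, gaps are not filled.
def next_id(id_str_lst):
    used_ids = [int(s) for s in id_str_lst if s.isdigit()]
    return max(used_ids) + 1 if used_ids else 1

assert next_id([]) == 1
assert next_id(["1", "3", "docPr7"]) == 4  # "docPr7" is not purely numeric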
python
|
jina-ai__jina
|
tests/integration/override_config_params/container/executor.py
|
{
"start": 38,
"end": 613
}
|
class ____(Executor):
def __init__(self, param1, param2, param3, *args, **kwargs):
super().__init__(*args, **kwargs)
self.param1 = param1
self.param2 = param2
self.param3 = param3
@requests(on='/search')
def encode(self, docs, **kwargs):
for doc in docs:
doc.tags['param1'] = self.param1
doc.tags['param2'] = self.param2
doc.tags['param3'] = self.param3
doc.tags['workspace'] = getattr(self.metas, 'workspace')
doc.tags['name'] = getattr(self.metas, 'name')
|
Override
|
python
|
pytest-dev__pytest
|
testing/test_pytester.py
|
{
"start": 5993,
"end": 9391
}
|
class ____:
def test_inline_run_test_module_not_cleaned_up(self, pytester: Pytester) -> None:
test_mod = pytester.makepyfile("def test_foo(): assert True")
result = pytester.inline_run(str(test_mod))
assert result.ret == ExitCode.OK
# rewrite module, now test should fail if module was re-imported
test_mod.write_text("def test_foo(): assert False", encoding="utf-8")
result2 = pytester.inline_run(str(test_mod))
assert result2.ret == ExitCode.TESTS_FAILED
def spy_factory(self):
class SysModulesSnapshotSpy:
instances: list[SysModulesSnapshotSpy] = []
def __init__(self, preserve=None) -> None:
SysModulesSnapshotSpy.instances.append(self)
self._spy_restore_count = 0
self._spy_preserve = preserve
self.__snapshot = SysModulesSnapshot(preserve=preserve)
def restore(self):
self._spy_restore_count += 1
return self.__snapshot.restore()
return SysModulesSnapshotSpy
def test_inline_run_taking_and_restoring_a_sys_modules_snapshot(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester_mod, "SysModulesSnapshot", spy_factory)
pytester.syspathinsert()
original = dict(sys.modules)
pytester.makepyfile(import1="# you son of a silly person")
pytester.makepyfile(import2="# my hovercraft is full of eels")
test_mod = pytester.makepyfile(
"""
import import1
def test_foo(): import import2"""
)
pytester.inline_run(str(test_mod))
assert len(spy_factory.instances) == 1
spy = spy_factory.instances[0]
assert spy._spy_restore_count == 1
assert sys.modules == original
assert all(sys.modules[x] is original[x] for x in sys.modules)
def test_inline_run_sys_modules_snapshot_restore_preserving_modules(
self, pytester: Pytester, monkeypatch: MonkeyPatch
) -> None:
spy_factory = self.spy_factory()
monkeypatch.setattr(pytester_mod, "SysModulesSnapshot", spy_factory)
test_mod = pytester.makepyfile("def test_foo(): pass")
pytester.inline_run(str(test_mod))
spy = spy_factory.instances[0]
assert not spy._spy_preserve("black_knight")
assert spy._spy_preserve("zope")
assert spy._spy_preserve("zope.interface")
assert spy._spy_preserve("zopelicious")
def test_external_test_module_imports_not_cleaned_up(
self, pytester: Pytester
) -> None:
pytester.syspathinsert()
pytester.makepyfile(imported="data = 'you son of a silly person'")
import imported
test_mod = pytester.makepyfile(
"""
def test_foo():
import imported
imported.data = 42"""
)
pytester.inline_run(str(test_mod))
assert imported.data == 42
def test_assert_outcomes_after_pytest_error(pytester: Pytester) -> None:
pytester.makepyfile("def test_foo(): assert True")
result = pytester.runpytest("--unexpected-argument")
with pytest.raises(ValueError, match="Pytest terminal summary report not found"):
result.assert_outcomes(passed=0)
|
TestInlineRunModulesCleanup
|
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql_tests/client_tests/test_shutdown_repository_location.py
|
{
"start": 1236,
"end": 2377
}
|
class ____(BaseTestSuite):
def test_shutdown_repository_location(self, graphql_client, graphql_context):
origin = next(iter(graphql_context.get_code_location_entries().values())).origin
origin.create_client().heartbeat()
result = graphql_client.shutdown_repository_location(main_repo_location_name())
assert result.status == ShutdownRepositoryLocationStatus.SUCCESS, result.message
# Wait for client to be unavailable
start_time = time.time()
while time.time() - start_time < 15:
try:
origin.create_client().heartbeat()
except DagsterUserCodeUnreachableError:
# Shutdown succeeded
return
time.sleep(1)
raise Exception("Timed out waiting for shutdown to take effect")
def test_shutdown_repository_location_not_found(self, graphql_client):
result = graphql_client.shutdown_repository_location("not_real")
assert result.status == ShutdownRepositoryLocationStatus.FAILURE
assert "Location not_real does not exist" in result.message
|
TestShutdownRepositoryLocation
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_stateful.py
|
{
"start": 10934,
"end": 31964
}
|
class ____(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.counter = 0
@rule()
def increment(self):
self.counter += 1
assert self.counter < 10
FailsEventually.TestCase.settings = Settings(stateful_step_count=5)
@skipif_threading
def test_can_explicitly_pass_settings():
run_state_machine_as_test(FailsEventually)
try:
FailsEventually.TestCase.settings = Settings(
FailsEventually.TestCase.settings, stateful_step_count=15
)
run_state_machine_as_test(
FailsEventually, settings=Settings(stateful_step_count=2)
)
finally:
FailsEventually.TestCase.settings = Settings(
FailsEventually.TestCase.settings, stateful_step_count=5
)
def test_settings_argument_is_validated():
with pytest.raises(InvalidArgument):
run_state_machine_as_test(FailsEventually, settings=object())
def test_runner_that_checks_factory_produced_a_machine():
with pytest.raises(InvalidArgument):
run_state_machine_as_test(object)
@skipif_threading
def test_settings_attribute_is_validated():
real_settings = FailsEventually.TestCase.settings
try:
FailsEventually.TestCase.settings = object()
with pytest.raises(InvalidArgument):
run_state_machine_as_test(FailsEventually)
finally:
FailsEventually.TestCase.settings = real_settings
def test_saves_failing_example_in_database():
db = InMemoryExampleDatabase()
ss = Settings(
database=db, max_examples=1000, suppress_health_check=list(HealthCheck)
)
with raises(AssertionError):
run_state_machine_as_test(DepthMachine, settings=ss)
assert any(list(db.data.values()))
def test_can_run_with_no_db():
with raises(AssertionError):
run_state_machine_as_test(
DepthMachine, settings=Settings(database=None, max_examples=10_000)
)
def test_stateful_double_rule_is_forbidden(recwarn):
with pytest.raises(InvalidDefinition):
class DoubleRuleMachine(RuleBasedStateMachine):
@rule(num=just(1))
@rule(num=just(2))
def whatevs(self, num):
pass
def test_can_explicitly_call_functions_when_precondition_not_satisfied():
class BadPrecondition(RuleBasedStateMachine):
def __init__(self):
super().__init__()
@precondition(lambda self: False)
@rule()
def test_blah(self):
raise ValueError
@rule()
def test_foo(self):
self.test_blah()
with pytest.raises(ValueError):
run_state_machine_as_test(BadPrecondition)
def test_invariant():
"""If an invariant raise an exception, the exception is propagated."""
class Invariant(RuleBasedStateMachine):
def __init__(self):
super().__init__()
@invariant()
def test_blah(self):
raise ValueError
@rule()
def do_stuff(self):
pass
with pytest.raises(ValueError):
run_state_machine_as_test(Invariant)
def test_no_double_invariant():
"""The invariant decorator can't be applied multiple times to a single
function."""
with raises(InvalidDefinition):
class Invariant(RuleBasedStateMachine):
def __init__(self):
super().__init__()
@invariant()
@invariant()
def test_blah(self):
pass
@rule()
def do_stuff(self):
pass
def test_invariant_precondition():
"""If an invariant precodition isn't met, the invariant isn't run.
The precondition decorator can be applied in any order.
"""
class Invariant(RuleBasedStateMachine):
def __init__(self):
super().__init__()
@invariant()
@precondition(lambda _: False)
def an_invariant(self):
raise ValueError
@precondition(lambda _: False)
@invariant()
def another_invariant(self):
raise ValueError
@rule()
def do_stuff(self):
pass
run_state_machine_as_test(Invariant)
@pytest.mark.parametrize(
"decorators",
[
(invariant(), rule()),
(rule(), invariant()),
(invariant(), initialize()),
(initialize(), invariant()),
(invariant(), precondition(lambda self: True), rule()),
(rule(), precondition(lambda self: True), invariant()),
(precondition(lambda self: True), invariant(), rule()),
(precondition(lambda self: True), rule(), invariant()),
],
ids=lambda x: "-".join(f.__qualname__.split(".")[0] for f in x),
)
def test_invariant_and_rule_are_incompatible(decorators):
"""It's an error to apply @invariant and @rule to the same method."""
def method(self):
pass
for d in decorators[:-1]:
method = d(method)
with pytest.raises(InvalidDefinition):
decorators[-1](method)
def test_invalid_rule_argument():
"""Rule kwargs that are not a Strategy are expected to raise an InvalidArgument error."""
with pytest.raises(InvalidArgument):
class InvalidRuleMachine(RuleBasedStateMachine):
@rule(strategy=object())
def do_stuff(self):
pass
def test_invalid_initialize_argument():
"""Initialize kwargs that are not a Strategy are expected to raise an InvalidArgument error."""
with pytest.raises(InvalidArgument):
class InvalidInitialize(RuleBasedStateMachine):
@initialize(strategy=object())
def initialize(self):
pass
def test_multiple_invariants():
"""If multiple invariants are present, they all get run."""
class Invariant(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.first_invariant_ran = False
@invariant()
def invariant_1(self):
self.first_invariant_ran = True
@precondition(lambda self: self.first_invariant_ran)
@invariant()
def invariant_2(self):
raise ValueError
@rule()
def do_stuff(self):
pass
with pytest.raises(ValueError):
run_state_machine_as_test(Invariant)
def test_explicit_invariant_call_with_precondition():
"""Invariants can be called explicitly even if their precondition is not
satisfied."""
class BadPrecondition(RuleBasedStateMachine):
def __init__(self):
super().__init__()
@precondition(lambda self: False)
@invariant()
def test_blah(self):
raise ValueError
@rule()
def test_foo(self):
self.test_blah()
with pytest.raises(ValueError):
run_state_machine_as_test(BadPrecondition)
def test_invariant_checks_initial_state_if_no_initialize_rules():
"""Invariants are checked before any rules run."""
class BadPrecondition(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.num = 0
@invariant()
def test_blah(self):
if self.num == 0:
raise ValueError
@rule()
def test_foo(self):
self.num += 1
with pytest.raises(ValueError):
run_state_machine_as_test(BadPrecondition)
def test_invariant_failing_present_in_falsifying_example():
@Settings(print_blob=False)
class BadInvariant(RuleBasedStateMachine):
@initialize()
def initialize_1(self):
pass
@invariant()
def invariant_1(self):
raise ValueError
@rule()
def rule_1(self):
pass
with pytest.raises(ValueError) as err:
run_state_machine_as_test(BadInvariant)
result = "\n".join(err.value.__notes__)
assert (
result
== """
Falsifying example:
state = BadInvariant()
state.initialize_1()
state.invariant_1()
state.teardown()
""".strip()
)
def test_invariant_present_in_falsifying_example():
@Settings(print_blob=False, phases=tuple(Phase)[:-1])
class BadRuleWithGoodInvariants(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.num = 0
@initialize()
def initialize_1(self):
pass
@invariant(check_during_init=True)
def invariant_1(self):
pass
@invariant(check_during_init=False)
def invariant_2(self):
pass
@precondition(lambda self: self.num > 0)
@invariant()
def invariant_3(self):
pass
@rule()
def rule_1(self):
self.num += 1
if self.num == 2:
raise ValueError
with pytest.raises(ValueError) as err:
run_state_machine_as_test(BadRuleWithGoodInvariants)
expected = """
Falsifying example:
state = BadRuleWithGoodInvariants()
state.invariant_1()
state.initialize_1()
state.invariant_1()
state.invariant_2()
state.rule_1()
state.invariant_1()
state.invariant_2()
state.invariant_3()
state.rule_1()
state.teardown()
""".strip()
result = "\n".join(err.value.__notes__).strip()
assert expected == result
def test_always_runs_at_least_one_step():
class CountSteps(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.count = 0
@rule()
def do_something(self):
self.count += 1
def teardown(self):
assert self.count > 0
run_state_machine_as_test(CountSteps)
def test_removes_needless_steps():
"""Regression test from an example based on
tests/nocover/test_database_agreement.py, but without the expensive bits.
Comparing two database implementations in which deletion is broken, so as
soon as a key/value pair is successfully deleted the test will now fail if
you ever check that key.
The main interesting feature of this is that it has a lot of
opportunities to generate keys and values before it actually fails,
but will still fail with very high probability.
"""
@Settings(derandomize=True, max_examples=1000, deadline=None)
class IncorrectDeletion(RuleBasedStateMachine):
def __init__(self):
super().__init__()
self.__saved = defaultdict(set)
self.__deleted = defaultdict(set)
keys = Bundle("keys")
values = Bundle("values")
@rule(target=keys, k=binary())
def k(self, k):
return k
@rule(target=values, v=binary())
def v(self, v):
return v
@rule(k=keys, v=values)
def save(self, k, v):
self.__saved[k].add(v)
@rule(k=keys, v=values)
def delete(self, k, v):
if v in self.__saved[k]:
self.__deleted[k].add(v)
@rule(k=keys)
def values_agree(self, k):
assert not self.__deleted[k]
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(IncorrectDeletion)
result = "\n".join(err.value.__notes__)
assert result.count(" = state.k(") == 1
assert result.count(" = state.v(") == 1
def test_prints_equal_values_with_correct_variable_name():
@Settings(max_examples=100, suppress_health_check=list(HealthCheck))
class MovesBetweenBundles(RuleBasedStateMachine):
b1 = Bundle("b1")
b2 = Bundle("b2")
@rule(target=b1)
def create(self):
return []
@rule(target=b2, source=b1)
def transfer(self, source):
return source
@rule(source=b2)
def fail(self, source):
raise AssertionError
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(MovesBetweenBundles)
result = "\n".join(err.value.__notes__)
for m in ["create", "transfer", "fail"]:
assert result.count("state." + m) == 1
assert "b1_0 = state.create()" in result
assert "b2_0 = state.transfer(source=b1_0)" in result
assert "state.fail(source=b2_0)" in result
def test_initialize_rule():
@Settings(max_examples=1000)
class WithInitializeRules(RuleBasedStateMachine):
initialized: ClassVar = []
@initialize()
def initialize_a(self):
self.initialized.append("a")
@initialize()
def initialize_b(self):
self.initialized.append("b")
@initialize()
def initialize_c(self):
self.initialized.append("c")
@rule()
def fail_fast(self):
raise AssertionError
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(WithInitializeRules)
assert set(WithInitializeRules.initialized[-3:]) == {"a", "b", "c"}
result = err.value.__notes__[1:]
assert result[0] == "state = WithInitializeRules()"
# Initialize rules call order is shuffled
assert {result[1], result[2], result[3]} == {
"state.initialize_a()",
"state.initialize_b()",
"state.initialize_c()",
}
assert result[4] == "state.fail_fast()"
assert result[5] == "state.teardown()"
def test_initialize_rule_populate_bundle():
class WithInitializeBundleRules(RuleBasedStateMachine):
a = Bundle("a")
@initialize(target=a, dep=just("dep"))
def initialize_a(self, dep):
return f"a a_0 with ({dep})"
@rule(param=a)
def fail_fast(self, param):
raise AssertionError
WithInitializeBundleRules.TestCase.settings = NO_BLOB_SETTINGS
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(WithInitializeBundleRules)
result = "\n".join(err.value.__notes__)
assert (
result
== """
Falsifying example:
state = WithInitializeBundleRules()
a_0 = state.initialize_a(dep='dep')
state.fail_fast(param=a_0)
state.teardown()
""".strip()
)
def test_initialize_rule_dont_mix_with_precondition():
with pytest.raises(
InvalidDefinition,
match=(
"BadStateMachine\\.initialize has been decorated with both @initialize "
"and @precondition"
),
):
class BadStateMachine(RuleBasedStateMachine):
@precondition(lambda self: True)
@initialize()
def initialize(self):
pass
# Also test decorator application in reverse order
with pytest.raises(
InvalidDefinition,
match=(
"BadStateMachineReverseOrder\\.initialize has been decorated with both "
"@initialize and @precondition"
),
):
class BadStateMachineReverseOrder(RuleBasedStateMachine):
@initialize()
@precondition(lambda self: True)
def initialize(self):
pass
def test_initialize_rule_dont_mix_with_regular_rule():
with pytest.raises(
InvalidDefinition,
match="BadStateMachine\\.initialize has been decorated with both @rule and @initialize",
):
class BadStateMachine(RuleBasedStateMachine):
@rule()
@initialize()
def initialize(self):
pass
with pytest.raises(
InvalidDefinition,
match=(
"BadStateMachineReverseOrder\\.initialize has been decorated with both "
"@rule and @initialize"
),
):
class BadStateMachineReverseOrder(RuleBasedStateMachine):
@initialize()
@rule()
def initialize(self):
pass
def test_initialize_rule_cannot_be_double_applied():
with pytest.raises(
InvalidDefinition,
match="BadStateMachine\\.initialize has been decorated with @initialize twice",
):
class BadStateMachine(RuleBasedStateMachine):
@initialize()
@initialize()
def initialize(self):
pass
def test_initialize_rule_in_state_machine_with_inheritance():
class ParentStateMachine(RuleBasedStateMachine):
initialized: ClassVar = []
@initialize()
def initialize_a(self):
self.initialized.append("a")
class ChildStateMachine(ParentStateMachine):
@initialize()
def initialize_b(self):
self.initialized.append("b")
@rule()
def fail_fast(self):
raise AssertionError
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(ChildStateMachine)
assert set(ChildStateMachine.initialized[-2:]) == {"a", "b"}
result = err.value.__notes__[1:]
assert result[0] == "state = ChildStateMachine()"
# Initialize rules call order is shuffled
assert {result[1], result[2]} == {"state.initialize_a()", "state.initialize_b()"}
assert result[3] == "state.fail_fast()"
assert result[4] == "state.teardown()"
def test_can_manually_call_initialize_rule():
class StateMachine(RuleBasedStateMachine):
initialize_called_counter = 0
@initialize()
def initialize(self):
self.initialize_called_counter += 1
@rule()
def fail_eventually(self):
self.initialize()
assert self.initialize_called_counter <= 2
StateMachine.TestCase.settings = NO_BLOB_SETTINGS
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(StateMachine)
result = "\n".join(err.value.__notes__)
assert (
result
== """
Falsifying example:
state = StateMachine()
state.initialize()
state.fail_eventually()
state.fail_eventually()
state.teardown()
""".strip()
)
def test_steps_printed_despite_pytest_fail():
# Test for https://github.com/HypothesisWorks/hypothesis/issues/1372
@Settings(print_blob=False)
class RaisesProblem(RuleBasedStateMachine):
@rule()
def oops(self):
pytest.fail("note that this raises a BaseException")
with pytest.raises(Failed) as err:
run_state_machine_as_test(RaisesProblem)
assert (
"\n".join(err.value.__notes__).strip()
== """
Falsifying example:
state = RaisesProblem()
state.oops()
state.teardown()""".strip()
)
def test_steps_not_printed_with_pytest_skip(capsys):
class RaisesProblem(RuleBasedStateMachine):
@rule()
def skip_whole_test(self):
pytest.skip()
with pytest.raises(Skipped):
run_state_machine_as_test(RaisesProblem)
out, _ = capsys.readouterr()
assert "state" not in out
def test_rule_deprecation_targets_and_target():
k, v = Bundle("k"), Bundle("v")
with pytest.raises(InvalidArgument):
rule(targets=(k,), target=v)
def test_rule_deprecation_bundle_by_name():
Bundle("k")
with pytest.raises(InvalidArgument):
rule(target="k")
def test_rule_non_bundle_target():
with pytest.raises(InvalidArgument):
rule(target=integers())
def test_rule_non_bundle_target_oneof():
k, v = Bundle("k"), Bundle("v")
pattern = r".+ `one_of(a, b)` or `a | b` .+"
with pytest.raises(InvalidArgument, match=pattern):
rule(target=k | v)
def test_uses_seed(capsys):
@seed(0)
class TrivialMachine(RuleBasedStateMachine):
@rule()
def oops(self):
raise AssertionError
with pytest.raises(AssertionError):
run_state_machine_as_test(TrivialMachine)
out, _ = capsys.readouterr()
assert "@seed" not in out
def test_reproduce_failure_works():
@reproduce_failure(__version__, encode_failure([False, 0, True]))
class TrivialMachine(RuleBasedStateMachine):
@rule()
def oops(self):
raise AssertionError
with pytest.raises(AssertionError):
run_state_machine_as_test(TrivialMachine, settings=Settings(print_blob=True))
def test_reproduce_failure_fails_if_no_error():
@reproduce_failure(__version__, encode_failure([False, 0, True]))
class TrivialMachine(RuleBasedStateMachine):
@rule()
def ok(self):
pass
with pytest.raises(DidNotReproduce):
run_state_machine_as_test(TrivialMachine, settings=Settings(print_blob=True))
def test_cannot_have_zero_steps():
with pytest.raises(InvalidArgument):
Settings(stateful_step_count=0)
def test_arguments_do_not_use_names_of_return_values():
# See https://github.com/HypothesisWorks/hypothesis/issues/2341
class TrickyPrintingMachine(RuleBasedStateMachine):
data = Bundle("data")
@initialize(target=data, value=integers())
def init_data(self, value):
return value
@rule(d=data)
def mostly_fails(self, d):
assert d == 42
with pytest.raises(AssertionError) as err:
run_state_machine_as_test(TrickyPrintingMachine)
assert "data_0 = state.init_data(value=0)" in err.value.__notes__
assert "data_0 = state.init_data(value=data_0)" not in err.value.__notes__
|
FailsEventually
|
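The pattern repeated throughout FailsEventually's test module, defining a machine and handing it to run_state_machine_as_test, in its smallest passing form (a sketch, assuming the public hypothesis.stateful API):

from hypothesis.stateful import RuleBasedStateMachine, rule, run_state_machine_as_test

class Counter(RuleBasedStateMachine):
    def __init__(self):
        super().__init__()
        self.n = 0

    @rule()
    def bump(self):
        self.n += 1
        assert self.n > 0  # holds on every step, so the machine passes

run_state_machine_as_test(Counter)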
python
|
PyCQA__pylint
|
doc/data/messages/d/duplicate-bases/good.py
|
{
"start": 25,
"end": 56
}
|
class ____(Animal):
pass
|
Bird
|
python
|
google__jax
|
docs/autodidax.py
|
{
"start": 22473,
"end": 22508
}
|
class ____: pass
empty = Empty()
|
Empty
|
python
|
mahmoud__boltons
|
tests/test_ioutils.py
|
{
"start": 11728,
"end": 17654
}
|
class ____(TestCase, BaseTestMixin, AssertionsMixin):
linesep = os.linesep
def setUp(self):
self.spooled_flo = ioutils.SpooledStringIO()
self.test_str = "Remember kids, always use an emdash: '\u2014'"
self.test_str_lines = f"Text with\u2014{os.linesep}newlines!"
self.data_type = str
def test_compare_not_equal_instances(self):
"""Make sure instances with different values fail == check."""
a = ioutils.SpooledStringIO()
a.write("I am a!")
b = ioutils.SpooledStringIO()
b.write("I am b!")
self.assertNotEqual(a, b)
def test_compare_two_equal_instances(self):
"""Make sure we can compare instances"""
a = ioutils.SpooledStringIO()
a.write("I am equal!")
b = ioutils.SpooledStringIO()
b.write("I am equal!")
self.assertEqual(a, b)
def test_auto_rollover(self):
"""Make sure file rolls over to disk after max_size reached"""
tmp = ioutils.SpooledStringIO(max_size=10)
tmp.write("The quick brown fox jumped over the lazy dogs.")
self.assertTrue(tmp._rolled)
def test_use_as_context_mgr(self):
"""Make sure SpooledStringIO can be used as a context manager"""
test_str = "Armado en los EE, UU. para S. P. Richards co.,"
with ioutils.SpooledStringIO() as f:
f.write(test_str)
self.assertEqual(f.getvalue(), test_str)
def test_len_no_rollover(self):
"""Make sure len property works with in-memory flo"""
self.spooled_flo.write(self.test_str)
self.assertEqual(self.spooled_flo.len, len(self.test_str))
def test_len_rollover(self):
"""Make sure len property works with on-disk flo"""
self.spooled_flo.write(self.test_str)
self.spooled_flo.rollover()
self.assertEqual(self.spooled_flo.len, len(self.test_str))
def test_invalid_type(self):
"""Ensure TypeError raised when writing bytes to SpooledStringIO"""
self.assertRaises(TypeError, self.spooled_flo.write, b"hi")
def test_tell_codepoints(self):
"""Verify tell() returns codepoint position, not bytes position"""
self.spooled_flo.write(self.test_str)
self.spooled_flo.seek(0)
self.spooled_flo.read(40)
self.assertEqual(self.spooled_flo.tell(), 40)
self.spooled_flo.seek(10)
self.assertEqual(self.spooled_flo.tell(), 10)
def test_codepoints_all_enc(self):
""""Test getting read, seek, tell, on various codepoints"""
test_str = "\u2014\u2014\u2014"
self.spooled_flo.write(test_str)
self.spooled_flo.seek(1)
self.assertEqual(self.spooled_flo.read(), "\u2014\u2014")
self.assertEqual(len(self.spooled_flo), len(test_str))
def test_seek_codepoints_SEEK_END(self):
"""Make sure seek() moves to codepoints relative to file end"""
self.spooled_flo.write(self.test_str)
ret = self.spooled_flo.seek(0, os.SEEK_END)
self.assertEqual(ret, len(self.test_str))
def test_seek_codepoints_large_SEEK_END(self):
"""Make sure seek() moves to codepoints relative to file end"""
test_str = "".join(random.choice(string.ascii_letters) for
x in range(34000))
self.spooled_flo.write(test_str)
ret = self.spooled_flo.seek(0, os.SEEK_END)
self.assertEqual(ret, len(test_str))
def test_seek_codepoints_SEEK_SET(self):
"""Make sure seek() moves to codepoints relative to file start"""
self.spooled_flo.write(self.test_str)
ret = self.spooled_flo.seek(3, os.SEEK_SET)
self.assertEqual(ret, 3)
def test_seek_codepoints_large_SEEK_SET(self):
"""Make sure seek() moves to codepoints relative to file start"""
test_str = "".join(random.choice(string.ascii_letters) for
x in range(34000))
self.spooled_flo.write(test_str)
ret = self.spooled_flo.seek(33000, os.SEEK_SET)
self.assertEqual(ret, 33000)
def test_seek_codepoints_SEEK_CUR(self):
"""Make sure seek() moves to codepoints relative to current_position"""
test_str = "\u2014\u2014\u2014"
self.spooled_flo.write(test_str)
self.spooled_flo.seek(1)
self.assertEqual(self.spooled_flo.tell(), 1)
ret = self.spooled_flo.seek(2, os.SEEK_CUR)
self.assertEqual(ret, 3)
def test_seek_codepoints_large_SEEK_CUR(self):
"""Make sure seek() moves to codepoints relative to current_position"""
test_str = "".join(random.choice(string.ascii_letters) for
x in range(34000))
self.spooled_flo.write(test_str)
self.spooled_flo.seek(1)
ret = self.spooled_flo.seek(33000, os.SEEK_CUR)
self.assertEqual(ret, 33001)
def test_x80_codepoint(self):
"""Make sure x80 codepoint doesn't confuse read value"""
test_str = '\x8000'
self.spooled_flo.write(test_str)
self.spooled_flo.seek(0)
self.assertEqual(len(self.spooled_flo.read(2)), 2)
self.assertEqual(self.spooled_flo.read(), '0')
def test_seek_encoded(self):
"""Make sure reading works when bytes exceeds read val"""
test_str = "\u2014\u2014\u2014"
self.spooled_flo.write(test_str)
self.spooled_flo.seek(0)
self.assertEqual(self.spooled_flo.read(3), test_str)
def test_iter(self):
"""Make sure iter works as expected"""
self.spooled_flo.write("a\nb")
self.spooled_flo.seek(0)
self.assertEqual([x for x in self.spooled_flo], ["a\n", "b"])
def test_writelines(self):
"""An iterable of lines can be written"""
lines = ["1", "2", "3"]
expected = "123"
self.spooled_flo.writelines(lines)
self.assertEqual(self.spooled_flo.getvalue(), expected)
|
TestSpooledStringIO
|
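The recurring point in TestSpooledStringIO is that positions are codepoints, not bytes; a small sketch of the distinction using only methods exercised above:

from boltons import ioutils

flo = ioutils.SpooledStringIO()
flo.write("\u2014" * 3)               # 3 codepoints, but 9 bytes in UTF-8
flo.seek(0)
flo.read(2)
assert flo.tell() == 2                # codepoint position, not a byte offset
assert len("\u2014".encode("utf-8")) == 3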
python
|
airbytehq__airbyte
|
airbyte-integrations/connectors/source-github/source_github/github_schema.py
|
{
"start": 1021697,
"end": 1022481
}
|
class ____(sgqlc.types.Type):
"""Autogenerated return type of
UpdateEnterpriseMembersCanDeleteRepositoriesSetting
"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "enterprise", "message")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
enterprise = sgqlc.types.Field("Enterprise", graphql_name="enterprise")
"""The enterprise with the updated members can delete repositories
setting.
"""
message = sgqlc.types.Field(String, graphql_name="message")
"""A message confirming the result of updating the members can delete
repositories setting.
"""
|
UpdateEnterpriseMembersCanDeleteRepositoriesSettingPayload
|
python
|
pytransitions__transitions
|
transitions/extensions/asyncio.py
|
{
"start": 1561,
"end": 2639
}
|
class ____(State):
"""A persistent representation of a state managed by a ``Machine``. Callback execution is done asynchronously."""
async def enter(self, event_data):
"""Triggered when a state is entered.
Args:
event_data: (AsyncEventData): The currently processed event.
"""
_LOGGER.debug("%sEntering state %s. Processing callbacks...", event_data.machine.name, self.name)
await event_data.machine.callbacks(self.on_enter, event_data)
_LOGGER.info("%sFinished processing state %s enter callbacks.", event_data.machine.name, self.name)
async def exit(self, event_data):
"""Triggered when a state is exited.
Args:
event_data: (AsyncEventData): The currently processed event.
"""
_LOGGER.debug("%sExiting state %s. Processing callbacks...", event_data.machine.name, self.name)
await event_data.machine.callbacks(self.on_exit, event_data)
_LOGGER.info("%sFinished processing state %s exit callbacks.", event_data.machine.name, self.name)
|
AsyncState
|
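A sketch of where AsyncState's awaitable callbacks fit, assuming the companion AsyncMachine from the same transitions.extensions.asyncio module; the model and state names are illustrative:

import asyncio
from transitions.extensions.asyncio import AsyncMachine

class Bulb:
    async def on_enter_on(self):
        await asyncio.sleep(0)        # awaited via AsyncState.enter(...)
        print("light is on")

bulb = Bulb()
machine = AsyncMachine(bulb, states=["off", "on"], initial="off")
machine.add_transition("flip", "off", "on")
asyncio.run(bulb.flip())              # triggers are coroutines on the model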
python
|
mlflow__mlflow
|
mlflow/utils/gorilla.py
|
{
"start": 3270,
"end": 4837
}
|
class ____:
"""Define the patching behaviour.
Attributes
----------
allow_hit : bool
A hit occurs when an attribute at the destination already exists with
the name given by the patch. If ``False``, the patch process won't
allow setting a new value for the attribute by raising an exception.
Defaults to ``False``.
store_hit : bool
If ``True`` and :attr:`allow_hit` is also set to ``True``, then any
attribute at the destination that is hit is stored under a different
name before being overwritten by the patch. Defaults to ``True``.
"""
def __init__(self, **kwargs):
"""Constructor.
Parameters
----------
kwargs
Keyword arguments, see the attributes.
"""
self.allow_hit = False
self.store_hit = True
self._update(**kwargs)
def __repr__(self):
values = ", ".join([f"{key}={value!r}" for key, value in sorted(_iteritems(self.__dict__))])
return f"{type(self).__name__}({values})"
def __eq__(self, other):
if isinstance(other, type(self)):
return self.__dict__ == other.__dict__
return NotImplemented
def __ne__(self, other):
is_equal = self.__eq__(other)
return is_equal if is_equal is NotImplemented else not is_equal
def _update(self, **kwargs):
"""Update some settings.
Parameters
----------
kwargs
Settings to update.
"""
self.__dict__.update(**kwargs)
|
Settings
|
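Given that the constructor above just applies keyword overrides via _update, usage looks like this:

# Defaults: refuse to overwrite existing attributes at the destination.
default = Settings()
assert (default.allow_hit, default.store_hit) == (False, True)

# Opt in to overwriting, keeping a copy of anything that gets hit.
patch_settings = Settings(allow_hit=True)
assert patch_settings.allow_hit and patch_settings.store_hit
assert default != patch_settings      # __eq__/__ne__ compare the __dict__s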
python
|
dagster-io__dagster
|
python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py
|
{
"start": 5734,
"end": 5928
}
|
class ____(graphene.Union):
class Meta:
types = (GrapheneAssetPartitionsStatusCounts, GrapheneUnpartitionedAssetStatus)
name = "AssetBackfillStatus"
|
GrapheneAssetBackfillStatus
|
python
|
pypa__hatch
|
tests/project/test_core.py
|
{
"start": 56,
"end": 1166
}
|
class ____:
def test_no_project(self, temp_dir):
project = Project(temp_dir)
assert project.find_project_root() is None
@pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"])
def test_direct(self, temp_dir, file_name):
project = Project(temp_dir)
project_file = temp_dir / file_name
project_file.touch()
assert project.find_project_root() == temp_dir
@pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"])
def test_recurse(self, temp_dir, file_name):
project = Project(temp_dir)
project_file = temp_dir / file_name
project_file.touch()
path = temp_dir / "test"
path.mkdir()
assert project.find_project_root() == temp_dir
@pytest.mark.parametrize("file_name", ["pyproject.toml", "setup.py"])
def test_no_path(self, temp_dir, file_name):
project_file = temp_dir / file_name
project_file.touch()
path = temp_dir / "test"
project = Project(path)
assert project.find_project_root() == temp_dir
|
TestFindProjectRoot
|
python
|
getsentry__sentry
|
src/sentry/issues/search.py
|
{
"start": 1745,
"end": 1809
}
|
class ____(TypedDict, total=False):
group_id: int
|
MergeableRow
|
python
|
astropy__astropy
|
astropy/time/tests/test_comparisons.py
|
{
"start": 185,
"end": 7287
}
|
class ____:
"""Test Comparisons of Time and TimeDelta classes"""
def setup_method(self):
self.t1 = Time(np.arange(49995, 50005), format="mjd", scale="utc")
self.t2 = Time(np.arange(49000, 51000, 200), format="mjd", scale="utc")
def test_miscompares(self):
"""
If an incompatible object is compared to a Time object, == should
return False and != should return True. All other comparison
operators should raise a TypeError.
"""
t1 = Time("J2000", scale="utc")
for op, op_str in (
(operator.ge, ">="),
(operator.gt, ">"),
(operator.le, "<="),
(operator.lt, "<"),
):
with pytest.raises(
TypeError,
match=f"'{op_str}' not supported between instances of 'Time' and 'NoneType'",
):
op(t1, None)
# Keep == and != as they are specifically meant to test Time.__eq__
# and Time.__ne__
assert (t1 == None) is False
assert (t1 != None) is True
def test_time(self):
t1_lt_t2 = self.t1 < self.t2
assert np.all(
t1_lt_t2
== np.array(
[False, False, False, False, False, False, True, True, True, True]
)
)
t1_ge_t2 = self.t1 >= self.t2
assert np.all(t1_ge_t2 != t1_lt_t2)
t1_le_t2 = self.t1 <= self.t2
assert np.all(
t1_le_t2
== np.array(
[False, False, False, False, False, True, True, True, True, True]
)
)
t1_gt_t2 = self.t1 > self.t2
assert np.all(t1_gt_t2 != t1_le_t2)
t1_eq_t2 = self.t1 == self.t2
assert np.all(
t1_eq_t2
== np.array(
[False, False, False, False, False, True, False, False, False, False]
)
)
t1_ne_t2 = self.t1 != self.t2
assert np.all(t1_ne_t2 != t1_eq_t2)
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
t1_0_gt_t2 = self.t1[0] > self.t2
assert np.all(
t1_0_gt_t2
== np.array(
[True, True, True, True, True, False, False, False, False, False]
)
)
t1_gt_t2_0 = self.t1 > self.t2[0]
assert np.all(
t1_gt_t2_0
== np.array([True, True, True, True, True, True, True, True, True, True])
)
def test_time_boolean(self):
t1_0_gt_t2_0 = self.t1[0] > self.t2[0]
assert t1_0_gt_t2_0 is True
def test_timedelta(self):
dt = self.t2 - self.t1
with pytest.raises(TypeError):
self.t1 > dt # noqa: B015
dt_gt_td0 = dt > TimeDelta(0.0, format="sec")
assert np.all(
dt_gt_td0
== np.array(
[False, False, False, False, False, False, True, True, True, True]
)
)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
def test_isclose_time(swap, time_delta):
"""Test functionality of Time.isclose() method.
Run every test with 2 args in original order and swapped, and using
Quantity or TimeDelta for atol (when provided)."""
def isclose_swap(t1, t2, **kwargs):
if swap:
t1, t2 = t2, t1
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
# Start with original demonstration from #8742. In this issue both t2 == t1
# and t3 == t1 give False, but this may change with a newer ERFA.
t1 = Time("2018-07-24T10:41:56.807015240")
t2 = t1 + 0.0 * u.s
t3 = t1 + TimeDelta(0.0 * u.s)
assert isclose_swap(t1, t2)
assert isclose_swap(t1, t3)
t2 = t1 + 1 * u.s
assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day) # Test different unit
assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
t2 = t1 + [-1, 0, 2] * u.s
assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
t2 = t1 + 3 * np.finfo(float).eps * u.day
assert not isclose_swap(t1, t2)
def test_isclose_time_exceptions():
t1 = Time("2020:001")
t2 = t1 + 1 * u.s
match = "'other' argument must support subtraction with Time"
with pytest.raises(TypeError, match=match):
t1.isclose(1.5)
match = (
"'atol' argument must be a Quantity or TimeDelta instance, got float instead"
)
with pytest.raises(TypeError, match=match):
t1.isclose(t2, 1.5)
@pytest.mark.parametrize("swap", [True, False])
@pytest.mark.parametrize("time_delta", [True, False])
@pytest.mark.parametrize("other_quantity", [True, False])
def test_isclose_timedelta(swap, time_delta, other_quantity):
"""Test functionality of TimeDelta.isclose() method.
Run every test with 2 args in original order and swapped, and using
Quantity or TimeDelta for atol (when provided), and using Quantity or
TimeDelta for the other argument."""
def isclose_swap(t1, t2, **kwargs):
if swap:
t1, t2 = t2, t1
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
def isclose_other_quantity(t1, t2, **kwargs):
if other_quantity:
t2 = t2.to(u.day)
if "atol" in kwargs and time_delta:
kwargs["atol"] = TimeDelta(kwargs["atol"])
return t1.isclose(t2, **kwargs)
t1 = TimeDelta(1.0 * u.s)
t2 = t1 + 0.0 * u.s
t3 = t1 + TimeDelta(0.0 * u.s)
assert isclose_swap(t1, t2)
assert isclose_swap(t1, t3)
assert isclose_other_quantity(t1, t2)
assert isclose_other_quantity(t1, t3)
t2 = t1 + 1 * u.s
assert isclose_swap(t1, t2, atol=1.5 / 86400 * u.day)
assert not isclose_swap(t1, t2, atol=0.5 / 86400 * u.day)
assert isclose_other_quantity(t1, t2, atol=1.5 / 86400 * u.day)
assert not isclose_other_quantity(t1, t2, atol=0.5 / 86400 * u.day)
t1 = TimeDelta(0 * u.s)
t2 = t1 + [-1, 0, 2] * u.s
assert np.all(isclose_swap(t1, t2, atol=1.5 * u.s) == [True, True, False])
assert np.all(isclose_other_quantity(t1, t2, atol=1.5 * u.s) == [True, True, False])
# Check with rtol
# 1 * 0.6 + 0.5 = 1.1 --> 1 <= 1.1 --> True
# 0 * 0.6 + 0.5 = 0.5 --> 0 <= 0.5 --> True
# 2 * 0.6 + 0.5 = 1.7 --> 2 <= 1.7 --> False
assert np.all(t1.isclose(t2, atol=0.5 * u.s, rtol=0.6) == [True, True, False])
t2 = t1 + 2 * np.finfo(float).eps * u.day
assert not isclose_swap(t1, t2)
assert not isclose_other_quantity(t1, t2)
def test_isclose_timedelta_exceptions():
t1 = TimeDelta(1 * u.s)
t2 = t1 + 1 * u.s
match = "other' argument must support conversion to days"
with pytest.raises(TypeError, match=match):
t1.isclose(1.5)
match = (
"'atol' argument must be a Quantity or TimeDelta instance, got float instead"
)
with pytest.raises(TypeError, match=match):
t1.isclose(t2, 1.5)
|
TestTimeComparisons
|
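The rtol comments in test_isclose_timedelta spell out the acceptance rule step by step; as a plain-number sketch, assuming isclose uses the |a - b| <= atol + rtol * |b| form those comments imply:

def isclose(a, b, atol, rtol):
    # Same arithmetic as the comments: |a - b| <= atol + rtol * |b|
    return abs(a - b) <= atol + rtol * abs(b)

# t1 = 0 s against t2 = [-1, 0, 2] s with atol = 0.5 s, rtol = 0.6
assert [isclose(0, b, 0.5, 0.6) for b in (-1, 0, 2)] == [True, True, False]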
python
|
pypa__warehouse
|
tests/unit/email/ses/test_views.py
|
{
"start": 424,
"end": 2422
}
|
class ____:
def test_valid(self, monkeypatch):
class FakeMessageVerifier:
@staticmethod
@pretend.call_recorder
def __init__(topics, session):
self.topics = topics
self.session = session
@staticmethod
@pretend.call_recorder
def verify(message):
pass
monkeypatch.setattr(views.sns, "MessageVerifier", FakeMessageVerifier)
request = pretend.stub(
http=pretend.stub(),
registry=pretend.stub(settings={"mail.topic": "this is a topic"}),
)
message = pretend.stub()
views._verify_sns_message(request, message)
assert FakeMessageVerifier.__init__.calls == [
pretend.call(topics=["this is a topic"], session=request.http)
]
assert FakeMessageVerifier.verify.calls == [pretend.call(message)]
def test_invalid(self, monkeypatch):
class FakeMessageVerifier:
@staticmethod
@pretend.call_recorder
def __init__(topics, session):
self.topics = topics
self.session = session
@staticmethod
@pretend.call_recorder
def verify(message):
raise views.sns.InvalidMessageError("This is an Invalid Message")
monkeypatch.setattr(views.sns, "MessageVerifier", FakeMessageVerifier)
request = pretend.stub(
http=pretend.stub(),
registry=pretend.stub(settings={"mail.topic": "this is a topic"}),
)
message = pretend.stub()
with pytest.raises(HTTPBadRequest, match="This is an Invalid Message"):
views._verify_sns_message(request, message)
assert FakeMessageVerifier.__init__.calls == [
pretend.call(topics=["this is a topic"], session=request.http)
]
assert FakeMessageVerifier.verify.calls == [pretend.call(message)]
|
TestVerifySNSMessageHelper
|
python
|
dask__dask
|
dask/backends.py
|
{
"start": 1458,
"end": 5298
}
|
class ____(Generic[BackendEntrypointType]):
"""Simple backend dispatch for collection-creation functions"""
_lookup: dict[str, BackendEntrypointType]
_module_name: str
_config_field: str
_default: str
_entrypoint_class: type[BackendEntrypointType]
_entrypoint_root: str
def __init__(
self,
module_name: str,
default: str,
entrypoint_class: type[BackendEntrypointType],
name: str | None = None,
entrypoint_root: str = "dask",
):
self._lookup = {}
self._module_name = module_name
self._config_field = f"{module_name}.backend"
self._default = default
self._entrypoint_class = entrypoint_class
self._entrypoint_root = entrypoint_root
if name:
self.__name__ = name
def register_backend(
self, name: str, backend: BackendEntrypointType
) -> BackendEntrypointType:
"""Register a target class for a specific array-backend label"""
if not isinstance(backend, self._entrypoint_class):
raise ValueError(
f"This CreationDispatch only supports "
f"{self._entrypoint_class} registration. "
f"Got {type(backend)}"
)
self._lookup[name] = backend
return backend
def dispatch(self, backend: str):
"""Return the desired backend entrypoint"""
        try:
            impl = self._lookup[backend]
        except KeyError:
            # Check entrypoints for the specified backend
            entrypoints = detect_entrypoints(
                f"{self._entrypoint_root}.{self._module_name}.backends"
            )
            if backend in entrypoints:
                return self.register_backend(backend, entrypoints[backend].load()())
        else:
            return impl
        raise ValueError(f"No backend dispatch registered for {backend}")
@property
def backend(self) -> str:
"""Return the desired collection backend"""
return config.get(self._config_field, self._default) or self._default
@backend.setter
def backend(self, value: str):
raise RuntimeError(
f"Set the backend by configuring the {self._config_field} option"
)
def register_inplace(
self,
backend: str,
name: str | None = None,
) -> Callable[
[Callable[BackendFuncParams, BackendFuncReturn]],
Callable[BackendFuncParams, BackendFuncReturn],
]:
"""Register dispatchable function"""
def decorator(
fn: Callable[BackendFuncParams, BackendFuncReturn],
) -> Callable[BackendFuncParams, BackendFuncReturn]:
dispatch_name = name or fn.__name__
dispatcher = self.dispatch(backend)
dispatcher.__setattr__(dispatch_name, fn)
@wraps(fn)
def wrapper(*args, **kwargs):
func = getattr(self, dispatch_name)
try:
return func(*args, **kwargs)
except Exception as e:
try:
exc = type(e)(
f"An error occurred while calling the {funcname(func)} "
f"method registered to the {self.backend} backend.\n"
f"Original Message: {e}"
)
except TypeError:
raise e
else:
raise exc from e
wrapper.__name__ = dispatch_name
return wrapper
return decorator
def __getattr__(self, item: str):
"""
Return the appropriate attribute for the current backend
"""
backend = self.dispatch(self.backend)
return getattr(backend, item)
|
CreationDispatch
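A minimal usage sketch for the dispatcher above; the "toy" backend label, the entrypoint subclass, and its `ones` function are illustrative assumptions, not dask's real array backend machinery:

import dask
from dask.backends import CreationDispatch, DaskBackendEntrypoint

class ToyEntrypoint(DaskBackendEntrypoint):
    # hypothetical creation function exposed by this backend
    def ones(self, shape):
        return ("toy-ones", shape)

dispatch = CreationDispatch(
    module_name="array",
    default="numpy",
    entrypoint_class=DaskBackendEntrypoint,
    name="toy_dispatch",
)
dispatch.register_backend("toy", ToyEntrypoint())

# Attribute access routes through the configured backend label.
with dask.config.set({"array.backend": "toy"}):
    assert dispatch.ones((2, 2)) == ("toy-ones", (2, 2))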
|
python
|
django__django
|
tests/resolve_url/tests.py
|
{
"start": 272,
"end": 2711
}
|
class ____(SimpleTestCase):
"""
Tests for the resolve_url() function.
"""
def test_url_path(self):
"""
Passing a URL path to resolve_url() results in the same url.
"""
self.assertEqual("/something/", resolve_url("/something/"))
def test_relative_path(self):
"""
Passing a relative URL path to resolve_url() results in the same url.
"""
self.assertEqual("../", resolve_url("../"))
self.assertEqual("../relative/", resolve_url("../relative/"))
self.assertEqual("./", resolve_url("./"))
self.assertEqual("./relative/", resolve_url("./relative/"))
def test_full_url(self):
"""
Passing a full URL to resolve_url() results in the same url.
"""
url = "http://example.com/"
self.assertEqual(url, resolve_url(url))
def test_model(self):
"""
Passing a model to resolve_url() results in get_absolute_url() being
called on that model instance.
"""
m = UnimportantThing(importance=1)
self.assertEqual(m.get_absolute_url(), resolve_url(m))
def test_view_function(self):
"""
Passing a view function to resolve_url() results in the URL path
mapping to that view name.
"""
resolved_url = resolve_url(some_view)
self.assertEqual("/some-url/", resolved_url)
def test_lazy_reverse(self):
"""
Passing the result of reverse_lazy is resolved to a real URL
string.
"""
resolved_url = resolve_url(reverse_lazy("some-view"))
self.assertIsInstance(resolved_url, str)
self.assertEqual("/some-url/", resolved_url)
def test_valid_view_name(self):
"""
Passing a view name to resolve_url() results in the URL path mapping
to that view.
"""
resolved_url = resolve_url("some-view")
self.assertEqual("/some-url/", resolved_url)
def test_domain(self):
"""
Passing a domain to resolve_url() returns the same domain.
"""
self.assertEqual(resolve_url("example.com"), "example.com")
def test_non_view_callable_raises_no_reverse_match(self):
"""
Passing a non-view callable into resolve_url() raises a
NoReverseMatch exception.
"""
with self.assertRaises(NoReverseMatch):
resolve_url(lambda: "asdf")
|
ResolveUrlTests
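resolve_url() itself comes from django.shortcuts; a runnable sketch of the behaviors tested above, using a stand-in URLconf whose view and URL name mirror the test fixtures:

import django
from django.conf import settings
from django.http import HttpResponse
from django.urls import path

def some_view(request):  # placeholder view, mirrors the tests' fixture
    return HttpResponse("ok")

urlpatterns = [path("some-url/", some_view, name="some-view")]

settings.configure(ROOT_URLCONF=__name__)
django.setup()

from django.shortcuts import resolve_url

assert resolve_url("/something/") == "/something/"  # paths pass through
assert resolve_url("some-view") == "/some-url/"     # view names are reversed
assert resolve_url("example.com") == "example.com"  # domains pass through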
|
python
|
pytorch__pytorch
|
test/test_hub.py
|
{
"start": 607,
"end": 12329
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
self.previous_hub_dir = torch.hub.get_dir()
self.tmpdir = tempfile.TemporaryDirectory("hub_dir")
torch.hub.set_dir(self.tmpdir.name)
self.trusted_list_path = os.path.join(torch.hub.get_dir(), "trusted_list")
def tearDown(self):
super().tearDown()
torch.hub.set_dir(self.previous_hub_dir) # probably not needed, but can't hurt
self.tmpdir.cleanup()
def _assert_trusted_list_is_empty(self):
with open(self.trusted_list_path) as f:
assert not f.readlines()
def _assert_in_trusted_list(self, line):
with open(self.trusted_list_path) as f:
assert line in (l.strip() for l in f)
@retry(Exception, tries=3)
def test_load_from_github(self):
hub_model = hub.load(
"ailzhang/torchhub_example",
"mnist",
source="github",
pretrained=True,
verbose=False,
)
self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)
@retry(Exception, tries=3)
def test_load_from_local_dir(self):
local_dir = hub._get_cache_or_reload(
"ailzhang/torchhub_example",
force_reload=False,
trust_repo=True,
calling_fn=None,
)
hub_model = hub.load(
local_dir, "mnist", source="local", pretrained=True, verbose=False
)
self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)
@retry(Exception, tries=3)
def test_load_from_branch(self):
hub_model = hub.load(
"ailzhang/torchhub_example:ci/test_slash",
"mnist",
pretrained=True,
verbose=False,
)
self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)
@retry(Exception, tries=3)
def test_get_set_dir(self):
previous_hub_dir = torch.hub.get_dir()
with tempfile.TemporaryDirectory("hub_dir") as tmpdir:
torch.hub.set_dir(tmpdir)
self.assertEqual(torch.hub.get_dir(), tmpdir)
self.assertNotEqual(previous_hub_dir, tmpdir)
hub_model = hub.load(
"ailzhang/torchhub_example", "mnist", pretrained=True, verbose=False
)
self.assertEqual(
sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE
)
assert os.path.exists(
os.path.join(tmpdir, "ailzhang_torchhub_example_master")
)
# Test that set_dir properly calls expanduser()
# non-regression test for https://github.com/pytorch/pytorch/issues/69761
new_dir = os.path.join("~", "hub")
torch.hub.set_dir(new_dir)
self.assertEqual(torch.hub.get_dir(), os.path.expanduser(new_dir))
@retry(Exception, tries=3)
def test_list_entrypoints(self):
entry_lists = hub.list("ailzhang/torchhub_example", trust_repo=True)
self.assertObjectIn("mnist", entry_lists)
@retry(Exception, tries=3)
def test_download_url_to_file(self):
with tempfile.TemporaryDirectory() as tmpdir:
f = os.path.join(tmpdir, "temp")
hub.download_url_to_file(TORCHHUB_EXAMPLE_RELEASE_URL, f, progress=False)
loaded_state = torch.load(f)
self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)
# Check that the downloaded file has default file permissions
f_ref = os.path.join(tmpdir, "reference")
open(f_ref, "w").close()
expected_permissions = oct(os.stat(f_ref).st_mode & 0o777)
actual_permissions = oct(os.stat(f).st_mode & 0o777)
assert actual_permissions == expected_permissions
@retry(Exception, tries=3)
def test_load_state_dict_from_url(self):
loaded_state = hub.load_state_dict_from_url(TORCHHUB_EXAMPLE_RELEASE_URL)
self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)
# with name
file_name = "the_file_name"
loaded_state = hub.load_state_dict_from_url(
TORCHHUB_EXAMPLE_RELEASE_URL, file_name=file_name
)
expected_file_path = os.path.join(torch.hub.get_dir(), "checkpoints", file_name)
self.assertTrue(os.path.exists(expected_file_path))
self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)
# with safe weight_only
loaded_state = hub.load_state_dict_from_url(
TORCHHUB_EXAMPLE_RELEASE_URL, weights_only=True
)
self.assertEqual(sum_of_state_dict(loaded_state), SUM_OF_HUB_EXAMPLE)
@retry(Exception, tries=3)
def test_load_legacy_zip_checkpoint(self):
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always")
hub_model = hub.load(
"ailzhang/torchhub_example", "mnist_zip", pretrained=True, verbose=False
)
self.assertEqual(
sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE
)
assert any(
"will be deprecated in favor of default zipfile" in str(w) for w in ws
)
# Test the default zipfile serialization format produced by >=1.6 release.
@retry(Exception, tries=3)
def test_load_zip_1_6_checkpoint(self):
hub_model = hub.load(
"ailzhang/torchhub_example",
"mnist_zip_1_6",
pretrained=True,
verbose=False,
trust_repo=True,
)
self.assertEqual(sum_of_state_dict(hub_model.state_dict()), SUM_OF_HUB_EXAMPLE)
@retry(Exception, tries=3)
def test_hub_parse_repo_info(self):
# If the branch is specified we just parse the input and return
self.assertEqual(torch.hub._parse_repo_info("a/b:c"), ("a", "b", "c"))
# For torchvision, the default branch is main
self.assertEqual(
torch.hub._parse_repo_info("pytorch/vision"), ("pytorch", "vision", "main")
)
# For the torchhub_example repo, the default branch is still master
self.assertEqual(
torch.hub._parse_repo_info("ailzhang/torchhub_example"),
("ailzhang", "torchhub_example", "master"),
)
@retry(Exception, tries=3)
def test_load_commit_from_forked_repo(self):
with self.assertRaisesRegex(ValueError, "If it's a commit from a forked repo"):
torch.hub.load("pytorch/vision:4e2c216", "resnet18")
@retry(Exception, tries=3)
@patch("builtins.input", return_value="")
def test_trust_repo_false_emptystring(self, patched_input):
with self.assertRaisesRegex(Exception, "Untrusted repository."):
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=False
)
self._assert_trusted_list_is_empty()
patched_input.assert_called_once()
patched_input.reset_mock()
with self.assertRaisesRegex(Exception, "Untrusted repository."):
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=False
)
self._assert_trusted_list_is_empty()
patched_input.assert_called_once()
@retry(Exception, tries=3)
@patch("builtins.input", return_value="no")
def test_trust_repo_false_no(self, patched_input):
with self.assertRaisesRegex(Exception, "Untrusted repository."):
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=False
)
self._assert_trusted_list_is_empty()
patched_input.assert_called_once()
patched_input.reset_mock()
with self.assertRaisesRegex(Exception, "Untrusted repository."):
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=False
)
self._assert_trusted_list_is_empty()
patched_input.assert_called_once()
@retry(Exception, tries=3)
@patch("builtins.input", return_value="y")
def test_trusted_repo_false_yes(self, patched_input):
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=False)
self._assert_in_trusted_list("ailzhang_torchhub_example")
patched_input.assert_called_once()
# Loading a second time with "check", we don't ask for user input
patched_input.reset_mock()
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check")
patched_input.assert_not_called()
# Loading again with False, we still ask for user input
patched_input.reset_mock()
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=False)
patched_input.assert_called_once()
@retry(Exception, tries=3)
@patch("builtins.input", return_value="no")
def test_trust_repo_check_no(self, patched_input):
with self.assertRaisesRegex(Exception, "Untrusted repository."):
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check"
)
self._assert_trusted_list_is_empty()
patched_input.assert_called_once()
patched_input.reset_mock()
with self.assertRaisesRegex(Exception, "Untrusted repository."):
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check"
)
patched_input.assert_called_once()
@retry(Exception, tries=3)
@patch("builtins.input", return_value="y")
def test_trust_repo_check_yes(self, patched_input):
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check")
self._assert_in_trusted_list("ailzhang_torchhub_example")
patched_input.assert_called_once()
# Loading a second time with "check", we don't ask for user input
patched_input.reset_mock()
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check")
patched_input.assert_not_called()
@retry(Exception, tries=3)
def test_trust_repo_true(self):
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=True)
self._assert_in_trusted_list("ailzhang_torchhub_example")
@retry(Exception, tries=3)
def test_trust_repo_builtin_trusted_owners(self):
torch.hub.load("pytorch/vision", "resnet18", trust_repo="check")
self._assert_trusted_list_is_empty()
@retry(Exception, tries=3)
def test_trust_repo_none(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
torch.hub.load(
"ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=None
)
assert len(w) == 1
assert issubclass(w[-1].category, UserWarning)
assert (
"You are about to download and run code from an untrusted repository"
in str(w[-1].message)
)
self._assert_trusted_list_is_empty()
@retry(Exception, tries=3)
def test_trust_repo_legacy(self):
# We first download a repo and then delete the allowlist file
# Then we check that the repo is indeed trusted without a prompt,
# because it was already downloaded in the past.
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo=True)
os.remove(self.trusted_list_path)
torch.hub.load("ailzhang/torchhub_example", "mnist_zip_1_6", trust_repo="check")
self._assert_trusted_list_is_empty()
if __name__ == "__main__":
run_tests()
|
TestHub
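The hub surface exercised by these tests reduces to a few calls; a short sketch, assuming network access to the example repo used above:

import torch

# Entrypoints come from the repo's hubconf.py; "mnist" is one of them above.
entrypoints = torch.hub.list("ailzhang/torchhub_example", trust_repo=True)
model = torch.hub.load(
    "ailzhang/torchhub_example", "mnist", pretrained=True, trust_repo=True
)
print(torch.hub.get_dir())  # repos and checkpoints are cached under the hub dir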
|
python
|
ansible__ansible
|
lib/ansible/modules/package_facts.py
|
{
"start": 12169,
"end": 12714
}
|
class ____(CLIMgr):
CLI = 'qlist'
atoms = ['category', 'name', 'version', 'ebuild_revision', 'slots', 'prefixes', 'sufixes']
def list_installed(self):
rc, out, err = module.run_command(' '.join([self._cli, '-Iv', '|', 'xargs', '-n', '1024', 'qatom']), use_unsafe_shell=True)
if rc != 0:
raise RuntimeError("Unable to list packages rc=%s : %s" % (rc, to_native(err)))
return out.splitlines()
def get_package_details(self, package):
return dict(zip(self.atoms, package.split()))
|
PORTAGE
|
python
|
Textualize__textual
|
docs/examples/guide/layout/grid_layout7_gutter.py
|
{
"start": 80,
"end": 528
}
|
class ____(App):
CSS_PATH = "grid_layout7_gutter.tcss"
def compose(self) -> ComposeResult:
yield Static("One", classes="box")
yield Static("Two", classes="box")
yield Static("Three", classes="box")
yield Static("Four", classes="box")
yield Static("Five", classes="box")
yield Static("Six", classes="box")
if __name__ == "__main__":
app = GridLayoutExample()
app.run()
|
GridLayoutExample
|
python
|
streamlit__streamlit
|
lib/tests/streamlit/platform_test.py
|
{
"start": 814,
"end": 1158
}
|
class ____(DeltaGeneratorTestCase):
"""Tests the platform module functions"""
@parameterized.expand(["Hello", '{"name":"foo", "type":"bar"}'])
def test_post_parent_message(self, message: str):
post_parent_message(message)
c = self.get_message_from_queue().parent_message
assert c.message == message
|
PlatformTest
|
python
|
django__django
|
django/db/models/fields/mixins.py
|
{
"start": 972,
"end": 1945
}
|
class ____:
_default_hint = ("<valid default>", "<invalid default>")
def _check_default(self):
if (
self.has_default()
and self.default is not None
and not callable(self.default)
):
return [
checks.Warning(
"%s default should be a callable instead of an instance "
"so that it's not shared between all field instances."
% (self.__class__.__name__,),
hint=(
"Use a callable instead, e.g., use `%s` instead of "
"`%s`." % self._default_hint
),
obj=self,
id="fields.E010",
)
]
else:
return []
def check(self, **kwargs):
errors = super().check(**kwargs)
errors.extend(self._check_default())
return errors
|
CheckFieldDefaultMixin
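JSONField is one of the fields that mixes this in; a runnable sketch of the check firing (the model and app label are illustrative):

import django
from django.conf import settings

settings.configure()  # minimal; enough to define models with an explicit app_label
django.setup()

from django.db import models

class Tag(models.Model):
    bad = models.JSONField(default=[])     # shared mutable instance
    good = models.JSONField(default=list)  # callable: fresh value per row

    class Meta:
        app_label = "demo"  # hypothetical app label

problems = Tag._meta.get_field("bad").check()
assert problems and problems[0].id == "fields.E010"
assert Tag._meta.get_field("good").check() == []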
|
python
|
getsentry__sentry-python
|
sentry_sdk/integrations/rq.py
|
{
"start": 948,
"end": 5307
}
|
class ____(Integration):
identifier = "rq"
origin = f"auto.queue.{identifier}"
@staticmethod
def setup_once():
# type: () -> None
version = parse_version(RQ_VERSION)
_check_minimum_version(RqIntegration, version)
old_perform_job = Worker.perform_job
@ensure_integration_enabled(RqIntegration, old_perform_job)
def sentry_patched_perform_job(self, job, *args, **kwargs):
# type: (Any, Job, *Queue, **Any) -> bool
with sentry_sdk.new_scope() as scope:
scope.clear_breadcrumbs()
scope.add_event_processor(_make_event_processor(weakref.ref(job)))
transaction = continue_trace(
job.meta.get("_sentry_trace_headers") or {},
op=OP.QUEUE_TASK_RQ,
name="unknown RQ task",
source=TransactionSource.TASK,
origin=RqIntegration.origin,
)
with capture_internal_exceptions():
transaction.name = job.func_name
with sentry_sdk.start_transaction(
transaction,
custom_sampling_context={"rq_job": job},
):
rv = old_perform_job(self, job, *args, **kwargs)
if self.is_horse:
# We're inside of a forked process and RQ is
# about to call `os._exit`. Make sure that our
# events get sent out.
sentry_sdk.get_client().flush()
return rv
Worker.perform_job = sentry_patched_perform_job
old_handle_exception = Worker.handle_exception
def sentry_patched_handle_exception(self, job, *exc_info, **kwargs):
# type: (Worker, Any, *Any, **Any) -> Any
retry = (
hasattr(job, "retries_left")
and job.retries_left
and job.retries_left > 0
)
failed = job._status == JobStatus.FAILED or job.is_failed
if failed and not retry:
_capture_exception(exc_info)
return old_handle_exception(self, job, *exc_info, **kwargs)
Worker.handle_exception = sentry_patched_handle_exception
old_enqueue_job = Queue.enqueue_job
@ensure_integration_enabled(RqIntegration, old_enqueue_job)
def sentry_patched_enqueue_job(self, job, **kwargs):
# type: (Queue, Any, **Any) -> Any
scope = sentry_sdk.get_current_scope()
if scope.span is not None:
job.meta["_sentry_trace_headers"] = dict(
scope.iter_trace_propagation_headers()
)
return old_enqueue_job(self, job, **kwargs)
Queue.enqueue_job = sentry_patched_enqueue_job
ignore_logger("rq.worker")
def _make_event_processor(weak_job):
# type: (Callable[[], Job]) -> EventProcessor
def event_processor(event, hint):
# type: (Event, dict[str, Any]) -> Event
job = weak_job()
if job is not None:
with capture_internal_exceptions():
extra = event.setdefault("extra", {})
rq_job = {
"job_id": job.id,
"func": job.func_name,
"args": job.args,
"kwargs": job.kwargs,
"description": job.description,
}
if job.enqueued_at:
rq_job["enqueued_at"] = format_timestamp(job.enqueued_at)
if job.started_at:
rq_job["started_at"] = format_timestamp(job.started_at)
extra["rq-job"] = rq_job
if "exc_info" in hint:
with capture_internal_exceptions():
if issubclass(hint["exc_info"][0], JobTimeoutException):
event["fingerprint"] = ["rq", "JobTimeoutException", job.func_name]
return event
return event_processor
def _capture_exception(exc_info, **kwargs):
# type: (ExcInfo, **Any) -> None
client = sentry_sdk.get_client()
event, hint = event_from_exception(
exc_info,
client_options=client.options,
mechanism={"type": "rq", "handled": False},
)
sentry_sdk.capture_event(event, hint=hint)
|
RqIntegration
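Enabling it follows the standard SDK pattern; a minimal sketch (the DSN is a placeholder):

import sentry_sdk
from sentry_sdk.integrations.rq import RqIntegration

sentry_sdk.init(
    dsn="https://examplePublicKey@o0.ingest.sentry.io/0",  # placeholder DSN
    integrations=[RqIntegration()],
    traces_sample_rate=1.0,  # sample the QUEUE_TASK_RQ transactions created above
)
# From here, Worker.perform_job and Queue.enqueue_job run the patched versions.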
|
python
|
scrapy__scrapy
|
tests/test_downloader_handler_twisted_http2.py
|
{
"start": 7602,
"end": 8652
}
|
class ____(H2DownloadHandlerMixin, TestHttpProxyBase):
is_secure = True
expected_http_proxy_request_body = b"/"
@deferred_f_from_coro_f
async def test_download_with_proxy_https_timeout(
self,
proxy_mockserver: ProxyEchoMockServer,
download_handler: DownloadHandlerProtocol,
) -> None:
with pytest.raises(NotImplementedError):
await maybe_deferred_to_future(
super().test_download_with_proxy_https_timeout(
proxy_mockserver, download_handler
)
)
@deferred_f_from_coro_f
async def test_download_with_proxy_without_http_scheme(
self,
proxy_mockserver: ProxyEchoMockServer,
download_handler: DownloadHandlerProtocol,
) -> None:
with pytest.raises(SchemeNotSupported):
await maybe_deferred_to_future(
super().test_download_with_proxy_without_http_scheme(
proxy_mockserver, download_handler
)
)
|
TestHttps2Proxy
|
python
|
openai__openai-python
|
examples/responses/background_async.py
|
{
"start": 187,
"end": 1149
}
|
class ____(BaseModel):
steps: List[Step]
final_answer: str
async def main() -> None:
client = AsyncOpenAI()
id = None
async with await client.responses.create(
input="solve 8x + 31 = 2",
model="gpt-4o-2024-08-06",
background=True,
stream=True,
) as stream:
async for event in stream:
if event.type == "response.created":
id = event.response.id
if "output_text" in event.type:
rich.print(event)
if event.sequence_number == 10:
break
print("Interrupted. Continuing...")
assert id is not None
async with await client.responses.retrieve(
response_id=id,
stream=True,
starting_after=10,
) as stream:
async for event in stream:
if "output_text" in event.type:
rich.print(event)
if __name__ == "__main__":
asyncio.run(main())
|
MathResponse
|
python
|
joke2k__faker
|
tests/providers/test_automotive.py
|
{
"start": 6087,
"end": 6176
}
|
class ____(TestEnPh):
"""Test fil_PH automotive provider methods"""
pass
|
TestFilPh
|
python
|
langchain-ai__langchain
|
libs/langchain_v1/tests/unit_tests/agents/middleware/implementations/test_tool_selection.py
|
{
"start": 10996,
"end": 16555
}
|
class ____:
"""Test always_include functionality."""
def test_always_include_tools_present(self) -> None:
"""Test that always_include tools are always present in the request."""
model_requests = []
@wrap_model_call
def trace_model_requests(request, handler):
model_requests.append(request)
return handler(request)
# Selector picks only search_web
tool_selection_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{
"name": "ToolSelectionResponse",
"id": "1",
"args": {"tools": ["search_web"]},
}
],
),
]
)
)
model = FakeModel(messages=iter([AIMessage(content="Done")]))
# But send_email is always included
tool_selector = LLMToolSelectorMiddleware(
max_tools=1, always_include=["send_email"], model=tool_selection_model
)
agent = create_agent(
model=model,
tools=[get_weather, search_web, send_email],
middleware=[tool_selector, trace_model_requests],
)
agent.invoke({"messages": [HumanMessage("test")]})
# Both selected and always_include tools should be present
assert len(model_requests) > 0
for request in model_requests:
tool_names = [tool.name for tool in request.tools]
assert "search_web" in tool_names
assert "send_email" in tool_names
assert len(tool_names) == 2
def test_always_include_not_counted_against_max(self) -> None:
"""Test that always_include tools don't count against max_tools limit."""
model_requests = []
@wrap_model_call
def trace_model_requests(request, handler):
model_requests.append(request)
return handler(request)
# Selector picks 2 tools
tool_selection_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{
"name": "ToolSelectionResponse",
"id": "1",
"args": {"tools": ["get_weather", "search_web"]},
}
],
),
]
)
)
model = FakeModel(messages=iter([AIMessage(content="Done")]))
# max_tools=2, but we also have 2 always_include tools
tool_selector = LLMToolSelectorMiddleware(
max_tools=2,
always_include=["send_email", "calculate"],
model=tool_selection_model,
)
agent = create_agent(
model=model,
tools=[get_weather, search_web, calculate, send_email],
middleware=[tool_selector, trace_model_requests],
)
agent.invoke({"messages": [HumanMessage("test")]})
# Should have 2 selected + 2 always_include = 4 total
assert len(model_requests) > 0
for request in model_requests:
assert len(request.tools) == 4
tool_names = [tool.name for tool in request.tools]
assert "get_weather" in tool_names
assert "search_web" in tool_names
assert "send_email" in tool_names
assert "calculate" in tool_names
def test_multiple_always_include_tools(self) -> None:
"""Test that multiple always_include tools are all present."""
model_requests = []
@wrap_model_call
def trace_model_requests(request, handler):
model_requests.append(request)
return handler(request)
# Selector picks 1 tool
tool_selection_model = FakeModel(
messages=cycle(
[
AIMessage(
content="",
tool_calls=[
{
"name": "ToolSelectionResponse",
"id": "1",
"args": {"tools": ["get_weather"]},
}
],
),
]
)
)
model = FakeModel(messages=iter([AIMessage(content="Done")]))
tool_selector = LLMToolSelectorMiddleware(
max_tools=1,
always_include=["send_email", "calculate", "get_stock_price"],
model=tool_selection_model,
)
agent = create_agent(
model=model,
tools=[get_weather, search_web, send_email, calculate, get_stock_price],
middleware=[tool_selector, trace_model_requests],
)
agent.invoke({"messages": [HumanMessage("test")]})
# Should have 1 selected + 3 always_include = 4 total
assert len(model_requests) > 0
for request in model_requests:
assert len(request.tools) == 4
tool_names = [tool.name for tool in request.tools]
assert "get_weather" in tool_names
assert "send_email" in tool_names
assert "calculate" in tool_names
assert "get_stock_price" in tool_names
|
TestAlwaysInclude
|
python
|
huggingface__transformers
|
tests/quantization/fbgemm_fp8/test_fbgemm_fp8.py
|
{
"start": 10653,
"end": 11612
}
|
class ____(unittest.TestCase):
def test_linear_preserves_shape(self):
"""
Test that FbgemmFp8Linear preserves shape when in_features == out_features.
"""
from transformers.integrations import FbgemmFp8Linear
with init_empty_weights(include_buffers=True):
linear = FbgemmFp8Linear(1024, 1024, True)
x = torch.rand((17, 23, 1024))
x_ = linear(x)
self.assertEqual(x_.shape, x.shape)
def test_linear_with_diff_feature_size_preserves_shape(self):
"""
Test that FbgemmFp8Linear generates the correct shape when in_features != out_features.
"""
from transformers.integrations import FbgemmFp8Linear
with init_empty_weights(include_buffers=True):
linear = FbgemmFp8Linear(1024, 2048, True)
x = torch.rand((17, 23, 1024))
x_ = linear(x)
self.assertEqual(x_.shape, (17, 23, 2048))
|
FbgemmFp8LinearTest
|
python
|
tensorflow__tensorflow
|
tensorflow/python/data/experimental/kernel_tests/sleep_test.py
|
{
"start": 1105,
"end": 2857
}
|
class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testSleep(self):
self.skipTest("b/123597912")
sleep_microseconds = 100
dataset = dataset_ops.Dataset.range(10).apply(
testing.sleep(sleep_microseconds))
next_element = self.getNext(dataset)
start_time = time.time()
for i in range(10):
self.assertEqual(i, self.evaluate(next_element()))
end_time = time.time()
self.assertGreater(end_time - start_time, (10 * sleep_microseconds) / 1e6)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(next_element())
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSleepCancellation(self):
sleep_microseconds = int(1e6) * 1000
ds = dataset_ops.Dataset.range(1)
ds = ds.apply(testing.sleep(sleep_microseconds))
ds = ds.prefetch(1)
get_next = self.getNext(ds, requires_initialization=True)
with self.cached_session() as sess:
thread = self.checkedThread(self.assert_op_cancelled, args=(get_next(),))
thread.start()
time.sleep(0.2)
sess.close()
thread.join()
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSleepBackgroundCancellation(self):
ds = dataset_ops.Dataset.range(1)
sleep_microseconds = int(1e6) * 1000
ds_sleep = dataset_ops.Dataset.range(1)
ds_sleep = ds.apply(testing.sleep(sleep_microseconds))
ds = ds.concatenate(ds_sleep)
ds = ds.prefetch(1)
get_next = self.getNext(ds, requires_initialization=True)
with self.cached_session():
self.assertEqual(self.evaluate(get_next()), 0)
if __name__ == "__main__":
test.main()
|
SleepTest
|
python
|
openai__openai-python
|
src/openai/types/completion_create_params.py
|
{
"start": 475,
"end": 6479
}
|
class ____(TypedDict, total=False):
model: Required[Union[str, Literal["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]]]
"""ID of the model to use.
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
"""
prompt: Required[Union[str, SequenceNotStr[str], Iterable[int], Iterable[Iterable[int]], None]]
"""
The prompt(s) to generate completions for, encoded as a string, array of
strings, array of tokens, or array of token arrays.
Note that <|endoftext|> is the document separator that the model sees during
training, so if a prompt is not specified the model will generate as if from the
beginning of a new document.
"""
best_of: Optional[int]
"""
Generates `best_of` completions server-side and returns the "best" (the one with
the highest log probability per token). Results cannot be streamed.
When used with `n`, `best_of` controls the number of candidate completions and
`n` specifies how many to return – `best_of` must be greater than `n`.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
"""
echo: Optional[bool]
"""Echo back the prompt in addition to the completion"""
frequency_penalty: Optional[float]
"""Number between -2.0 and 2.0.
Positive values penalize new tokens based on their existing frequency in the
text so far, decreasing the model's likelihood to repeat the same line verbatim.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""
logit_bias: Optional[Dict[str, int]]
"""Modify the likelihood of specified tokens appearing in the completion.
Accepts a JSON object that maps tokens (specified by their token ID in the GPT
tokenizer) to an associated bias value from -100 to 100. You can use this
[tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs.
Mathematically, the bias is added to the logits generated by the model prior to
sampling. The exact effect will vary per model, but values between -1 and 1
should decrease or increase likelihood of selection; values like -100 or 100
should result in a ban or exclusive selection of the relevant token.
As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token
from being generated.
"""
logprobs: Optional[int]
"""
Include the log probabilities on the `logprobs` most likely output tokens, as
well as the chosen tokens. For example, if `logprobs` is 5, the API will return a
list of the 5 most likely tokens. The API will always return the `logprob` of
the sampled token, so there may be up to `logprobs+1` elements in the response.
The maximum value for `logprobs` is 5.
"""
max_tokens: Optional[int]
"""
The maximum number of [tokens](/tokenizer) that can be generated in the
completion.
The token count of your prompt plus `max_tokens` cannot exceed the model's
context length.
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken)
for counting tokens.
"""
n: Optional[int]
"""How many completions to generate for each prompt.
**Note:** Because this parameter generates many completions, it can quickly
consume your token quota. Use carefully and ensure that you have reasonable
settings for `max_tokens` and `stop`.
"""
presence_penalty: Optional[float]
"""Number between -2.0 and 2.0.
Positive values penalize new tokens based on whether they appear in the text so
far, increasing the model's likelihood to talk about new topics.
[See more information about frequency and presence penalties.](https://platform.openai.com/docs/guides/text-generation)
"""
seed: Optional[int]
"""
If specified, our system will make a best effort to sample deterministically,
such that repeated requests with the same `seed` and parameters should return
the same result.
Determinism is not guaranteed, and you should refer to the `system_fingerprint`
response parameter to monitor changes in the backend.
"""
stop: Union[Optional[str], SequenceNotStr[str], None]
"""Not supported with latest reasoning models `o3` and `o4-mini`.
Up to 4 sequences where the API will stop generating further tokens. The
returned text will not contain the stop sequence.
"""
stream_options: Optional[ChatCompletionStreamOptionsParam]
"""Options for streaming response. Only set this when you set `stream: true`."""
suffix: Optional[str]
"""The suffix that comes after a completion of inserted text.
This parameter is only supported for `gpt-3.5-turbo-instruct`.
"""
temperature: Optional[float]
"""What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic.
We generally recommend altering this or `top_p` but not both.
"""
top_p: Optional[float]
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or `temperature` but not both.
"""
user: str
"""
A unique identifier representing your end-user, which can help OpenAI to monitor
and detect abuse.
[Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids).
"""
|
CompletionCreateParamsBase
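These params feed client.completions.create() directly; a short usage sketch (assumes OPENAI_API_KEY is set in the environment):

from openai import OpenAI

client = OpenAI()
completion = client.completions.create(
    model="gpt-3.5-turbo-instruct",
    prompt="Say this is a test.",
    max_tokens=16,
    temperature=0,
    stop=["\n"],
)
print(completion.choices[0].text)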
|
python
|
pydantic__pydantic
|
tests/test_forward_ref.py
|
{
"start": 26491,
"end": 30762
}
|
class ____(BaseModel):
a: UndefinedType
"""
)
# Since we're testing the absence of an error, it's important to confirm pydantic was actually run.
# The presence of the `__pydantic_complete__` is a good indicator of this.
assert module.Foobar.__pydantic_complete__ is False
def test_undefined_types_warning_1b_suppressed_via_config_2b_forward_ref(create_module):
@create_module
def module():
from typing import ForwardRef
from pydantic import BaseModel
UndefinedType = ForwardRef('UndefinedType')
# Because we don't instantiate the type, no error for an undefined type is raised
class Foobar(BaseModel):
a: UndefinedType
# Since we're testing the absence of a warning, it's important to confirm pydantic was actually run.
# The presence of the `__pydantic_complete__` is a good indicator of this.
assert module.Foobar.__pydantic_complete__ is False
def test_undefined_types_warning_raised_by_usage(create_module):
with pytest_raises_user_error_for_undefined_type('Foobar', 'UndefinedType'):
@create_module
def module():
from typing import ForwardRef
from pydantic import BaseModel
UndefinedType = ForwardRef('UndefinedType')
class Foobar(BaseModel):
a: UndefinedType
Foobar(a=1)
def test_rebuild_recursive_schema():
from typing import ForwardRef
class Expressions_(BaseModel):
model_config = dict(undefined_types_warning=False)
items: list["types['Expression']"]
class Expression_(BaseModel):
model_config = dict(undefined_types_warning=False)
Or: ForwardRef("types['allOfExpressions']")
Not: ForwardRef("types['allOfExpression']")
class allOfExpression_(BaseModel):
model_config = dict(undefined_types_warning=False)
Not: ForwardRef("types['Expression']")
class allOfExpressions_(BaseModel):
model_config = dict(undefined_types_warning=False)
items: list["types['Expression']"]
types_namespace = {
'types': {
'Expression': Expression_,
'Expressions': Expressions_,
'allOfExpression': allOfExpression_,
'allOfExpressions': allOfExpressions_,
}
}
models = [allOfExpressions_, Expressions_]
for m in models:
m.model_rebuild(_types_namespace=types_namespace)
def test_forward_ref_in_generic(create_module: Any) -> None:
"""https://github.com/pydantic/pydantic/issues/6503"""
@create_module
def module():
from pydantic import BaseModel
class Foo(BaseModel):
x: dict['type[Bar]', type['Bar']]
class Bar(BaseModel):
pass
Foo = module.Foo
Bar = module.Bar
assert Foo(x={Bar: Bar}).x[Bar] is Bar
def test_forward_ref_in_generic_separate_modules(create_module: Any) -> None:
"""https://github.com/pydantic/pydantic/issues/6503"""
@create_module
def module_1():
from pydantic import BaseModel
class Foo(BaseModel):
x: dict['type[Bar]', type['Bar']]
@create_module
def module_2():
from pydantic import BaseModel
class Bar(BaseModel):
pass
Foo = module_1.Foo
Bar = module_2.Bar
Foo.model_rebuild(_types_namespace={'tp': typing, 'Bar': Bar})
assert Foo(x={Bar: Bar}).x[Bar] is Bar
def test_invalid_forward_ref() -> None:
class CustomType:
"""A custom type that isn't subscriptable."""
msg = "Unable to evaluate type annotation 'CustomType[int]'."
with pytest.raises(TypeError, match=re.escape(msg)):
class Model(BaseModel):
foo: 'CustomType[int]'
def test_pydantic_extra_forward_ref_separate_module(create_module: Any) -> None:
"""https://github.com/pydantic/pydantic/issues/10069"""
@create_module
def module_1():
from pydantic import BaseModel, ConfigDict
MyDict = dict
class Bar(BaseModel):
model_config = ConfigDict(defer_build=True, extra='allow')
__pydantic_extra__: 'MyDict[str, int]'
module_2 = create_module(
f"""
from pydantic import BaseModel
from {module_1.__name__} import Bar
|
Foobar
|
python
|
getsentry__sentry
|
tests/sentry/workflow_engine/endpoints/test_organization_detector_index.py
|
{
"start": 2017,
"end": 2556
}
|
class ____(APITestCase):
endpoint = "sentry-api-0-organization-detector-index"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
self.environment = Environment.objects.create(
organization_id=self.organization.id, name="production"
)
self.data_condition_group = self.create_data_condition_group(
organization_id=self.organization.id,
logic_type=DataConditionGroup.Type.ANY,
)
@region_silo_test
|
OrganizationDetectorIndexBaseTest
|
python
|
kamyu104__LeetCode-Solutions
|
Python/find-the-number-of-subsequences-with-equal-gcd.py
|
{
"start": 2921,
"end": 3749
}
|
class ____(object):
def subsequencePairCount(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
def gcd(a, b):
while b:
a, b = b, a%b
return a
mx = max(nums)
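        # dp[g1][g2] = number of ways to assign the values seen so far to two
        # disjoint subsequences whose GCDs are g1 and g2 (0 means still empty)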
dp = [[0]*(mx+1) for _ in xrange(mx+1)]
dp[0][0] = 1
for x in nums:
new_dp = [row[:] for row in dp]
for g1 in reversed(xrange(mx+1)):
for g2 in reversed(xrange(mx+1)):
ng1, ng2 = gcd(g1, x), gcd(g2, x)
new_dp[ng1][g2] = (new_dp[ng1][g2]+dp[g1][g2])%MOD
new_dp[g1][ng2] = (new_dp[g1][ng2]+dp[g1][g2])%MOD
dp = new_dp
return reduce(lambda accu, x: (accu+x)%MOD, (dp[g][g] for g in xrange(1, mx+1)), 0)
|
SolutionTLE
|
python
|
Netflix__metaflow
|
test/core/tests/large_artifact.py
|
{
"start": 67,
"end": 1364
}
|
class ____(MetaflowTest):
"""
Test that you can serialize large objects (over 4GB)
with Python3 - although on OSX, some versions of Python3 fail
to serialize objects over 2GB - https://bugs.python.org/issue24658
so YMMV.
"""
PRIORITY = 2
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@steps(0, ["singleton"], required=True)
def step_single(self):
import sys
if sys.version_info[0] > 2:
self.large = b"x" * int(4.1 * 1024**3)
self.noop = False
else:
self.noop = True
@steps(0, ["end"])
def step_end(self):
import sys
if sys.version_info[0] > 2:
assert_equals(self.large, b"x" * int(4.1 * 1024**3))
@steps(1, ["all"])
def step_all(self):
pass
def check_results(self, flow, checker):
import sys
noop = next(iter(checker.artifact_dict("end", "noop").values()))["noop"]
if not noop and sys.version_info[0] > 2:
checker.assert_artifact("end", "large", b"x" * int(4.1 * 1024**3))
|
LargeArtifactTest
|
python
|
falconry__falcon
|
examples/things_advanced_asgi.py
|
{
"start": 4693,
"end": 7053
}
|
class ____:
def __init__(self, db):
self.db = db
self.logger = logging.getLogger('thingsapp.' + __name__)
async def on_get(self, req, resp, user_id):
marker = req.get_param('marker') or ''
limit = req.get_param_as_int('limit') or 50
try:
result = await self.db.get_things(marker, limit)
except Exception as ex:
self.logger.error(ex)
description = (
'Aliens have attacked our base! We will '
'be back as soon as we fight them off. '
'We appreciate your patience.'
)
raise falcon.HTTPServiceUnavailable(
title='Service Outage', description=description, retry_after=30
)
# NOTE: Normally you would use resp.media for this sort of thing;
# this example serves only to demonstrate how the context can be
# used to pass arbitrary values between middleware components,
# hooks, and resources.
resp.context.result = result
resp.set_header('Powered-By', 'Falcon')
resp.status = falcon.HTTP_200
@falcon.before(max_body(64 * 1024))
async def on_post(self, req, resp, user_id):
try:
doc = req.context.doc
except AttributeError:
raise falcon.HTTPBadRequest(
title='Missing thing',
description='A thing must be submitted in the request body.',
)
proper_thing = await self.db.add_thing(doc)
resp.status = falcon.HTTP_201
resp.location = '/{}/things/{}'.format(user_id, proper_thing['id'])
# The app instance is an ASGI callable
app = falcon.asgi.App(
middleware=[
AuthMiddleware(),
RequireJSON(),
JSONTranslator(),
]
)
db = StorageEngine()
things = ThingsResource(db)
app.add_route('/{user_id}/things', things)
# If a responder ever raises an instance of StorageError, pass control to
# the given handler.
app.add_error_handler(StorageError, StorageError.handle)
# Proxy some things to another service; this example shows how you might
# send parts of an API off to a legacy system that hasn't been upgraded
# yet, or perhaps is a single cluster that all data centers have to share.
sink = SinkAdapter()
app.add_sink(sink, r'/search/(?P<engine>ddg|y)\Z')
|
ThingsResource
|
python
|
pytorch__pytorch
|
torch/nn/parameter.py
|
{
"start": 4176,
"end": 7806
}
|
class ____:
_allowed_methods = [
torch.Tensor.__hash__,
torch.Tensor.size,
torch.Tensor.copy_,
torch.Tensor.is_complex,
torch.Tensor.is_floating_point,
torch.Tensor.half,
torch.Tensor.float,
torch.Tensor.double,
torch.Tensor.char,
torch.Tensor.short,
torch.Tensor.int,
torch.Tensor.long,
torch.Tensor.cuda,
torch.Tensor.cpu,
torch.Tensor.to,
torch.Tensor.get_device,
torch._has_compatible_shallow_copy_type,
]
def materialize(self, shape, device=None, dtype=None) -> None:
r"""Create a Parameter or Tensor with the same properties of the uninitialized one.
Given a shape, it materializes a parameter in the same device
and with the same `dtype` as the current one or the specified ones in the
arguments.
Args:
shape : (tuple): the shape for the materialized tensor.
device (:class:`torch.device`): the desired device of the parameters
and buffers in this module. Optional.
dtype (:class:`torch.dtype`): the desired floating point type of
the floating point parameters and buffers in this module. Optional.
"""
if device is None:
device = self.data.device
if dtype is None:
dtype = self.data.dtype
self.data = torch.empty(shape, device=device, dtype=dtype)
# pyrefly: ignore [bad-override, missing-attribute]
self.__class__ = self.cls_to_become
@property
def shape(self):
raise RuntimeError(
"Can't access the shape of an uninitialized parameter or buffer. "
"This error usually happens in `load_state_dict` when trying to load "
"an uninitialized parameter into an initialized one. "
"Call `forward` to initialize the parameters before accessing their attributes."
)
def share_memory_(self):
raise RuntimeError(
"Can't share memory on an uninitialized parameter or buffer. "
"Call `forward` to initialize the parameters before calling "
"`module.share_memory()`."
)
def __repr__(self) -> str:
return f"<{self.__class__.__name__}>"
def __reduce_ex__(self, proto):
# See Note [Don't serialize hooks]
# pyrefly: ignore [missing-attribute]
return (self.__class__, (self.requires_grad,))
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
# method-wrapper is to detect access to Tensor properties that are
# wrapped in descriptors
if func in cls._allowed_methods or func.__class__.__name__ == "method-wrapper":
if kwargs is None:
kwargs = {}
# pyrefly: ignore [missing-attribute]
return super().__torch_function__(func, types, args, kwargs)
raise ValueError(
f"Attempted to use an uninitialized parameter in {func}. "
"This error happens when you are using a `LazyModule` or "
f"explicitly manipulating `torch.nn.parameter.{cls.__name__}` "
"objects. When using LazyModules Call `forward` with a dummy batch "
"to initialize the parameters before calling torch functions"
)
def is_lazy(param: Any) -> bool:
"""
Returns whether ``param`` is an ``UninitializedParameter`` or ``UninitializedBuffer``.
Args:
param (Any): the input to check.
"""
return isinstance(param, UninitializedTensorMixin)
|
UninitializedTensorMixin
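This mixin is what lazy modules rely on; a small runnable sketch:

import torch
from torch import nn
from torch.nn.parameter import is_lazy

layer = nn.LazyLinear(out_features=4)  # weight and bias start uninitialized
assert is_lazy(layer.weight)
layer(torch.randn(2, 8))               # first forward materializes parameters
assert not is_lazy(layer.weight)
assert layer.weight.shape == (4, 8)    # (out_features, in_features)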
|
python
|
doocs__leetcode
|
solution/1200-1299/1228.Missing Number In Arithmetic Progression/Solution2.py
|
{
"start": 0,
"end": 256
}
|
class ____:
def missingNumber(self, arr: List[int]) -> int:
n = len(arr)
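        # n values remain from an (n+1)-term progression, so arr[-1] - arr[0]
        # still spans n steps of the common difference d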
d = (arr[-1] - arr[0]) // n
for i in range(1, n):
if arr[i] != arr[i - 1] + d:
return arr[i - 1] + d
return arr[0]
|
Solution
|
python
|
django-haystack__django-haystack
|
haystack/utils/loading.py
|
{
"start": 2677,
"end": 4061
}
|
class ____:
def __init__(self, connections_info):
self.connections_info = connections_info
self.thread_local = threading.local()
self._index = None
def ensure_defaults(self, alias):
try:
conn = self.connections_info[alias]
except KeyError:
raise ImproperlyConfigured(
"The key '%s' isn't an available connection." % alias
)
if not conn.get("ENGINE"):
conn["ENGINE"] = "haystack.backends.simple_backend.SimpleEngine"
def __getitem__(self, key):
if not hasattr(self.thread_local, "connections"):
self.thread_local.connections = {}
elif key in self.thread_local.connections:
return self.thread_local.connections[key]
self.ensure_defaults(key)
self.thread_local.connections[key] = load_backend(
self.connections_info[key]["ENGINE"]
)(using=key)
return self.thread_local.connections[key]
def reload(self, key):
if not hasattr(self.thread_local, "connections"):
self.thread_local.connections = {}
try:
del self.thread_local.connections[key]
except KeyError:
pass
return self.__getitem__(key)
def all(self): # noqa A003
return [self[alias] for alias in self.connections_info]
|
ConnectionHandler
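In haystack this class backs the module-level `connections` object; a sketch, assuming a Django project with HAYSTACK_CONNECTIONS configured:

from haystack import connections

engine = connections["default"]         # engine instantiated lazily, per thread
backend = engine.get_backend()          # backend object for this alias
engine = connections.reload("default")  # drop and rebuild the cached engine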
|
python
|
pennersr__django-allauth
|
allauth/headless/mfa/response.py
|
{
"start": 2672,
"end": 2875
}
|
class ____(APIResponse):
def __init__(self, request, authenticator):
data = _authenticator_data(authenticator, sensitive=True)
super().__init__(request, data=data)
|
RecoveryCodesResponse
|
python
|
apache__airflow
|
providers/google/src/airflow/providers/google/cloud/hooks/workflows.py
|
{
"start": 1639,
"end": 16188
}
|
class ____(GoogleBaseHook):
"""
Hook for Google GCP APIs.
All the methods in the hook where project_id is used must be called with
keyword arguments rather than positional.
"""
def get_workflows_client(self) -> WorkflowsClient:
"""Return WorkflowsClient object."""
return WorkflowsClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
def get_executions_client(self) -> ExecutionsClient:
"""Return ExecutionsClient object."""
return ExecutionsClient(credentials=self.get_credentials(), client_info=CLIENT_INFO)
@GoogleBaseHook.fallback_to_default_project_id
def create_workflow(
self,
workflow: dict,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Create a new workflow.
If a workflow with the specified name already exists in the
specified project and location, the long-running operation will
return [ALREADY_EXISTS][google.rpc.Code.ALREADY_EXISTS] error.
:param workflow: Required. Workflow to be created.
:param workflow_id: Required. The ID of the workflow to be created.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
parent = f"projects/{project_id}/locations/{location}"
return client.create_workflow(
request={"parent": parent, "workflow": workflow, "workflow_id": workflow_id},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_workflow(
self,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Workflow:
"""
Get details of a single Workflow.
:param workflow_id: Required. The ID of the workflow to be created.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.get_workflow(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
def update_workflow(
self,
workflow: dict | Workflow,
update_mask: FieldMask | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Update an existing workflow.
Running this method has no impact on already running
executions of the workflow. A new revision of the
workflow may be created as a result of a successful
update operation. In that case, such revision will be
used in new workflow executions.
:param workflow: Required. Workflow to be created.
:param update_mask: List of fields to be updated. If not present,
the entire workflow will be updated.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
return client.update_workflow(
request={"workflow": workflow, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def delete_workflow(
self,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Operation:
"""
Delete a workflow with the specified name and all running executions of the workflow.
:param workflow_id: Required. The ID of the workflow to be created.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.delete_workflow(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def list_workflows(
self,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
filter_: str | None = None,
order_by: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListWorkflowsPager:
"""
List Workflows in a given project and location; the default order is not specified.
:param filter_: Filter to restrict results to specific workflows.
:param order_by: Comma-separated list of fields that
specifies the order of the results. Default sorting order for a field is ascending.
To specify descending order for a field, append a "desc" suffix.
If not specified, the results will be returned in an unspecified order.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_workflows_client()
parent = f"projects/{project_id}/locations/{location}"
return client.list_workflows(
request={"parent": parent, "filter": filter_, "order_by": order_by},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def create_execution(
self,
workflow_id: str,
location: str,
execution: dict,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Execution:
"""
Create a new execution using the latest revision of the given workflow.
:param execution: Required. Input parameters of the execution represented as a dictionary.
:param workflow_id: Required. The ID of the workflow.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
parent = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
execution = {k: str(v) if isinstance(v, dict) else v for k, v in execution.items()}
return client.create_execution(
request={"parent": parent, "execution": execution},
retry=retry,
timeout=timeout,
metadata=metadata,
)
@GoogleBaseHook.fallback_to_default_project_id
def get_execution(
self,
workflow_id: str,
execution_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Execution:
"""
Return an execution for the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}/executions/{execution_id}"
return client.get_execution(request={"name": name}, retry=retry, timeout=timeout, metadata=metadata)
@GoogleBaseHook.fallback_to_default_project_id
def cancel_execution(
self,
workflow_id: str,
execution_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> Execution:
"""
Cancel an execution using the given ``workflow_id`` and ``execution_id``.
:param workflow_id: Required. The ID of the workflow.
:param execution_id: Required. The ID of the execution.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
name = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}/executions/{execution_id}"
return client.cancel_execution(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
@GoogleBaseHook.fallback_to_default_project_id
def list_executions(
self,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
) -> ListExecutionsPager:
"""
Return a list of executions which belong to the workflow with the given name.
The method returns executions of all workflow revisions. Returned
executions are ordered by their start time (newest first).
:param workflow_id: Required. The ID of the workflow to be created.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
metadata = metadata or ()
client = self.get_executions_client()
parent = f"projects/{project_id}/locations/{location}/workflows/{workflow_id}"
return client.list_executions(
request={"parent": parent}, retry=retry, timeout=timeout, metadata=metadata
)
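    # A minimal usage sketch (hypothetical IDs; assumes default Google Cloud
    # credentials and that this hook class is instantiable as-is):
    #
    #   hook = WorkflowsHook()
    #   pager = hook.list_executions(
    #       workflow_id="my-workflow", location="us-central1", project_id="my-project"
    #   )
    #   for execution in pager:
    #       print(execution.name, execution.state)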
|
WorkflowsHook
|
python
|
openai__openai-python
|
src/openai/types/realtime/realtime_tracing_config.py
|
{
"start": 268,
"end": 871
}
|
class ____(BaseModel):
group_id: Optional[str] = None
"""
The group id to attach to this trace to enable filtering and grouping in the
Traces Dashboard.
"""
metadata: Optional[object] = None
"""
The arbitrary metadata to attach to this trace to enable filtering in the Traces
Dashboard.
"""
workflow_name: Optional[str] = None
"""The name of the workflow to attach to this trace.
This is used to name the trace in the Traces Dashboard.
"""
RealtimeTracingConfig: TypeAlias = Union[Literal["auto"], TracingConfiguration, None]
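# Illustrative values this alias accepts (a sketch, not an exhaustive list):
#   "auto"                                      -> default tracing behavior
#   TracingConfiguration(workflow_name="demo")  -> explicit configuration
#   None                                        -> tracing disabled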
|
TracingConfiguration
|
python
|
keon__algorithms
|
tests/test_maths.py
|
{
"start": 7352,
"end": 7689
}
|
class ____(unittest.TestCase):
"""[summary]
Test for the file primes_sieve_of_eratosthenes.py
Arguments:
unittest {[type]} -- [description]
"""
def test_primes(self):
self.assertListEqual([2, 3, 5, 7], get_primes(7))
self.assertRaises(ValueError, get_primes, -42)
|
TestPrimesSieveOfEratosthenes
|
python
|
celery__celery
|
t/unit/utils/test_dispatcher.py
|
{
"start": 851,
"end": 5667
}
|
class ____:
"""Test suite for dispatcher (barely started)"""
def _testIsClean(self, signal):
"""Assert that everything has been cleaned up automatically"""
assert not signal.has_listeners()
assert signal.receivers == []
def test_exact(self):
a_signal.connect(receiver_1_arg, sender=self)
try:
expected = [(receiver_1_arg, 'test')]
result = a_signal.send(sender=self, val='test')
assert result == expected
finally:
a_signal.disconnect(receiver_1_arg, sender=self)
self._testIsClean(a_signal)
def test_ignored_sender(self):
a_signal.connect(receiver_1_arg)
try:
expected = [(receiver_1_arg, 'test')]
result = a_signal.send(sender=self, val='test')
assert result == expected
finally:
a_signal.disconnect(receiver_1_arg)
self._testIsClean(a_signal)
def test_garbage_collected(self):
a = Callable()
a_signal.connect(a.a, sender=self)
expected = []
del a
garbage_collect()
result = a_signal.send(sender=self, val='test')
assert result == expected
self._testIsClean(a_signal)
def test_multiple_registration(self):
a = Callable()
result = None
try:
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
a_signal.connect(a)
result = a_signal.send(sender=self, val='test')
assert len(result) == 1
assert len(a_signal.receivers) == 1
finally:
del a
del result
garbage_collect()
self._testIsClean(a_signal)
def test_uid_registration(self):
def uid_based_receiver_1(**kwargs):
pass
def uid_based_receiver_2(**kwargs):
pass
a_signal.connect(uid_based_receiver_1, dispatch_uid='uid')
try:
a_signal.connect(uid_based_receiver_2, dispatch_uid='uid')
assert len(a_signal.receivers) == 1
finally:
a_signal.disconnect(dispatch_uid='uid')
self._testIsClean(a_signal)
def test_robust(self):
def fails(val, **kwargs):
raise ValueError('this')
a_signal.connect(fails)
try:
a_signal.send(sender=self, val='test')
finally:
a_signal.disconnect(fails)
self._testIsClean(a_signal)
def test_disconnection(self):
receiver_1 = Callable()
receiver_2 = Callable()
receiver_3 = Callable()
try:
try:
a_signal.connect(receiver_1)
a_signal.connect(receiver_2)
a_signal.connect(receiver_3)
finally:
a_signal.disconnect(receiver_1)
del receiver_2
garbage_collect()
finally:
a_signal.disconnect(receiver_3)
self._testIsClean(a_signal)
def test_retry(self):
class non_local:
counter = 1
def succeeds_eventually(val, **kwargs):
non_local.counter += 1
if non_local.counter < 3:
raise ValueError('this')
return val
a_signal.connect(succeeds_eventually, sender=self, retry=True)
try:
result = a_signal.send(sender=self, val='test')
assert non_local.counter == 3
assert result[0][1] == 'test'
finally:
a_signal.disconnect(succeeds_eventually, sender=self)
self._testIsClean(a_signal)
def test_retry_with_dispatch_uid(self):
uid = 'abc123'
a_signal.connect(receiver_1_arg, sender=self, retry=True,
dispatch_uid=uid)
assert a_signal.receivers[0][0][0] == uid
a_signal.disconnect(receiver_1_arg, sender=self, dispatch_uid=uid)
self._testIsClean(a_signal)
def test_boundmethod(self):
a = Callable()
a_signal.connect(a.a, sender=self)
expected = [(a.a, 'test')]
garbage_collect()
result = a_signal.send(sender=self, val='test')
assert result == expected
del a, result, expected
garbage_collect()
self._testIsClean(a_signal)
def test_disconnect_retryable_decorator(self):
# Regression test for https://github.com/celery/celery/issues/9119
@a_signal.connect(sender=self, retry=True)
def succeeds_eventually(val, **kwargs):
return val
try:
a_signal.send(sender=self, val='test')
finally:
a_signal.disconnect(succeeds_eventually, sender=self)
self._testIsClean(a_signal)
|
test_Signal
|
python
|
kamyu104__LeetCode-Solutions
|
Python/k-th-smallest-in-lexicographical-order.py
|
{
"start": 35,
"end": 1188
}
|
class ____(object):
def findKthNumber(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
result = 0
cnts = [0] * 10
        for i in range(1, 10):
cnts[i] = cnts[i - 1] * 10 + 1
nums = []
i = n
while i:
nums.append(i % 10)
            i //= 10
total, target = n, 0
i = len(nums) - 1
while i >= 0 and k > 0:
target = target*10 + nums[i]
start = int(i == len(nums)-1)
            for j in range(start, 10):
candidate = result*10 + j
if candidate < target:
num = cnts[i+1]
elif candidate > target:
num = cnts[i]
else:
num = total - cnts[i + 1]*(j-start) - cnts[i]*(9-j)
if k > num:
k -= num
else:
result = candidate
k -= 1
total = num-1
break
i -= 1
return result
# Time: O(logn * logn)
# Space: O(logn)
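# Quick sanity check (values from the problem statement): for n = 13, k = 2,
# the lexicographic order is 1, 10, 11, 12, 13, 2, ..., so the answer is 10.
#   assert Solution().findKthNumber(13, 2) == 10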
|
Solution
|
python
|
doocs__leetcode
|
lcof2/剑指 Offer II 098. 路径的数目/Solution2.py
|
{
"start": 0,
"end": 247
}
|
class ____:
def uniquePaths(self, m: int, n: int) -> int:
f = [[1] * n for _ in range(m)]
for i in range(1, m):
for j in range(1, n):
f[i][j] = f[i - 1][j] + f[i][j - 1]
return f[-1][-1]
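# Example (classic grid case): a 3 x 7 grid has C(8, 2) = 28 unique paths.
#   assert Solution().uniquePaths(3, 7) == 28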
|
Solution
|
python
|
getsentry__sentry
|
src/sentry/issues/endpoints/organization_shortid.py
|
{
"start": 1058,
"end": 4926
}
|
class ____(GroupEndpoint):
owner = ApiOwner.ISSUES
publish_status = {
"GET": ApiPublishStatus.PUBLIC,
}
@extend_schema(
operation_id="Resolve a Short ID",
parameters=[
GlobalParams.ORG_ID_OR_SLUG,
OpenApiParameter(
name="issue_id",
description="The short ID of the issue to resolve.",
required=True,
type=str,
location="path",
),
],
responses={
200: inline_sentry_response_serializer(
"ShortIdLookupResponse",
ShortIdLookupResponse,
),
400: RESPONSE_BAD_REQUEST,
401: RESPONSE_UNAUTHORIZED,
403: RESPONSE_FORBIDDEN,
404: RESPONSE_NOT_FOUND,
},
examples=[
OpenApiExample(
name="Short ID Lookup",
value={
"group": {
"annotations": [],
"assignedTo": {
"id": "1",
"name": "John Doe",
"email": "john@example.com",
"type": "user",
},
"count": "1",
"culprit": "raven.scripts.runner in main",
"firstSeen": "2018-11-06T21:19:55Z",
"hasSeen": False,
"id": "1",
"isBookmarked": False,
"isPublic": False,
"isSubscribed": True,
"lastSeen": "2018-11-06T21:19:55Z",
"level": "error",
"logger": None,
"metadata": {"title": "This is an example Python exception"},
"numComments": 0,
"permalink": "https://sentry.io/the-interstellar-jurisdiction/pump-station/issues/1/",
"project": {
"id": "2",
"name": "Pump Station",
"slug": "pump-station",
"platform": "python",
},
"shareId": "abc123",
"shortId": "PUMP-STATION-1",
"status": "unresolved",
"statusDetails": {},
"subscriptionDetails": None,
"title": "This is an example Python exception",
"type": "default",
"userCount": 0,
"issueCategory": "error",
"issueType": "error",
"platform": "python",
"priority": "medium",
"priorityLockedAt": None,
"seerFixabilityScore": 0.5,
"seerAutofixLastTriggered": None,
"substatus": "ongoing",
},
"groupId": "1",
"organizationSlug": "the-interstellar-jurisdiction",
"projectSlug": "pump-station",
"shortId": "PUMP-STATION-1",
},
)
],
)
def get(self, request: Request, group: Group) -> Response:
"""
Resolve a short ID to the project slug and group details.
"""
return Response(
{
"organizationSlug": group.project.organization.slug,
"projectSlug": group.project.slug,
"groupId": str(group.id),
"group": serialize(group, request.user),
"shortId": group.qualified_short_id,
}
)
|
ShortIdLookupEndpoint
|
python
|
pandas-dev__pandas
|
pandas/core/arrays/_arrow_string_mixins.py
|
{
"start": 475,
"end": 12919
}
|
class ____:
_pa_array: pa.ChunkedArray
def __init__(self, *args, **kwargs) -> None:
raise NotImplementedError
def _from_pyarrow_array(self, pa_array) -> Self:
raise NotImplementedError
def _convert_bool_result(self, result, na=lib.no_default, method_name=None):
# Convert a bool-dtype result to the appropriate result type
raise NotImplementedError
def _convert_int_result(self, result):
# Convert an integer-dtype result to the appropriate result type
raise NotImplementedError
def _apply_elementwise(self, func: Callable) -> list[list[Any]]:
raise NotImplementedError
def _str_len(self):
result = pc.utf8_length(self._pa_array)
return self._convert_int_result(result)
def _str_lower(self) -> Self:
return self._from_pyarrow_array(pc.utf8_lower(self._pa_array))
def _str_upper(self) -> Self:
return self._from_pyarrow_array(pc.utf8_upper(self._pa_array))
def _str_strip(self, to_strip=None) -> Self:
if to_strip is None:
result = pc.utf8_trim_whitespace(self._pa_array)
else:
result = pc.utf8_trim(self._pa_array, characters=to_strip)
return self._from_pyarrow_array(result)
def _str_lstrip(self, to_strip=None) -> Self:
if to_strip is None:
result = pc.utf8_ltrim_whitespace(self._pa_array)
else:
result = pc.utf8_ltrim(self._pa_array, characters=to_strip)
return self._from_pyarrow_array(result)
def _str_rstrip(self, to_strip=None) -> Self:
if to_strip is None:
result = pc.utf8_rtrim_whitespace(self._pa_array)
else:
result = pc.utf8_rtrim(self._pa_array, characters=to_strip)
return self._from_pyarrow_array(result)
def _str_pad(
self,
width: int,
side: Literal["left", "right", "both"] = "left",
fillchar: str = " ",
) -> Self:
if side == "left":
pa_pad = pc.utf8_lpad
elif side == "right":
pa_pad = pc.utf8_rpad
elif side == "both":
if pa_version_under17p0:
# GH#59624 fall back to object dtype
from pandas import array
obj_arr = self.astype(object, copy=False) # type: ignore[attr-defined]
obj = array(obj_arr, dtype=object)
result = obj._str_pad(width, side, fillchar) # type: ignore[attr-defined]
return type(self)._from_sequence(result, dtype=self.dtype) # type: ignore[attr-defined]
else:
# GH#54792
# https://github.com/apache/arrow/issues/15053#issuecomment-2317032347
lean_left = (width % 2) == 0
pa_pad = partial(pc.utf8_center, lean_left_on_odd_padding=lean_left)
else:
raise ValueError(
f"Invalid side: {side}. Side must be one of 'left', 'right', 'both'"
)
return self._from_pyarrow_array(
pa_pad(self._pa_array, width=width, padding=fillchar)
)
def _str_get(self, i: int) -> Self:
lengths = pc.utf8_length(self._pa_array)
if i >= 0:
out_of_bounds = pc.greater_equal(i, lengths)
start = i
stop = i + 1
step = 1
else:
out_of_bounds = pc.greater(-i, lengths)
start = i
stop = i - 1
step = -1
not_out_of_bounds = pc.invert(out_of_bounds.fill_null(True))
selected = pc.utf8_slice_codeunits(
self._pa_array, start=start, stop=stop, step=step
)
null_value = pa.scalar(None, type=self._pa_array.type)
result = pc.if_else(not_out_of_bounds, selected, null_value)
return self._from_pyarrow_array(result)
def _str_slice(
self, start: int | None = None, stop: int | None = None, step: int | None = None
) -> Self:
if start is None:
if step is not None and step < 0:
# GH#59710
start = -1
else:
start = 0
if step is None:
step = 1
return self._from_pyarrow_array(
pc.utf8_slice_codeunits(self._pa_array, start=start, stop=stop, step=step)
)
def _str_slice_replace(
self, start: int | None = None, stop: int | None = None, repl: str | None = None
) -> Self:
if repl is None:
repl = ""
if start is None:
start = 0
if stop is None:
stop = np.iinfo(np.int64).max
return self._from_pyarrow_array(
pc.utf8_replace_slice(self._pa_array, start, stop, repl)
)
def _str_replace(
self,
pat: str | re.Pattern,
repl: str | Callable,
n: int = -1,
case: bool = True,
flags: int = 0,
regex: bool = True,
) -> Self:
if (
isinstance(pat, re.Pattern)
or callable(repl)
or not case
or flags
or (isinstance(repl, str) and r"\g<" in repl)
):
raise NotImplementedError(
"replace is not supported with a re.Pattern, callable repl, "
"case=False, flags!=0, or when the replacement string contains "
"named group references (\\g<...>)"
)
func = pc.replace_substring_regex if regex else pc.replace_substring
# https://github.com/apache/arrow/issues/39149
# GH 56404, unexpected behavior with negative max_replacements with pyarrow.
pa_max_replacements = None if n < 0 else n
result = func(
self._pa_array,
pattern=pat,
replacement=repl,
max_replacements=pa_max_replacements,
)
return self._from_pyarrow_array(result)
def _str_capitalize(self) -> Self:
return self._from_pyarrow_array(pc.utf8_capitalize(self._pa_array))
def _str_title(self) -> Self:
return self._from_pyarrow_array(pc.utf8_title(self._pa_array))
def _str_swapcase(self) -> Self:
return self._from_pyarrow_array(pc.utf8_swapcase(self._pa_array))
def _str_removeprefix(self, prefix: str):
starts_with = pc.starts_with(self._pa_array, pattern=prefix)
removed = pc.utf8_slice_codeunits(self._pa_array, len(prefix))
result = pc.if_else(starts_with, removed, self._pa_array)
return self._from_pyarrow_array(result)
def _str_removesuffix(self, suffix: str):
ends_with = pc.ends_with(self._pa_array, pattern=suffix)
removed = pc.utf8_slice_codeunits(self._pa_array, 0, stop=-len(suffix))
result = pc.if_else(ends_with, removed, self._pa_array)
return self._from_pyarrow_array(result)
def _str_startswith(
self, pat: str | tuple[str, ...], na: Scalar | lib.NoDefault = lib.no_default
):
if isinstance(pat, str):
result = pc.starts_with(self._pa_array, pattern=pat)
else:
if len(pat) == 0:
# For empty tuple we return null for missing values and False
# for valid values.
result = pc.if_else(pc.is_null(self._pa_array), None, False)
else:
result = pc.starts_with(self._pa_array, pattern=pat[0])
for p in pat[1:]:
result = pc.or_(result, pc.starts_with(self._pa_array, pattern=p))
return self._convert_bool_result(result, na=na, method_name="startswith")
def _str_endswith(
self, pat: str | tuple[str, ...], na: Scalar | lib.NoDefault = lib.no_default
):
if isinstance(pat, str):
result = pc.ends_with(self._pa_array, pattern=pat)
else:
if len(pat) == 0:
# For empty tuple we return null for missing values and False
# for valid values.
result = pc.if_else(pc.is_null(self._pa_array), None, False)
else:
result = pc.ends_with(self._pa_array, pattern=pat[0])
for p in pat[1:]:
result = pc.or_(result, pc.ends_with(self._pa_array, pattern=p))
return self._convert_bool_result(result, na=na, method_name="endswith")
def _str_isalnum(self):
result = pc.utf8_is_alnum(self._pa_array)
return self._convert_bool_result(result)
def _str_isalpha(self):
result = pc.utf8_is_alpha(self._pa_array)
return self._convert_bool_result(result)
def _str_isascii(self):
result = pc.string_is_ascii(self._pa_array)
return self._convert_bool_result(result)
def _str_isdecimal(self):
result = pc.utf8_is_decimal(self._pa_array)
return self._convert_bool_result(result)
def _str_isdigit(self):
if pa_version_under21p0:
# https://github.com/pandas-dev/pandas/issues/61466
res_list = self._apply_elementwise(str.isdigit)
return self._convert_bool_result(
pa.chunked_array(res_list, type=pa.bool_())
)
result = pc.utf8_is_digit(self._pa_array)
return self._convert_bool_result(result)
def _str_islower(self):
result = pc.utf8_is_lower(self._pa_array)
return self._convert_bool_result(result)
def _str_isnumeric(self):
result = pc.utf8_is_numeric(self._pa_array)
return self._convert_bool_result(result)
def _str_isspace(self):
result = pc.utf8_is_space(self._pa_array)
return self._convert_bool_result(result)
def _str_istitle(self):
result = pc.utf8_is_title(self._pa_array)
return self._convert_bool_result(result)
def _str_isupper(self):
result = pc.utf8_is_upper(self._pa_array)
return self._convert_bool_result(result)
def _str_contains(
self,
pat,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
regex: bool = True,
):
if flags:
raise NotImplementedError(f"contains not implemented with {flags=}")
if regex:
pa_contains = pc.match_substring_regex
else:
pa_contains = pc.match_substring
result = pa_contains(self._pa_array, pat, ignore_case=not case)
return self._convert_bool_result(result, na=na, method_name="contains")
def _str_match(
self,
pat: str,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
):
if not pat.startswith("^"):
pat = f"^({pat})"
return self._str_contains(pat, case, flags, na, regex=True)
def _str_fullmatch(
self,
pat: str,
case: bool = True,
flags: int = 0,
na: Scalar | lib.NoDefault = lib.no_default,
):
if (not pat.endswith("$") or pat.endswith("\\$")) and not pat.startswith("^"):
pat = f"^({pat})$"
elif not pat.endswith("$") or pat.endswith("\\$"):
pat = f"^({pat[1:]})$"
elif not pat.startswith("^"):
pat = f"^({pat[0:-1]})$"
return self._str_match(pat, case, flags, na)
def _str_find(self, sub: str, start: int = 0, end: int | None = None):
if (start == 0 or start is None) and end is None:
result = pc.find_substring(self._pa_array, sub)
else:
if sub == "":
# GH#56792
res_list = self._apply_elementwise(
lambda val: val.find(sub, start, end)
)
return self._convert_int_result(pa.chunked_array(res_list))
if start is None:
start_offset = 0
start = 0
elif start < 0:
start_offset = pc.add(start, pc.utf8_length(self._pa_array))
start_offset = pc.if_else(pc.less(start_offset, 0), 0, start_offset)
else:
start_offset = start
slices = pc.utf8_slice_codeunits(self._pa_array, start, stop=end)
result = pc.find_substring(slices, sub)
found = pc.not_equal(result, pa.scalar(-1, type=result.type))
offset_result = pc.add(result, start_offset)
result = pc.if_else(found, offset_result, -1)
return self._convert_int_result(result)
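    # The mixin defers to pyarrow.compute throughout; a standalone sketch of
    # the removeprefix idiom used above (assumes pyarrow is installed):
    #
    #   import pyarrow as pa
    #   import pyarrow.compute as pc
    #   arr = pa.chunked_array([["foo_bar", "bar"]])
    #   starts = pc.starts_with(arr, pattern="foo_")
    #   trimmed = pc.utf8_slice_codeunits(arr, len("foo_"))
    #   pc.if_else(starts, trimmed, arr)  # -> ["bar", "bar"]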
|
ArrowStringArrayMixin
|
python
|
celery__celery
|
celery/contrib/migrate.py
|
{
"start": 737,
"end": 824
}
|
class ____(Exception):
"""Semi-predicate used to signal filter stop."""
|
StopFiltering
|
python
|
huggingface__transformers
|
src/transformers/models/qwen3_moe/modeling_qwen3_moe.py
|
{
"start": 13744,
"end": 14473
}
|
class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Qwen3MoeRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
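    # Shape-preserving by construction; a minimal smoke test (assumes torch
    # is importable):
    #   norm = Qwen3MoeRMSNorm(hidden_size=64)
    #   out = norm(torch.randn(2, 8, 64))  # -> torch.Size([2, 8, 64])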
|
Qwen3MoeRMSNorm
|
python
|
fsspec__filesystem_spec
|
fsspec/implementations/memory.py
|
{
"start": 406,
"end": 9623
}
|
class ____(AbstractFileSystem):
"""A filesystem based on a dict of BytesIO objects
This is a global filesystem so instances of this class all point to the same
in memory filesystem.
"""
store: ClassVar[dict[str, Any]] = {} # global, do not overwrite!
pseudo_dirs = [""] # global, do not overwrite!
protocol = "memory"
root_marker = "/"
@classmethod
def _strip_protocol(cls, path):
if isinstance(path, PurePath):
if isinstance(path, PureWindowsPath):
return LocalFileSystem._strip_protocol(path)
else:
path = stringify_path(path)
path = path.removeprefix("memory://")
if "::" in path or "://" in path:
return path.rstrip("/")
path = path.lstrip("/").rstrip("/")
return "/" + path if path else ""
def ls(self, path, detail=True, **kwargs):
path = self._strip_protocol(path)
if path in self.store:
# there is a key with this exact name
if not detail:
return [path]
return [
{
"name": path,
"size": self.store[path].size,
"type": "file",
"created": self.store[path].created.timestamp(),
}
]
paths = set()
starter = path + "/"
out = []
for p2 in tuple(self.store):
if p2.startswith(starter):
if "/" not in p2[len(starter) :]:
# exact child
out.append(
{
"name": p2,
"size": self.store[p2].size,
"type": "file",
"created": self.store[p2].created.timestamp(),
}
)
elif len(p2) > len(starter):
# implied child directory
ppath = starter + p2[len(starter) :].split("/", 1)[0]
if ppath not in paths:
out = out or []
out.append(
{
"name": ppath,
"size": 0,
"type": "directory",
}
)
paths.add(ppath)
for p2 in self.pseudo_dirs:
if p2.startswith(starter):
if "/" not in p2[len(starter) :]:
# exact child pdir
if p2 not in paths:
out.append({"name": p2, "size": 0, "type": "directory"})
paths.add(p2)
else:
# directory implied by deeper pdir
ppath = starter + p2[len(starter) :].split("/", 1)[0]
if ppath not in paths:
out.append({"name": ppath, "size": 0, "type": "directory"})
paths.add(ppath)
if not out:
if path in self.pseudo_dirs:
# empty dir
return []
raise FileNotFoundError(path)
if detail:
return out
return sorted([f["name"] for f in out])
def mkdir(self, path, create_parents=True, **kwargs):
path = self._strip_protocol(path)
if path in self.store or path in self.pseudo_dirs:
raise FileExistsError(path)
if self._parent(path).strip("/") and self.isfile(self._parent(path)):
raise NotADirectoryError(self._parent(path))
if create_parents and self._parent(path).strip("/"):
try:
self.mkdir(self._parent(path), create_parents, **kwargs)
except FileExistsError:
pass
if path and path not in self.pseudo_dirs:
self.pseudo_dirs.append(path)
def makedirs(self, path, exist_ok=False):
try:
self.mkdir(path, create_parents=True)
except FileExistsError:
if not exist_ok:
raise
def pipe_file(self, path, value, mode="overwrite", **kwargs):
"""Set the bytes of given file
Avoids copies of the data if possible
"""
mode = "xb" if mode == "create" else "wb"
self.open(path, mode=mode, data=value)
def rmdir(self, path):
path = self._strip_protocol(path)
if path == "":
# silently avoid deleting FS root
return
if path in self.pseudo_dirs:
if not self.ls(path):
self.pseudo_dirs.remove(path)
else:
raise OSError(ENOTEMPTY, "Directory not empty", path)
else:
raise FileNotFoundError(path)
def info(self, path, **kwargs):
logger.debug("info: %s", path)
path = self._strip_protocol(path)
if path in self.pseudo_dirs or any(
p.startswith(path + "/") for p in list(self.store) + self.pseudo_dirs
):
return {
"name": path,
"size": 0,
"type": "directory",
}
elif path in self.store:
filelike = self.store[path]
return {
"name": path,
"size": filelike.size,
"type": "file",
"created": getattr(filelike, "created", None),
}
else:
raise FileNotFoundError(path)
def _open(
self,
path,
mode="rb",
block_size=None,
autocommit=True,
cache_options=None,
**kwargs,
):
path = self._strip_protocol(path)
if "x" in mode and self.exists(path):
raise FileExistsError
if path in self.pseudo_dirs:
raise IsADirectoryError(path)
parent = path
while len(parent) > 1:
parent = self._parent(parent)
if self.isfile(parent):
raise FileExistsError(parent)
if mode in ["rb", "ab", "r+b", "a+b"]:
if path in self.store:
f = self.store[path]
if "a" in mode:
# position at the end of file
f.seek(0, 2)
else:
# position at the beginning of file
f.seek(0)
return f
else:
raise FileNotFoundError(path)
elif mode in {"wb", "w+b", "xb", "x+b"}:
if "x" in mode and self.exists(path):
raise FileExistsError
m = MemoryFile(self, path, kwargs.get("data"))
if not self._intrans:
m.commit()
return m
else:
name = self.__class__.__name__
raise ValueError(f"unsupported file mode for {name}: {mode!r}")
def cp_file(self, path1, path2, **kwargs):
path1 = self._strip_protocol(path1)
path2 = self._strip_protocol(path2)
if self.isfile(path1):
self.store[path2] = MemoryFile(
self, path2, self.store[path1].getvalue()
) # implicit copy
elif self.isdir(path1):
if path2 not in self.pseudo_dirs:
self.pseudo_dirs.append(path2)
else:
raise FileNotFoundError(path1)
def cat_file(self, path, start=None, end=None, **kwargs):
logger.debug("cat: %s", path)
path = self._strip_protocol(path)
try:
return bytes(self.store[path].getbuffer()[start:end])
except KeyError as e:
raise FileNotFoundError(path) from e
def _rm(self, path):
path = self._strip_protocol(path)
try:
del self.store[path]
except KeyError as e:
raise FileNotFoundError(path) from e
def modified(self, path):
path = self._strip_protocol(path)
try:
return self.store[path].modified
except KeyError as e:
raise FileNotFoundError(path) from e
def created(self, path):
path = self._strip_protocol(path)
try:
return self.store[path].created
except KeyError as e:
raise FileNotFoundError(path) from e
def isfile(self, path):
path = self._strip_protocol(path)
return path in self.store
def rm(self, path, recursive=False, maxdepth=None):
if isinstance(path, str):
path = self._strip_protocol(path)
else:
path = [self._strip_protocol(p) for p in path]
paths = self.expand_path(path, recursive=recursive, maxdepth=maxdepth)
for p in reversed(paths):
if self.isfile(p):
self.rm_file(p)
# If the expanded path doesn't exist, it is only because the expanded
# path was a directory that does not exist in self.pseudo_dirs. This
# is possible if you directly create files without making the
# directories first.
elif not self.exists(p):
continue
else:
self.rmdir(p)
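    # Usage sketch (the store is global, so state is shared across instances):
    #   fs = MemoryFileSystem()
    #   fs.pipe_file("/tmp/hello.txt", b"hi")
    #   assert fs.cat_file("/tmp/hello.txt") == b"hi"
    #   assert fs.ls("/tmp", detail=False) == ["/tmp/hello.txt"]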
|
MemoryFileSystem
|
python
|
redis__redis-py
|
redis/commands/search/reducers.py
|
{
"start": 3775,
"end": 4220
}
|
class ____(Reducer):
"""
Returns a random sample of items from the dataset, from the given property
"""
NAME = "RANDOM_SAMPLE"
def __init__(self, field: str, size: int) -> None:
"""
        ### Parameters
        **field**: Field to sample from.
        **size**: Return this many items (fewer may be returned).
"""
args = [field, str(size)]
super().__init__(*args)
self._field = field
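    # Hypothetical aggregation usage (index and field names are made up):
    #   from redis.commands.search.aggregation import AggregateRequest
    #   req = AggregateRequest("*").group_by("@brand", random_sample("@price", 3))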
|
random_sample
|
python
|
qdrant__qdrant-client
|
qdrant_client/http/models/models.py
|
{
"start": 125095,
"end": 125358
}
|
class ____(BaseModel):
name: str = Field(..., description="")
creation_time: Optional[str] = Field(default=None, description="")
size: int = Field(..., description="")
checksum: Optional[str] = Field(default=None, description="")
|
SnapshotDescription
|
python
|
kamyu104__LeetCode-Solutions
|
Python/ternary-expression-parser.py
|
{
"start": 29,
"end": 681
}
|
class ____(object):
def parseTernary(self, expression):
"""
:type expression: str
:rtype: str
"""
if not expression:
return ""
stack = []
for c in expression[::-1]:
if stack and stack[-1] == '?':
stack.pop() # pop '?'
first = stack.pop()
stack.pop() # pop ':'
second = stack.pop()
if c == 'T':
stack.append(first)
else:
stack.append(second)
else:
stack.append(c)
return str(stack[-1])
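    # Examples from the problem statement:
    #   assert Solution().parseTernary("T?2:3") == "2"
    #   assert Solution().parseTernary("F?1:T?4:5") == "4"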
|
Solution
|
python
|
getsentry__sentry
|
tests/sentry/tasks/test_store.py
|
{
"start": 485,
"end": 11803
}
|
class ____(Plugin2):
def get_event_preprocessors(self, data):
def remove_extra(data):
del data["extra"]
return data
def put_on_hold(data):
data["unprocessed"] = True
return data
if data.get("platform") == "mattlang":
return [remove_extra, lambda x: None]
if data.get("platform") == "noop":
return [lambda data: None]
if data.get("platform") == "holdmeclose":
return [put_on_hold]
return []
def is_enabled(self, project=None) -> bool:
return True
@pytest.fixture
def mock_save_event():
with mock.patch("sentry.tasks.store.save_event") as m:
yield m
@pytest.fixture
def mock_save_event_transaction():
with mock.patch("sentry.tasks.store.save_event_transaction") as m:
yield m
@pytest.fixture
def mock_process_event():
with mock.patch("sentry.tasks.store.process_event") as m:
yield m
@pytest.fixture
def mock_symbolicate_event():
with mock.patch("sentry.tasks.symbolication.symbolicate_event") as m:
yield m
@pytest.fixture
def mock_event_processing_store():
with mock.patch("sentry.services.eventstore.processing.event_processing_store") as m:
yield m
@pytest.fixture
def mock_transaction_processing_store():
with mock.patch("sentry.services.eventstore.processing.transaction_processing_store") as m:
yield m
@pytest.fixture
def mock_refund():
with mock.patch.object(quotas, "refund") as m:
yield m
@django_db_all
def test_move_to_process_event(
default_project, mock_process_event, mock_save_event, mock_symbolicate_event, register_plugin
):
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "mattlang",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
}
preprocess_event(cache_key="", data=data)
assert mock_symbolicate_event.delay.call_count == 0
assert mock_process_event.delay.call_count == 1
assert mock_save_event.delay.call_count == 0
@django_db_all
def test_move_to_save_event(
default_project, mock_process_event, mock_save_event, mock_symbolicate_event, register_plugin
):
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "NOTMATTLANG",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
}
preprocess_event(cache_key="", data=data)
assert mock_symbolicate_event.delay.call_count == 0
assert mock_process_event.delay.call_count == 0
assert mock_save_event.delay.call_count == 1
@django_db_all
def test_process_event_mutate_and_save(
default_project, mock_event_processing_store, mock_save_event, register_plugin
):
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "mattlang",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
}
mock_event_processing_store.get.return_value = data
mock_event_processing_store.store.return_value = "e:1"
process_event(cache_key="e:1", start_time=1)
# The event mutated, so make sure we save it back
((_, (event,), _),) = mock_event_processing_store.store.mock_calls
assert "extra" not in event
mock_save_event.delay.assert_called_once_with(
cache_key="e:1", data=None, start_time=1, event_id=EVENT_ID, project_id=default_project.id
)
@django_db_all
def test_process_event_no_mutate_and_save(
default_project, mock_event_processing_store, mock_save_event, register_plugin
):
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "noop",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
}
mock_event_processing_store.get.return_value = data
process_event(cache_key="e:1", start_time=1)
# The event did not mutate, so we shouldn't reset it in cache
assert mock_event_processing_store.store.call_count == 0
mock_save_event.delay.assert_called_once_with(
cache_key="e:1", data=None, start_time=1, event_id=EVENT_ID, project_id=default_project.id
)
@django_db_all
def test_process_event_unprocessed(
default_project, mock_event_processing_store, mock_save_event, register_plugin
):
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "holdmeclose",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
}
mock_event_processing_store.get.return_value = data
mock_event_processing_store.store.return_value = "e:1"
process_event(cache_key="e:1", start_time=1)
((_, (event,), _),) = mock_event_processing_store.store.mock_calls
assert event["unprocessed"] is True
mock_save_event.delay.assert_called_once_with(
cache_key="e:1", data=None, start_time=1, event_id=EVENT_ID, project_id=default_project.id
)
@django_db_all
def test_hash_discarded_raised(default_project, mock_refund, register_plugin) -> None:
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "NOTMATTLANG",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
}
now = time()
mock_save = mock.Mock()
mock_save.side_effect = HashDiscarded
with mock.patch.object(EventManager, "save", mock_save):
save_event(data=data, start_time=now)
# should be caught
@pytest.fixture(params=["org", "project"])
def options_model(request, default_organization, default_project):
if request.param == "org":
return default_organization
elif request.param == "project":
return default_project
else:
raise AssertionError(request.param)
@django_db_all
@pytest.mark.parametrize("setting_method", ["datascrubbers", "piiconfig"])
def test_scrubbing_after_processing(
default_project,
default_organization,
mock_save_event,
register_plugin,
mock_event_processing_store,
setting_method: str,
options_model,
):
class TestPlugin(Plugin2):
def get_event_preprocessors(self, data):
# Right now we do not scrub data from event preprocessors
def more_extra(data):
data["extra"]["ooo2"] = "event preprocessor"
return data
return [more_extra]
def is_enabled(self, project=None) -> bool:
return True
register_plugin(globals(), TestPlugin)
if setting_method == "datascrubbers":
options_model.update_option("sentry:sensitive_fields", ["o"])
options_model.update_option("sentry:scrub_data", True)
elif setting_method == "piiconfig":
options_model.update_option(
"sentry:relay_pii_config", '{"applications": {"extra.ooo": ["@anything:replace"]}}'
)
else:
raise AssertionError(setting_method)
data = {
"project": default_project.id,
"platform": "python",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"ooo": "remove me"},
}
mock_event_processing_store.get.return_value = data
mock_event_processing_store.store.return_value = "e:1"
# We pass data_has_changed=True to pretend that we've added "extra" attribute
# to "data" shortly before (e.g. during symbolication).
process_event(cache_key="e:1", start_time=1, data_has_changed=True)
((_, (event,), _),) = mock_event_processing_store.store.mock_calls
assert event["extra"] == {"ooo": "[Filtered]", "ooo2": "event preprocessor"}
mock_save_event.delay.assert_called_once_with(
cache_key="e:1", data=None, start_time=1, event_id=EVENT_ID, project_id=default_project.id
)
@django_db_all
def test_killswitch() -> None:
assert not is_process_disabled(1, "asdasdasd", "null")
options.set("store.load-shed-process-event-projects-gradual", {1: 0.0})
assert not is_process_disabled(1, "asdasdasd", "null")
options.set("store.load-shed-process-event-projects-gradual", {1: 1.0})
assert is_process_disabled(1, "asdasdasd", "null")
options.set("store.load-shed-process-event-projects-gradual", {})
@django_db_all
def test_transactions_store(
default_project, register_plugin, mock_transaction_processing_store
) -> None:
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "transaction",
"event_id": EVENT_ID,
"type": "transaction",
"transaction": "minimal_transaction",
"timestamp": time(),
"start_timestamp": time() - 1,
}
mock_transaction_processing_store.store.return_value = "e:1"
mock_transaction_processing_store.get.return_value = data
with mock.patch("sentry.event_manager.EventManager.save", return_value=None):
save_event_transaction(
cache_key="e:1",
data=None,
start_time=1,
event_id=EVENT_ID,
project_id=default_project.id,
)
mock_transaction_processing_store.get.assert_called_once_with("e:1")
@django_db_all
def test_store_consumer_type(
default_project,
mock_save_event,
mock_save_event_transaction,
register_plugin,
mock_event_processing_store,
mock_transaction_processing_store,
):
register_plugin(globals(), BasicPreprocessorPlugin)
data = {
"project": default_project.id,
"platform": "python",
"logentry": {"formatted": "test"},
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
"timestamp": time(),
}
mock_event_processing_store.get.return_value = data
mock_event_processing_store.store.return_value = "e:2"
process_event(cache_key="e:2", start_time=1)
mock_event_processing_store.get.assert_called_once_with("e:2")
mock_save_event.delay.assert_called_once_with(
cache_key="e:2",
data=None,
start_time=1,
event_id=EVENT_ID,
project_id=default_project.id,
)
transaction_data = {
"project": default_project.id,
"platform": "transaction",
"event_id": EVENT_ID,
"extra": {"foo": "bar"},
"timestamp": time(),
"start_timestamp": time() - 1,
}
mock_transaction_processing_store.get.return_value = transaction_data
mock_transaction_processing_store.store.return_value = "tx:3"
with mock.patch("sentry.event_manager.EventManager.save", return_value=None):
save_event_transaction(
cache_key="tx:3",
data=None,
start_time=1,
event_id=EVENT_ID,
project_id=default_project.id,
)
mock_transaction_processing_store.get.assert_called_once_with("tx:3")
mock_transaction_processing_store.delete_by_key.assert_called_once_with("tx:3")
mock_transaction_processing_store.store.assert_not_called()
|
BasicPreprocessorPlugin
|
python
|
mitmproxy__pdoc
|
test/testdata/misc.py
|
{
"start": 6245,
"end": 6587
}
|
class ____:
"""This is a class that wraps a function. It will be documented correctly."""
def __init__(self, func):
self._func = func
@ClassDecorator
def another_decorated_function(arg: str) -> str:
"""This is another decorated function. It will not be documented correctly."""
raise NotImplementedError
|
ClassDecorator
|
python
|
dask__distributed
|
distributed/dashboard/components/scheduler.py
|
{
"start": 46244,
"end": 50930
}
|
class ____(DashboardComponent):
"""Bar chart showing time spend in action by key prefix"""
@log_errors
def __init__(self, scheduler, **kwargs):
self.last = 0
self.scheduler = scheduler
if TaskStreamPlugin.name not in self.scheduler.plugins:
self.scheduler.add_plugin(TaskStreamPlugin(self.scheduler))
compute_data = {
"times": [0.2, 0.1],
"formatted_time": ["0.2 ms", "2.8 us"],
"angles": [3.14, 0.785],
"color": [ts_color_lookup["transfer"], ts_color_lookup["compute"]],
"names": ["sum", "sum_partial"],
}
self.compute_source = ColumnDataSource(data=compute_data)
fig = figure(
title="Compute Time Per Task",
tools="",
name="compute_time_per_key",
x_range=["a", "b"],
**kwargs,
)
rect = fig.vbar(
source=self.compute_source,
x="names",
top="times",
width=0.7,
color="color",
)
fig.y_range.start = 0
fig.yaxis.axis_label = "Time (s)"
fig.yaxis[0].formatter = NumeralTickFormatter(format="0")
fig.yaxis.ticker = AdaptiveTicker(**TICKS_1024)
fig.xaxis.major_label_orientation = XLABEL_ORIENTATION
rect.nonselection_glyph = None
fig.xaxis.minor_tick_line_alpha = 0
fig.xgrid.visible = False
fig.toolbar_location = None
hover = HoverTool()
hover.tooltips = """
<div>
<p><b>Name:</b> @names</p>
<p><b>Time:</b> @formatted_time</p>
</div>
"""
hover.point_policy = "follow_mouse"
fig.add_tools(hover)
fig.add_layout(
Title(
text="Note: tasks less than 2% of max are not displayed",
text_font_style="italic",
),
"below",
)
self.fig = fig
tab1 = TabPanel(child=fig, title="Bar Chart")
fig2 = figure(
title="Compute Time Per Task",
tools="",
name="compute_time_per_key-pie",
x_range=(-0.5, 1.0),
**kwargs,
)
fig2.wedge(
x=0,
y=1,
radius=0.4,
start_angle=cumsum("angles", include_zero=True),
end_angle=cumsum("angles"),
line_color="white",
fill_color="color",
legend_field="names",
source=self.compute_source,
)
fig2.axis.axis_label = None
fig2.axis.visible = False
fig2.grid.grid_line_color = None
fig2.add_layout(
Title(
text="Note: tasks less than 2% of max are not displayed",
text_font_style="italic",
),
"below",
)
hover = HoverTool()
hover.tooltips = """
<div>
<p><b>Name:</b> @names</p>
<p><b>Time:</b> @formatted_time</p>
</div>
"""
hover.point_policy = "follow_mouse"
fig2.add_tools(hover)
self.wedge_fig = fig2
tab2 = TabPanel(child=fig2, title="Pie Chart")
self.root = Tabs(tabs=[tab1, tab2], sizing_mode="stretch_both")
@without_property_validation
@log_errors
def update(self):
compute_times = defaultdict(float)
for name, tp in self.scheduler.task_prefixes.items():
for action, t in tp.all_durations.items():
if action == "compute":
compute_times[name] += t
if not compute_times:
return
# order by largest time first
compute_times = sorted(compute_times.items(), key=second, reverse=True)
# Keep only times which are 2% of max or greater
max_time = compute_times[0][1] * 0.02
compute_colors = []
compute_names = []
compute_time = []
total_time = 0
for name, t in compute_times:
if t < max_time:
break
compute_names.append(name)
compute_colors.append(ts_color_of(name))
compute_time.append(t)
total_time += t
angles = [t / total_time * 2 * math.pi for t in compute_time]
self.fig.x_range.factors = compute_names
compute_result = dict(
angles=angles,
times=compute_time,
color=compute_colors,
names=compute_names,
formatted_time=[format_time(t) for t in compute_time],
)
update(self.compute_source, compute_result)
|
ComputePerKey
|
python
|
joke2k__faker
|
faker/providers/currency/es/__init__.py
|
{
"start": 46,
"end": 5989
}
|
class ____(CurrencyProvider):
# Format: (code, name)
currencies = (
("AED", "Dírham de los Emiratos Árabes Unidos"),
("AFN", "Afghaní"),
("ALL", "Lek albanés"),
("AMD", "Dram armenio"),
("ANG", "Florín de las Antillas Holandesas"),
("AOA", "Kwanza angoleño"),
("ARS", "Peso argentino"),
("AUD", "Dólar australiano"),
("AWG", "Florín arubeño"),
("AZN", "Manat azerbaiyano"),
("BAM", "Marco bosnioherzegovino"),
("BBD", "Dólar barbadense"),
("BDT", "Taka bangladesí"),
("BGN", "Lev búlgaro"),
("BHD", "Dinar bahreiní"),
("BIF", "Franco burundés"),
("BMD", "Dólar de Bermudas"),
("BND", "Dólar bruneano"),
("BOB", "Boliviano"),
("BRL", "Real brasileño"),
("BSD", "Dólar bahameño"),
("BTN", "Ngultrum butanés"),
("BWP", "Pula de Botswana"),
("BYR", "Rublio bielurruso"),
("BZD", "Dólar beliceño"),
("CAD", "Dólar canadiense"),
("CDF", "Franco congolés"),
("CHF", "Franco suizo"),
("CLP", "Peso chileno"),
("CNY", "Yuan"),
("COP", "Peso colombiano"),
("CRC", "Colón costarricense"),
("CUC", "Peso cubano convertible"),
("CUP", "Peso subano"),
("CVE", "Escudo de Cabo Verde"),
("CZK", "Corona checa"),
("DJF", "Franco yibutiano"),
("DKK", "Corona danesa"),
("DOP", "Peso dominicano"),
("DZD", "Dinar argelino"),
("EGP", "Libra egipcia"),
("ERN", "Nafka"),
("ETB", "Bir de Etiopía"),
("EUR", "Euro"),
("FJD", "Dólar fiyiano"),
("FKP", "Libra de las islas Falkland"),
("GBP", "Libra esterlina"),
("GEL", "Larí georgiano"),
("GGP", "Libra de Guernsey"),
("GHS", "Cedi"),
("GIP", "Libra de Gibraltar"),
("GMD", "Dalasi"),
("GNF", "Franco guineano"),
("GTQ", "Quetzal guatemalteco"),
("GYD", "Dólar guyanés"),
("HKD", "Dólar hongkonés"),
("HNL", "Lempira hondureño"),
("HRK", "Kuna croata"),
("HTG", "Gourde haitiano"),
("HUF", "Forinto húngaro"),
("IDR", "Rupia indonesia"),
("ILS", "Séquel israelí"),
("NIS", "Nuevo Séquel israelí"),
("IMP", "Libra manesa"),
("INR", "Rupia india"),
("IQD", "Dinar iraquí"),
("IRR", "Rial iraní"),
("ISK", "Corona islandesa"),
("JEP", "Libra de Jersey"),
("JMD", "Dólar jamaicano"),
("JOD", "Dinar jordano"),
("JPY", "Yen japonés"),
("KES", "Chelín keniano"),
("KGS", "Som kirguís"),
("KHR", "Riel camboyano"),
("KMF", "Franco comorense"),
("KPW", "Won norcoreano"),
("KRW", "Krahn Occidental"),
("KWD", "Dinar kuwaití"),
("KYD", "Dólar de las islas Cayman"),
("KZT", "Tenge kazako"),
("LAK", "Kip laosiano"),
("LBP", "Libra libanesa"),
("LKR", "Rupia esrilanquesa"),
("LRD", "Dólar liberiano"),
("LSL", "Loti lesothense"),
("LTL", "Litas lituana"),
("LYD", "Dinar libio"),
("MAD", "Dirham marroquí"),
("MDL", "Leu moldavo"),
("MGA", "Ariary malgache"),
("MKD", "Denar normacedonio"),
("MMK", "Kyat birmano"),
("MNT", "Tugrik mongol"),
("MOP", "Pataca macaense"),
("MRO", "Ouguiya mauritano"),
("MUR", "Rupia mauritana"),
("MVR", "Rupia de Maldivas"),
("MWK", "Kwacha malauí"),
("MXN", "Peso mexicano"),
("MYR", "Ringgit"),
("MZN", "Metical mozambiqueño"),
("NAD", "Dólar namibio"),
("NGN", "Naira nigeriano"),
("NIO", "Córdoba nicaragüense"),
("NOK", "Corona noruega"),
("NPR", "Rupia nepalí"),
("NZD", "Dólar neozelandés"),
("OMR", "Rial omaní"),
("PAB", "Balboa panameño"),
("PEN", "Sol peruano"),
("PGK", "Kina"),
("PHP", "Peso filipino"),
("PKR", "Rupia pakistaní"),
("PLN", "Złoty polaco"),
("PYG", "Guaraní paraguayo"),
("QAR", "Riyal catarí"),
("RON", "Leu rumano"),
("RSD", "Dinar serbio"),
("RUB", "Rublo ruso"),
("RWF", "Franco ruandés"),
("SAR", "Riyal saudí"),
("SBD", "Dólar de las islas Solomon"),
("SCR", "Rupia seychellense"),
("SDG", "Libra sudanesa"),
("SEK", "Corona sueca"),
("SGD", "Dólar de Singapur"),
("SHP", "Libra de Santa Elena"),
("SLL", "Leona"),
("SOS", "Chelín somalí"),
("SPL", "Luigino"),
("SRD", "Dólar surinamés"),
("STD", "Dobra santotomense"),
("SVC", "Colón salvadoreño"),
("SYP", "Libra siria"),
("SZL", "Lilangeni"),
("THB", "Baht tailandés"),
("TJS", "Somoni tayiko"),
("TMT", "Manat turcomano"),
("TND", "Dinar tunecino"),
("TOP", "Pa'anga tongano"),
("TRY", "Lira turca"),
("TTD", "Dólar de Trinidad and Tobago"),
("TVD", "Dólar tuvaluano"),
("TWD", "Nuevo dólar taiwanés"),
("TZS", "Chelín tanzano"),
("UAH", "Grivna ucraniano"),
("UGX", "Chelín ugandés"),
("USD", "Dólar de Estados Unidos"),
("UYU", "Peso uruguayo"),
("UZS", "Soʻm Uzbekistani"),
("VEF", "Bolívar venezolano"),
("VND", "Đồng vietnamita"),
("VUV", "Vanuatu vatu"),
("WST", "Tālā samoano"),
("XAF", "Franco centro africano"),
("XCD", "Dólar del Caribe Oriental"),
("XDR", "Derechos especiales de giro"),
("XOF", "Franco de África occidental"),
("XPF", "Franco CFP"),
("YER", "Rial yemení"),
("ZAR", "Rand sudafricano"),
("ZMW", "Kwacha zambiano"),
("ZWD", "Dólar zimbabuense"),
)
|
Provider
|
python
|
tensorflow__tensorflow
|
tensorflow/lite/python/metrics/metrics_portable.py
|
{
"start": 1193,
"end": 1827
}
|
class ____(metrics_interface.TFLiteMetricsInterface):
"""TFLite metrics helper."""
def __init__(self,
model_hash: Optional[Text] = None,
model_path: Optional[Text] = None) -> None:
pass
def increase_counter_debugger_creation(self):
pass
def increase_counter_interpreter_creation(self):
pass
def increase_counter_converter_attempt(self):
pass
def increase_counter_converter_success(self):
pass
def set_converter_param(self, name, value):
pass
def set_converter_error(self, error_data):
pass
def set_converter_latency(self, value):
pass
|
TFLiteMetrics
|
python
|
getsentry__sentry
|
src/sentry/workflow_engine/migrations/0099_backfill_metric_issue_detectorgroup.py
|
{
"start": 524,
"end": 3921
}
|
class ____(Enum):
LATEST = ["project_id", "-timestamp", "-event_id"]
OLDEST = ["project_id", "timestamp", "event_id"]
RECOMMENDED = [
"-replay.id",
"-trace.sampled",
"num_processing_errors",
"-profile.id",
"-timestamp",
"-event_id",
]
def get_oldest_or_latest_event(
group: Any,
ordering: EventOrdering,
conditions: Sequence[Condition] | None = None,
start: datetime | None = None,
end: datetime | None = None,
) -> Any:
dataset = Dataset.IssuePlatform
all_conditions = [
Condition(Column("project_id"), Op.IN, [group.project.id]),
Condition(Column("group_id"), Op.IN, [group.id]),
]
if conditions:
all_conditions.extend(conditions)
events = eventstore.backend.get_events_snql(
organization_id=group.project.organization_id,
group_id=group.id,
start=start,
end=end,
conditions=all_conditions,
limit=1,
orderby=ordering.value,
referrer="Group.get_latest",
dataset=dataset,
tenant_ids={"organization_id": group.project.organization_id},
)
if events:
return events[0].for_group(group)
return None
def backfill_metric_issue_detectorgroup(
apps: StateApps, schema_editor: BaseDatabaseSchemaEditor
) -> None:
"""
Backfill the DetectorGroup table for metric issues.
"""
Group = apps.get_model("sentry", "Group")
DetectorGroup = apps.get_model("workflow_engine", "DetectorGroup")
Detector = apps.get_model("workflow_engine", "Detector")
for group in Group.objects.filter(type=8001, detectorgroup__isnull=True).select_related(
"project"
): # metric issues
# figure out the detector
latest_event = get_oldest_or_latest_event(group, EventOrdering.LATEST)
if not latest_event:
DetectorGroup.objects.create(
group_id=group.id,
detector_id=None,
)
logger.info(
"No latest event found for group, creating DetectorGroup with null detector",
extra={"group_id": group.id},
)
continue
occurrence = latest_event.occurrence
if not occurrence:
logger.info(
"No occurrence found for latest event", extra={"event_id": latest_event.event_id}
)
continue
detector_id = occurrence.evidence_data.get("detector_id")
if detector_id is None:
logger.info(
"No detector id found for occurrence", extra={"occurrence_id": occurrence.id}
)
continue
# try to fetch detector
detector = Detector.objects.filter(id=detector_id).first()
if detector is None:
DetectorGroup.objects.create(
group_id=group.id,
detector_id=None,
)
logger.info(
"Creating DetectorGroup with null detector",
extra={"group_id": group.id, "detector_id": detector_id},
)
continue
DetectorGroup.objects.create(
group_id=group.id,
detector_id=detector.id,
)
logger.info(
"Creating DetectorGroup",
extra={"group_id": group.id, "detector_id": detector_id},
)
|
EventOrdering
|
python
|
arrow-py__arrow
|
tests/test_locales.py
|
{
"start": 33808,
"end": 35119
}
|
class ____:
def test_plurals2(self):
assert self.locale._format_timeframe("hours", 0) == "0 часа"
assert self.locale._format_timeframe("hours", 1) == "1 час"
assert self.locale._format_timeframe("hours", 2) == "2 часа"
assert self.locale._format_timeframe("hours", 4) == "4 часа"
assert self.locale._format_timeframe("hours", 5) == "5 часа"
assert self.locale._format_timeframe("hours", 21) == "21 час"
assert self.locale._format_timeframe("hours", 22) == "22 часа"
assert self.locale._format_timeframe("hours", 25) == "25 часа"
# feminine grammatical gender should be tested separately
assert self.locale._format_timeframe("minutes", 0) == "0 минути"
assert self.locale._format_timeframe("minutes", 1) == "1 минута"
assert self.locale._format_timeframe("minutes", 2) == "2 минути"
assert self.locale._format_timeframe("minutes", 4) == "4 минути"
assert self.locale._format_timeframe("minutes", 5) == "5 минути"
assert self.locale._format_timeframe("minutes", 21) == "21 минута"
assert self.locale._format_timeframe("minutes", 22) == "22 минути"
assert self.locale._format_timeframe("minutes", 25) == "25 минути"
@pytest.mark.usefixtures("lang_locale")
|
TestBulgarianLocale
|
python
|
PrefectHQ__prefect
|
src/prefect/filesystems.py
|
{
"start": 21751,
"end": 22393
}
|
class ____(BaseModel):
"""
A file system that does not store any data.
"""
async def read_path(self, path: str) -> None:
pass
async def write_path(self, path: str, content: bytes) -> None:
pass
async def get_directory(
self, from_path: Optional[str] = None, local_path: Optional[str] = None
) -> None:
pass
async def put_directory(
self,
local_path: Optional[str] = None,
to_path: Optional[str] = None,
ignore_file: Optional[str] = None,
) -> None:
pass
__getattr__: Callable[[str], Any] = getattr_migration(__name__)
|
NullFileSystem
|
python
|
ray-project__ray
|
rllib/algorithms/tests/test_local.py
|
{
"start": 77,
"end": 786
}
|
class ____(unittest.TestCase):
def setUp(self) -> None:
ray.init(local_mode=True)
def tearDown(self) -> None:
ray.shutdown()
def test_local(self):
config = (
PPOConfig()
.api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
.environment("CartPole-v1")
.env_runners(num_env_runners=2)
.training(model={"fcnet_hiddens": [10]})
)
algo = config.build()
print(algo.train())
algo.stop()
if __name__ == "__main__":
import sys
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
LocalModeTest
|
python
|
django-haystack__django-haystack
|
test_haystack/elasticsearch_tests/test_elasticsearch_backend.py
|
{
"start": 61373,
"end": 64131
}
|
class ____(TestCase):
def setUp(self):
super().setUp()
# Wipe it clean.
self.raw_es = elasticsearch.Elasticsearch(
settings.HAYSTACK_CONNECTIONS["elasticsearch"]["URL"]
)
clear_elasticsearch_index()
# Stow.
self.old_ui = connections["elasticsearch"].get_unified_index()
self.ui = UnifiedIndex()
self.smmi = ElasticsearchBoostMockSearchIndex()
self.ui.build(indexes=[self.smmi])
connections["elasticsearch"]._index = self.ui
self.sb = connections["elasticsearch"].get_backend()
self.sample_objs = []
for i in range(1, 5):
mock = AFourthMockModel()
mock.id = i
if i % 2:
mock.author = "daniel"
mock.editor = "david"
else:
mock.author = "david"
mock.editor = "daniel"
mock.pub_date = datetime.date(2009, 2, 25) - datetime.timedelta(days=i)
self.sample_objs.append(mock)
def tearDown(self):
connections["elasticsearch"]._index = self.old_ui
super().tearDown()
def raw_search(self, query):
return self.raw_es.search(
q="*:*", index=settings.HAYSTACK_CONNECTIONS["elasticsearch"]["INDEX_NAME"]
)
def test_boost(self):
self.sb.update(self.smmi, self.sample_objs)
self.assertEqual(self.raw_search("*:*")["hits"]["total"], 4)
results = SearchQuerySet(using="elasticsearch").filter(
SQ(author="daniel") | SQ(editor="daniel")
)
self.assertEqual(
set([result.id for result in results]),
set(
[
"core.afourthmockmodel.4",
"core.afourthmockmodel.3",
"core.afourthmockmodel.1",
"core.afourthmockmodel.2",
]
),
)
def test__to_python(self):
self.assertEqual(self.sb._to_python("abc"), "abc")
self.assertEqual(self.sb._to_python("1"), 1)
self.assertEqual(self.sb._to_python("2653"), 2653)
self.assertEqual(self.sb._to_python("25.5"), 25.5)
self.assertEqual(self.sb._to_python("[1, 2, 3]"), [1, 2, 3])
self.assertEqual(
self.sb._to_python('{"a": 1, "b": 2, "c": 3}'), {"a": 1, "c": 3, "b": 2}
)
self.assertEqual(
self.sb._to_python("2009-05-09T16:14:00"),
datetime.datetime(2009, 5, 9, 16, 14),
)
self.assertEqual(
self.sb._to_python("2009-05-09T00:00:00"),
datetime.datetime(2009, 5, 9, 0, 0),
)
self.assertEqual(self.sb._to_python(None), None)
|
ElasticsearchBoostBackendTestCase
|
python
|
microsoft__pyright
|
packages/pyright-internal/src/tests/samples/methodOverride3.py
|
{
"start": 2226,
"end": 2559
}
|
class ____:
@property
def prop1(self) -> int:
return 3
@property
def prop2(self) -> int:
return 3
@prop2.setter
def prop2(self, val: int) -> None:
pass
@property
def prop3(self) -> int:
return 3
@prop3.setter
def prop3(self, val: int) -> None:
pass
|
H1
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/util/typing.py
|
{
"start": 19986,
"end": 20398
}
|
class ____(Generic[_FN]):
"""a descriptor that refers to a callable.
works around mypy's limitation of not allowing callables assigned
as instance variables
"""
if TYPE_CHECKING:
def __get__(self, instance: object, owner: Any) -> _FN: ...
def __set__(self, instance: Any, value: _FN) -> None: ...
def __delete__(self, instance: Any) -> None: ...
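    # Illustrative pattern this descriptor enables (a sketch; ``EventHook`` is
    # hypothetical):
    #   class EventHook:
    #       handler: CallableReference[Callable[[str], None]]
    #   # mypy then treats ``instance.handler`` as the callable itself rather
    #   # than as a bound method of EventHook.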
|
CallableReference
|
python
|
sqlalchemy__sqlalchemy
|
tools/generate_proxy_methods.py
|
{
"start": 2934,
"end": 14973
}
|
class ____:
__slots__ = ("sym",)
def __init__(self, sym: str):
self.sym = sym
def __repr__(self) -> str:
return self.sym
classes: collections.defaultdict[str, Dict[str, Tuple[Any, ...]]] = (
collections.defaultdict(dict)
)
_T = TypeVar("_T", bound="Any")
def create_proxy_methods(
target_cls: Type[Any],
target_cls_sphinx_name: str,
proxy_cls_sphinx_name: str,
classmethods: Iterable[str] = (),
methods: Iterable[str] = (),
attributes: Iterable[str] = (),
use_intermediate_variable: Iterable[str] = (),
) -> Callable[[Type[_T]], Type[_T]]:
"""A class decorator that will copy attributes to a proxy class.
The class to be instrumented must define a single accessor "_proxied".
"""
def decorate(cls: Type[_T]) -> Type[_T]:
# collect the class as a separate step. since the decorator
# is called as a result of imports, the order in which classes
# are collected (like in asyncio) can't be well controlled. however,
# the proxies (specifically asyncio session and asyncio scoped_session)
# have to be generated in dependency order, so run them in order in a
# second step.
classes[cls.__module__][cls.__name__] = (
target_cls,
target_cls_sphinx_name,
proxy_cls_sphinx_name,
classmethods,
methods,
attributes,
use_intermediate_variable,
cls,
)
return cls
return decorate
def _grab_overloads(fn):
"""grab @overload entries for a function, assuming black-formatted
code ;) so that we can do a simple regex
"""
# functions that use @util.deprecated and whatnot will have a string
# generated fn. we can look at __wrapped__ but these functions don't
# have any overloads in any case right now so skip
if fn.__code__.co_filename == "<string>":
return []
with open(fn.__code__.co_filename) as f:
lines = [l for i, l in zip(range(fn.__code__.co_firstlineno), f)]
lines.reverse()
output = []
current_ov = []
for line in lines[1:]:
current_ov.append(line)
outside_block_match = re.match(r"^\w", line)
if outside_block_match:
current_ov[:] = []
break
fn_match = re.match(r"^ (?: )?(?:async )?def (.*)\(", line)
if fn_match and fn_match.group(1) != fn.__name__:
current_ov[:] = []
break
ov_match = re.match(r"^ (?: )?@overload$", line)
if ov_match:
output.append("".join(reversed(current_ov)))
current_ov[:] = []
if re.match(r"^ if (?:typing\.)?TYPE_CHECKING:", line):
output.append(line)
current_ov[:] = []
output.reverse()
return output
def process_class(
buf: TextIO,
target_cls: Type[Any],
target_cls_sphinx_name: str,
proxy_cls_sphinx_name: str,
classmethods: Iterable[str],
methods: Iterable[str],
attributes: Iterable[str],
use_intermediate_variable: Iterable[str],
cls: Type[Any],
):
sphinx_symbol_match = re.match(r":class:`(.+)`", target_cls_sphinx_name)
if not sphinx_symbol_match:
raise Exception(
f"Couldn't match sphinx class identifier from "
f"target_cls_sphinx_name f{target_cls_sphinx_name!r}. Currently "
'this program expects the form ":class:`_<prefix>.<clsname>`"'
)
sphinx_symbol = sphinx_symbol_match.group(1)
require_intermediate = set(use_intermediate_variable)
def instrument(buf: TextIO, name: str, clslevel: bool = False) -> None:
fn = getattr(target_cls, name)
overloads = _grab_overloads(fn)
for overload in overloads:
buf.write(overload)
spec = compat.inspect_getfullargspec(fn)
iscoroutine = inspect.iscoroutinefunction(fn)
if spec.defaults or spec.kwonlydefaults:
elem = list(spec)
if spec.defaults:
new_defaults = tuple(
(
_repr_sym("util.EMPTY_DICT")
if df is util.EMPTY_DICT
else df
)
for df in spec.defaults
)
elem[3] = new_defaults
if spec.kwonlydefaults:
new_kwonlydefaults = {
name: (
_repr_sym("util.EMPTY_DICT")
if df is util.EMPTY_DICT
else df
)
for name, df in spec.kwonlydefaults.items()
}
elem[5] = new_kwonlydefaults
spec = compat.FullArgSpec(*elem)
caller_argspec = format_argspec_plus(spec, grouped=False)
metadata = {
"name": fn.__name__,
"async": "async " if iscoroutine else "",
"await": "await " if iscoroutine else "",
"apply_pos_proxied": caller_argspec["apply_pos_proxied"],
"target_cls_name": target_cls.__name__,
"apply_kw_proxied": caller_argspec["apply_kw_proxied"],
"grouped_args": caller_argspec["grouped_args"],
"self_arg": caller_argspec["self_arg"],
"doc": textwrap.indent(
inject_docstring_text(
fn.__doc__,
textwrap.indent(
".. container:: class_bases\n\n"
f" Proxied for the {target_cls_sphinx_name} "
"class on \n"
f" behalf of the {proxy_cls_sphinx_name} "
"class.",
" ",
),
1,
),
" ",
).lstrip(),
}
if fn.__name__ in require_intermediate:
metadata["line_prefix"] = "result ="
metadata["after_line"] = "return result\n"
else:
metadata["line_prefix"] = "return"
metadata["after_line"] = ""
if clslevel:
code = (
'''\
@classmethod
%(async)sdef %(name)s%(grouped_args)s:
r"""%(doc)s\n """ # noqa: E501
%(line_prefix)s %(await)s%(target_cls_name)s.%(name)s(%(apply_kw_proxied)s)
%(after_line)s
'''
% metadata
)
else:
code = (
'''\
%(async)sdef %(name)s%(grouped_args)s:
r"""%(doc)s\n """ # noqa: E501
%(line_prefix)s %(await)s%(self_arg)s._proxied.%(name)s(%(apply_kw_proxied)s)
%(after_line)s
''' # noqa: E501
% metadata
)
buf.write(textwrap.indent(code, " "))
def makeprop(buf: TextIO, name: str) -> None:
attr = target_cls.__dict__.get(name, None)
return_type = target_cls.__annotations__.get(name, "Any")
assert isinstance(return_type, str), (
"expected string annotations, is from __future__ "
"import annotations set up?"
)
existing_doc = None
if attr is not None:
if isinstance(attr, property):
readonly = attr.fset is None
existing_doc = attr.__doc__
elif isinstance(attr, langhelpers.generic_fn_descriptor):
readonly = True
existing_doc = attr.__doc__
elif hasattr(attr, "__get__"):
readonly = not hasattr(attr, "__set__")
existing_doc = attr.__doc__
else:
# not a descriptor
readonly = False
else:
readonly = False
if existing_doc:
doc = textwrap.indent(
inject_docstring_text(
attr.__doc__,
textwrap.indent(
".. container:: class_bases\n\n"
f" Proxied for the {target_cls_sphinx_name} "
"class \n"
f" on behalf of the {proxy_cls_sphinx_name} "
"class.",
" ",
),
1,
),
" ",
).lstrip()
else:
doc = (
f"Proxy for the :attr:`{sphinx_symbol}.{name}` "
"attribute \n"
f" on behalf of the {proxy_cls_sphinx_name} "
"class.\n"
)
code = (
"@property\n"
"def %(name)s(self) -> %(return_type)s:\n"
' r"""%(doc)s\n """ # noqa: E501\n\n'
" return self._proxied.%(name)s\n\n"
) % {"name": name, "doc": doc, "return_type": return_type}
if not readonly:
code += (
"@%(name)s.setter\n"
"def %(name)s(self, attr: %(return_type)s) -> None:\n"
" self._proxied.%(name)s = attr\n\n"
) % {"name": name, "return_type": return_type}
buf.write(textwrap.indent(code, " "))
for meth in methods:
instrument(buf, meth)
for prop in attributes:
makeprop(buf, prop)
for prop in classmethods:
instrument(buf, prop, clslevel=True)
def process_module(modname: str, filename: str, cmd: code_writer_cmd) -> str:
class_entries = classes[modname]
# use tempfile in same path as the module, or at least in the
# current working directory, so that black / zimports use
# local pyproject.toml
with (
NamedTemporaryFile(
mode="w",
delete=False,
suffix=".py",
) as buf,
open(filename) as orig_py,
):
in_block = False
current_clsname = None
for line in orig_py:
m = re.match(r" # START PROXY METHODS (.+)$", line)
if m:
current_clsname = m.group(1)
args = class_entries[current_clsname]
cmd.write_status(
f"Generating attributes for class {current_clsname}\n"
)
in_block = True
buf.write(line)
buf.write(
"\n # code within this block is "
"**programmatically, \n"
" # statically generated** by"
f" tools/{os.path.basename(__file__)}\n\n"
)
process_class(buf, *args)
if line.startswith(f" # END PROXY METHODS {current_clsname}"):
in_block = False
if not in_block:
buf.write(line)
return buf.name
def run_module(modname: str, cmd: code_writer_cmd) -> None:
cmd.write_status(f"importing module {modname}\n")
mod = importlib.import_module(modname)
destination_path = mod.__file__
assert destination_path is not None
tempfile = process_module(modname, destination_path, cmd)
cmd.run_zimports(tempfile)
cmd.run_black(tempfile)
cmd.write_output_file_from_tempfile(tempfile, destination_path)
def main(cmd: code_writer_cmd) -> None:
from sqlalchemy import util
from sqlalchemy.util import langhelpers
util.create_proxy_methods = langhelpers.create_proxy_methods = (
create_proxy_methods
)
for entry in entries:
if cmd.args.module in {"all", entry}:
run_module(entry, cmd)
entries = [
"sqlalchemy.orm.scoping",
"sqlalchemy.ext.asyncio.engine",
"sqlalchemy.ext.asyncio.session",
"sqlalchemy.ext.asyncio.scoping",
]
if __name__ == "__main__":
cmd = code_writer_cmd(__file__)
with cmd.add_arguments() as parser:
parser.add_argument(
"--module",
choices=entries + ["all"],
default="all",
help="Which file to generate. Default is to regenerate all files",
)
with cmd.run_program():
main(cmd)
|
_repr_sym
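The target class here is a small repr shim. A standalone sketch with a made-up defaults tuple shows the trick: wrapping a sentinel in the class makes repr() emit a bare symbol rather than a quoted string, which is what you want when a captured argspec is rendered back into generated source.

class _repr_sym:
    __slots__ = ("sym",)

    def __init__(self, sym: str):
        self.sym = sym

    def __repr__(self) -> str:
        return self.sym

# a plain string default is rendered quoted; the wrapped sentinel is not
defaults = ("literal", _repr_sym("util.EMPTY_DICT"))
print(repr(defaults))  # ('literal', util.EMPTY_DICT)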
|
python
|
kamyu104__LeetCode-Solutions
|
Python/vowel-spellchecker.py
|
{
"start": 29,
"end": 935
}
|
class ____(object):
def spellchecker(self, wordlist, queries):
"""
:type wordlist: List[str]
:type queries: List[str]
:rtype: List[str]
"""
vowels = set(['a', 'e', 'i', 'o', 'u'])
def todev(word):
return "".join('*' if c.lower() in vowels else c.lower()
for c in word)
words = set(wordlist)
caps = {}
vows = {}
for word in wordlist:
caps.setdefault(word.lower(), word)
vows.setdefault(todev(word), word)
def check(query):
if query in words:
return query
lower = query.lower()
if lower in caps:
return caps[lower]
devow = todev(lower)
if devow in vows:
return vows[devow]
return ""
return map(check, queries)
|
Solution
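A quick sanity check against the published LeetCode 966 example (the list() wrapper is only needed on Python 3, where map is lazy):

sol = Solution()
result = list(sol.spellchecker(
    ["KiTe", "kite", "hare", "Hare"],
    ["kite", "Kite", "KiTe", "Hare", "HARE", "Hear", "hear",
     "keti", "keet", "keto"],
))
print(result)
# ['kite', 'KiTe', 'KiTe', 'Hare', 'hare', '', '', 'KiTe', '', 'KiTe']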
|
python
|
pytorch__pytorch
|
torch/_inductor/runtime/coordinate_descent_tuner.py
|
{
"start": 906,
"end": 13944
}
|
class ____:
"""
The coordinate descent tuner. Tune one field/coordinate at a time.
TODO will it be necessary to tune multiple fields simultaneously.
TODO: what if both increasing and decreasing a field can improve perf.
    i.e., there are multiple local optima.
"""
def __init__(
self,
is_mm=False,
is_native_matmul=False,
is_mix_order_reduction=False,
name="unknown",
size_hints=None,
inductor_meta=None,
frozen_fields=None,
):
self.is_mm = is_mm # we will tune num_stages for mm
# Native matmul codegen assumes ZBLOCK=1 always.
# This is because 3d tl.dot is slow and so we want to tile y and x only.
# tl.dot also does not support size smaller than 16; we put this restriction.
self.is_native_matmul = is_native_matmul
assert not (self.is_mm and self.is_native_matmul)
self.is_mix_order_reduction = is_mix_order_reduction
self.cached_benchmark_results = {}
self.name = name
self.size_hints = size_hints
self.inductor_meta = inductor_meta or {}
self.frozen_fields: OrderedSet[str] = (
OrderedSet(frozen_fields) if frozen_fields is not None else OrderedSet()
)
def get_config_max(self, prefix: str) -> int:
max_block = TRITON_MAX_BLOCK[prefix.upper()]
size_hint = self.size_hints.get(prefix) if self.size_hints is not None else None
return min(max_block, size_hint) if size_hint is not None else max_block
def get_warpsmax(self):
# Currently, CUDA has a maximum of 1024 threads, so 32 is the max
# number of warps.
return 1024 // 32
def cache_benchmark_result(self, config, timing):
self.cached_benchmark_results[triton_config_to_hashable(config)] = timing
def lookup_in_cache(self, config):
return self.cached_benchmark_results.get(triton_config_to_hashable(config))
def call_func(self, func, config):
found = self.lookup_in_cache(config)
if found is not None:
log.debug(" CACHED")
return found
timing = func(config)
self.cache_benchmark_result(config, timing)
return timing
@property
def tunable_fields(self):
out = [
"XBLOCK",
"YBLOCK",
"ZBLOCK",
# NOTE: we should not tune R0_BLOCK for persistent reduction.
# We rely on the fact that persistent reduction's triton.Config
# does not have the R0_BLOCK field to guarantee that.
"R0_BLOCK",
"R1_BLOCK",
# the following 3 are for mm
"BLOCK_M",
"BLOCK_N",
"BLOCK_K",
"num_warps",
]
if self.is_mm:
out.append("num_stages")
if self.inductor_meta.get("is_hip") is True:
out.append("waves_per_eu")
if self.is_native_matmul:
out.append("num_stages")
out.remove("ZBLOCK") # ZBLOCK=1 always in native matmul
if self.is_mix_order_reduction:
# unlike TritonConfig.num_stages, this one is
# put in TritonConfig.kwargs["NUM_STAGES"] and is used to
# control the stage of pipelining of tl.range.
out.append("NUM_STAGES")
return [f for f in out if f not in self.frozen_fields]
def value_too_large(self, name: str, val: int) -> bool:
block_suffix = "BLOCK"
if name.endswith(block_suffix):
prefix = name.strip(block_suffix).lower()
return val > self.get_config_max(prefix)
if name == "num_warps":
return val > self.get_warpsmax()
if name == "waves_per_eu":
return val > 8
return False
def value_too_small(self, name: str, val: int) -> bool:
# In native matmul, block size should be >= 16 for tl.dot
if self.is_native_matmul:
if name in ["YBLOCK", "XBLOCK", "R0_BLOCK"]:
return val < 16
# Break if value becomes 0/neg
return val <= 0
def get_neighbour_values(self, name, orig_val, radius=None, include_self=False):
"""
Get neighbour values in 'radius' steps. The original value is not
        returned as its own neighbour.
"""
if radius is None:
radius = 1
if name == "NUM_STAGES":
            # we see cases where
# NUM_STAGES=1 is better than NUM_STAGES=2
# while NUM_STAGES=1 is worse than NUM_STAGES=3
radius = max(radius, 2)
assert radius >= 1
def update(cur_val, inc=True):
if name in ["num_stages", "NUM_STAGES"]:
if inc:
return cur_val + 1
else:
return cur_val - 1
else:
if inc:
return cur_val * 2
else:
return cur_val // 2
out = []
# increment loop
cur_val = orig_val
for _ in range(radius):
cur_val = update(cur_val, True)
if self.value_too_large(name, cur_val):
break
out.append(cur_val)
# decrement loop
cur_val = orig_val
for _ in range(radius):
cur_val = update(cur_val, False)
if self.value_too_small(name, cur_val):
break
out.append(cur_val)
if include_self:
out.append(orig_val)
return out
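    # For example, with the default radius of 1:
    #   get_neighbour_values("XBLOCK", 8)      -> [16, 4]  (doubling/halving)
    #   get_neighbour_values("num_stages", 3)  -> [4, 2]   (steps of +/-1)
    # subject to the value_too_large/value_too_small cutoffs above.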
@staticmethod
def has_improvement(baseline, test):
threshold = 0.001 # 0.1%
return test is not None and test < baseline * (1 - threshold)
def is_valid_config(self, config) -> bool:
if self.is_mix_order_reduction:
# Mix order reduction has an extra constraint that
# we should not tune XBLOCK beyond RSPLIT_SIZE
xblock = config.kwargs["XBLOCK"]
split_size = config.kwargs["RSPLIT_SIZE"]
return xblock <= split_size
return True
def check_all_tuning_directions(
self,
# pyrefly: ignore [missing-attribute]
func: Callable[["triton.Config"], float],
best_config,
best_timing,
):
"""
Check all directions. We only do this once the regular coordinate
        descent tuning finds no better choices anymore.
We only have a few tunable fields, so this should be fine.
"""
candidate_values_list = []
effective_fields = []
for field in self.tunable_fields:
old_value = get_field(best_config, field)
if old_value is None:
continue
radius = self.inductor_meta.get("coordinate_descent_search_radius", 1)
candidate_values = self.get_neighbour_values(
field,
old_value,
radius=radius,
include_self=True,
)
candidate_values_list.append(candidate_values)
effective_fields.append(field)
choices = itertools.product(*candidate_values_list)
improved = False
for choice in choices:
assert len(choice) == len(effective_fields)
candidate_config = copy.deepcopy(best_config)
for new_val, field in zip(choice, effective_fields):
set_field(candidate_config, field, new_val)
if not self.is_valid_config(candidate_config):
continue
cmp_res, candidate_timing = self.compare_config(
func, candidate_config, best_config, best_timing
)
if cmp_res:
improved = True
best_config = candidate_config
best_timing = candidate_timing
return improved, best_config, best_timing
def compare_config(self, func, candidate_config, best_config, best_timing):
"""
Check if candidate_config is better than best_config.
Return a tuple of (compare_result, candidate_timing).
compare_result is true iff candidate_config is better.
"""
log.debug("Try config %s", candidate_config)
try:
candidate_timing = self.call_func(func, candidate_config)
except Exception as e:
log.debug("Got exception %s", e) # noqa: G200
return False, float("inf")
if self.has_improvement(best_timing, candidate_timing):
log.debug(
"Tune from %s %f -> %s %f",
best_config,
best_timing,
candidate_config,
candidate_timing,
)
return True, candidate_timing
return False, candidate_timing
def autotune(
self,
# pyrefly: ignore [missing-attribute]
func: Callable[["triton.Config"], float],
# pyrefly: ignore [missing-attribute]
baseline_config: "triton.Config",
baseline_timing: float | None = None,
) -> "triton.Config": # pyrefly: ignore # missing-attribute
if baseline_timing is None:
baseline_timing = self.call_func(func, baseline_config)
log.debug("= Do coordinate descent tuning for %s =", self.name)
log.debug(
"%s: Baseline Config %s, baseline timing %f",
self.name,
baseline_config,
baseline_timing,
)
improved = True
best_config = baseline_config
best_timing = baseline_timing
tunable_fields = self.tunable_fields
while improved:
improved = False
for name in tunable_fields:
cur_val = get_field(best_config, name)
                # some kernels don't have R0_BLOCK/YBLOCK/ZBLOCK, so cur_val may be None
if cur_val is None:
continue
# It's possible that candidate_values is empty.
# E.g., if XBLOCK is 1 initially and size_hint for x is also 1.
# We would not try either larger or smaller XBLOCK in this case.
candidate_values = self.get_neighbour_values(name, cur_val)
for next_val in candidate_values:
candidate_config = copy.deepcopy(best_config)
set_field(candidate_config, name, next_val)
if not self.is_valid_config(candidate_config):
continue
cmp_res, candidate_timing = self.compare_config(
func, candidate_config, best_config, best_timing
)
if cmp_res:
improved = True
best_config, best_timing = candidate_config, candidate_timing
if not improved and self.inductor_meta.get(
"coordinate_descent_check_all_directions"
):
old_best_timing = best_timing
improved, best_config, best_timing = self.check_all_tuning_directions(
func, best_config, best_timing
)
if improved:
msg = red_text(
"%s: Coordinate descend tuning found improvement of %.3fx by looking in all directions."
)
log.debug(
msg,
self.name,
old_best_timing / best_timing,
)
log.debug(
"%s: Improve from %s %f -> %s %f, %.3fx",
self.name,
baseline_config,
baseline_timing,
best_config,
best_timing,
baseline_timing / best_timing,
)
return best_config
@staticmethod
def autotune_single_field(fn, init_val, min_val=None, max_val=None):
"""
fn is a function that takes the field value and returns the benchmarking result
init_val is the starting point of autotuning.
        Should work well for a parabola-like curve. Here is a real example
        for the split size of mix-order reduction: https://github.com/pytorch/pytorch/pull/166461
"""
cache = {}
def _bench(val):
if val not in cache:
cache[val] = fn(val)
# print(f"split size {val} -> {cache[val]:.3f} ms")
return cache[val]
if min_val is None:
min_val = 1
if max_val is None:
max_val = 2**30 # some arbitrary large value
best_val = init_val
improved = True
while improved:
improved = False
candlist = [best_val // 2, best_val * 2]
for cand in candlist:
cand = max(cand, min_val)
cand = min(cand, max_val)
if _bench(cand) < _bench(best_val):
best_val = cand
improved = True
return best_val
|
CoordescTuner
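To see the halve-or-double search in autotune_single_field in isolation, here is a toy run against a made-up benchmark whose optimum sits at 64 (fake_bench is hypothetical, and the class is referred to by its target name):

def fake_bench(block_size):
    # pretend anything away from 64 is slower
    return (block_size - 64) ** 2 + 1.0

best = CoordescTuner.autotune_single_field(
    fake_bench, init_val=8, min_val=1, max_val=1024
)
print(best)  # 64, reached via 8 -> 16 -> 32 -> 64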
|
python
|
sympy__sympy
|
sympy/categories/baseclasses.py
|
{
"start": 5838,
"end": 11648
}
|
class ____(Morphism):
r"""
Represents a morphism which is a composition of other morphisms.
Explanation
===========
Two composite morphisms are equal if the morphisms they were
obtained from (components) are the same and were listed in the
same order.
The arguments to the constructor for this class should be listed
in diagram order: to obtain the composition `g\circ f` from the
instances of :class:`Morphism` ``g`` and ``f`` use
``CompositeMorphism(f, g)``.
Examples
========
>>> from sympy.categories import Object, NamedMorphism, CompositeMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> g * f
CompositeMorphism((NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g")))
>>> CompositeMorphism(f, g) == g * f
True
"""
@staticmethod
def _add_morphism(t, morphism):
"""
Intelligently adds ``morphism`` to tuple ``t``.
Explanation
===========
If ``morphism`` is a composite morphism, its components are
added to the tuple. If ``morphism`` is an identity, nothing
is added to the tuple.
No composability checks are performed.
"""
if isinstance(morphism, CompositeMorphism):
# ``morphism`` is a composite morphism; we have to
# denest its components.
return t + morphism.components
elif isinstance(morphism, IdentityMorphism):
# ``morphism`` is an identity. Nothing happens.
return t
else:
return t + Tuple(morphism)
def __new__(cls, *components):
if components and not isinstance(components[0], Morphism):
# Maybe the user has explicitly supplied a list of
# morphisms.
return CompositeMorphism.__new__(cls, *components[0])
normalised_components = Tuple()
for current, following in zip(components, components[1:]):
if not isinstance(current, Morphism) or \
not isinstance(following, Morphism):
raise TypeError("All components must be morphisms.")
if current.codomain != following.domain:
raise ValueError("Uncomposable morphisms.")
normalised_components = CompositeMorphism._add_morphism(
normalised_components, current)
# We haven't added the last morphism to the list of normalised
# components. Add it now.
normalised_components = CompositeMorphism._add_morphism(
normalised_components, components[-1])
if not normalised_components:
# If ``normalised_components`` is empty, only identities
# were supplied. Since they all were composable, they are
# all the same identities.
return components[0]
elif len(normalised_components) == 1:
# No sense to construct a whole CompositeMorphism.
return normalised_components[0]
return Basic.__new__(cls, normalised_components)
@property
def components(self):
"""
Returns the components of this composite morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).components
(NamedMorphism(Object("A"), Object("B"), "f"),
NamedMorphism(Object("B"), Object("C"), "g"))
"""
return self.args[0]
@property
def domain(self):
"""
Returns the domain of this composite morphism.
The domain of the composite morphism is the domain of its
first component.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).domain
Object("A")
"""
return self.components[0].domain
@property
def codomain(self):
"""
Returns the codomain of this composite morphism.
The codomain of the composite morphism is the codomain of its
last component.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).codomain
Object("C")
"""
return self.components[-1].codomain
def flatten(self, new_name):
"""
Forgets the composite structure of this morphism.
Explanation
===========
If ``new_name`` is not empty, returns a :class:`NamedMorphism`
with the supplied name, otherwise returns a :class:`Morphism`.
In both cases the domain of the new morphism is the domain of
this composite morphism and the codomain of the new morphism
is the codomain of this composite morphism.
Examples
========
>>> from sympy.categories import Object, NamedMorphism
>>> A = Object("A")
>>> B = Object("B")
>>> C = Object("C")
>>> f = NamedMorphism(A, B, "f")
>>> g = NamedMorphism(B, C, "g")
>>> (g * f).flatten("h")
NamedMorphism(Object("A"), Object("C"), "h")
"""
return NamedMorphism(self.domain, self.codomain, new_name)
|
CompositeMorphism
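Two behaviors promised by the docstrings above are worth seeing side by side: nested compositions denest into one flat component tuple, and identities are absorbed entirely. Runnable against sympy.categories:

from sympy.categories import Object, NamedMorphism, IdentityMorphism

A, B, C, D = Object("A"), Object("B"), Object("C"), Object("D")
f = NamedMorphism(A, B, "f")
g = NamedMorphism(B, C, "g")
h = NamedMorphism(C, D, "h")

# nesting flattens: h * (g * f) has three components, not two
print(len((h * (g * f)).components))  # 3

# identities vanish: id_B composed with f is just f
print(IdentityMorphism(B) * f == f)   # True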
|
python
|
spack__spack
|
lib/spack/spack/relocate_text.py
|
{
"start": 5070,
"end": 10865
}
|
class ____(PrefixReplacer):
def __init__(self, prefix_to_prefix: Dict[bytes, bytes], suffix_safety_size: int = 7) -> None:
"""
prefix_to_prefix: Ordered dictionary where the keys are bytes representing the old prefixes
            and the values are the new prefixes
suffix_safety_size: in case of null terminated strings, what size of the suffix should
remain to avoid aliasing issues?
"""
assert suffix_safety_size >= 0
super().__init__(prefix_to_prefix)
self.suffix_safety_size = suffix_safety_size
self.regex = self.binary_text_regex(self.prefix_to_prefix.keys(), suffix_safety_size)
@classmethod
def binary_text_regex(
cls, binary_prefixes: Iterable[bytes], suffix_safety_size: int = 7
) -> PatternBytes:
"""Create a regex that looks for exact matches of prefixes, and also tries to match a
C-string type null terminator in a small lookahead window.
Arguments:
binary_prefixes: Iterable of byte strings of prefixes to match
            suffix_safety_size: Size of the lookahead for the null-terminated string.
"""
# Note: it's important not to use capture groups for the prefix, since it destroys
# performance due to common prefix optimization.
return re.compile(
b"("
+ b"|".join(re.escape(p) for p in binary_prefixes)
+ b")([^\0]{0,%d}\0)?" % suffix_safety_size
)
@classmethod
def from_strings_or_bytes(
cls, prefix_to_prefix: PrefixToPrefix, suffix_safety_size: int = 7
) -> "BinaryFilePrefixReplacer":
"""Create a BinaryFilePrefixReplacer from an ordered prefix to prefix map.
Arguments:
prefix_to_prefix: Ordered mapping of prefix to prefix.
suffix_safety_size: Number of bytes to retain at the end of a C-string to avoid binary
string-aliasing issues.
"""
return cls(_prefix_to_prefix_as_bytes(prefix_to_prefix), suffix_safety_size)
def _apply_to_file(self, f: IO[bytes]) -> bool:
"""
Given a file opened in rb+ mode, apply the string replacements as specified by an ordered
dictionary of prefix to prefix mappings. This method takes special care of null-terminated
C-strings. C-string constants are problematic because compilers and linkers optimize
readonly strings for space by aliasing those that share a common suffix (only suffix since
all of them are null terminated). See https://github.com/spack/spack/pull/31739 and
https://github.com/spack/spack/pull/32253 for details. Our logic matches the original
prefix with a ``suffix_safety_size + 1`` lookahead for null bytes. If no null terminator
is found, we simply pad with leading /, assuming that it's a long C-string; the full
C-string after replacement has a large suffix in common with its original value. If there
*is* a null terminator we can do the same as long as the replacement has a sufficiently
long common suffix with the original prefix. As a last resort when the replacement does
not have a long enough common suffix, we can try to shorten the string, but this only
works if the new length is sufficiently short (typically the case when going from large
        padding -> normal path). If the replacement string is longer, or all of the above fails,
we error out.
Arguments:
f: file opened in rb+ mode
Returns:
bool: True if file was modified
"""
assert f.tell() == 0
# We *could* read binary data in chunks to avoid loading all in memory, but it's nasty to
# deal with matches across boundaries, so let's stick to something simple.
modified = False
for match in self.regex.finditer(f.read()):
# The matching prefix (old) and its replacement (new)
old = match.group(1)
new = self.prefix_to_prefix[old]
            # Did we find a trailing null within an N + 1 byte window after the prefix?
null_terminated = match.end(0) > match.end(1)
# Suffix string length, excluding the null byte. Only makes sense if null_terminated
suffix_strlen = match.end(0) - match.end(1) - 1
# How many bytes are we shrinking our string?
bytes_shorter = len(old) - len(new)
# We can't make strings larger.
if bytes_shorter < 0:
raise CannotGrowString(old, new)
# If we don't know whether this is a null terminated C-string (we're looking only N + 1
# bytes ahead), or if it is and we have a common suffix, we can simply pad with leading
# dir separators.
elif (
not null_terminated
or suffix_strlen >= self.suffix_safety_size # == is enough, but let's be defensive
or old[-self.suffix_safety_size + suffix_strlen :]
== new[-self.suffix_safety_size + suffix_strlen :]
):
replacement = b"/" * bytes_shorter + new
# If it *was* null terminated, all that matters is that we can leave N bytes of old
# suffix in place. Note that > is required since we also insert an additional null
# terminator.
elif bytes_shorter > self.suffix_safety_size:
replacement = new + match.group(2) # includes the trailing null
# Otherwise... we can't :(
else:
raise CannotShrinkCString(old, new, match.group()[:-1])
f.seek(match.start())
f.write(replacement)
modified = True
return modified
|
BinaryFilePrefixReplacer
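The regex from binary_text_regex carries the null-terminator logic. A small demonstration with made-up byte strings (the class again referred to by its target name) shows group(2) capturing a short suffix plus its terminator when one falls inside the safety window:

regex = BinaryFilePrefixReplacer.binary_text_regex(
    [b"/old/prefix"], suffix_safety_size=7
)

blob = b"...\x00/old/prefix/lib.so\x00..."
m = regex.search(blob)
print(m.group(1))  # b'/old/prefix'
print(m.group(2))  # b'/lib.so\x00' -- up to 7 non-null bytes plus the null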
|
python
|
plotly__plotly.py
|
plotly/graph_objs/parcoords/line/colorbar/_tickformatstop.py
|
{
"start": 233,
"end": 8549
}
|
class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcoords.line.colorbar"
_path_str = "parcoords.line.colorbar.tickformatstop"
_valid_props = {"dtickrange", "enabled", "name", "templateitemname", "value"}
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs,
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.parcoords.line
.colorbar.Tickformatstop`
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super().__init__("tickformatstops")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcoords.line.colorbar.Tickformatstop
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcoords.line.colorbar.Tickformatstop`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("dtickrange", arg, dtickrange)
self._set_property("enabled", arg, enabled)
self._set_property("name", arg, name)
self._set_property("templateitemname", arg, templateitemname)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
|
Tickformatstop
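A usage sketch for the generated class above (values are illustrative): properties may be passed to the constructor or assigned afterwards, and both routes go through the same validation machinery.

from plotly.graph_objs.parcoords.line.colorbar import Tickformatstop

stop = Tickformatstop(dtickrange=[None, 1000], value="%H:%M:%S", enabled=True)
stop.name = "sub-minute"
print(stop.to_plotly_json())
# -> {'dtickrange': [None, 1000], 'enabled': True, 'name': 'sub-minute',
#     'value': '%H:%M:%S'} (key order may vary)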
|
python
|
ansible__ansible
|
lib/ansible/plugins/lookup/file.py
|
{
"start": 1744,
"end": 2924
}
|
class ____(LookupBase):
def run(self, terms, variables=None, **kwargs):
ret = []
self.set_options(var_options=variables, direct=kwargs)
for term in terms:
display.debug("File lookup term: %s" % term)
# Find the file in the expected search path
try:
lookupfile = self.find_file_in_search_path(variables, 'files', term, ignore_missing=True)
display.vvvv(u"File lookup using %s as file" % lookupfile)
if lookupfile:
contents = self._loader.get_text_file_contents(lookupfile)
if self.get_option('lstrip'):
contents = contents.lstrip()
if self.get_option('rstrip'):
contents = contents.rstrip()
ret.append(contents)
else:
# TODO: only add search info if abs path?
raise AnsibleError("File not found. Use -vvvvv to see paths searched.")
except AnsibleError as ex:
raise AnsibleError(f"Unable to access the file {term!r}.") from ex
return ret
|
LookupModule
|