codereview_new_python_data_11587
def _is_alarm_supported(self, alarm_details: MetricAlarm) -> bool:

def get_metric_alarm_details_for_alarm_arn(alarm_arn: str) -> Optional[MetricAlarm]:
    alarm_name = arns.extract_resource_from_arn(alarm_arn).split(":", 1)[1]
    client = get_cloudwatch_client_for_region_of_alarm(alarm_arn)
-    response = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
-    if len(response) == 1:
-        return response[0]
-    return None

def get_cloudwatch_client_for_region_of_alarm(alarm_arn: str) -> "CloudWatchClient":

nit: would probably rename `response` to something like `metric_alarms`. Since this is a list, lines 125 to 127 could be shortened to `return metric_alarms[0] if metric_alarms else None`.

def _is_alarm_supported(self, alarm_details: MetricAlarm) -> bool:

def get_metric_alarm_details_for_alarm_arn(alarm_arn: str) -> Optional[MetricAlarm]:
    alarm_name = arns.extract_resource_from_arn(alarm_arn).split(":", 1)[1]
    client = get_cloudwatch_client_for_region_of_alarm(alarm_arn)
+    metric_alarms = client.describe_alarms(AlarmNames=[alarm_name])["MetricAlarms"]
+    return metric_alarms[0] if metric_alarms else None

def get_cloudwatch_client_for_region_of_alarm(alarm_arn: str) -> "CloudWatchClient":
codereview_new_python_data_11588
def test_macro_deployment(
    create_lambda_function(
        func_name=func_name,
        handler_file=macro_function_path,
-        runtime=Runtime.python3_8,
        client=lambda_client,
-        timeout=1,
    )

    stack_with_macro = deploy_cfn_template(

nit: I'd suggest using `Runtime.python3_9`

def test_macro_deployment(
    create_lambda_function(
        func_name=func_name,
        handler_file=macro_function_path,
+        runtime=Runtime.python3_9,
        client=lambda_client,
    )

    stack_with_macro = deploy_cfn_template(
codereview_new_python_data_11589
from collections import OrderedDict
from typing import Final

from localstack.services.stepfunctions.backend.execution import Execution
from localstack.services.stepfunctions.backend.state_machine import StateMachine
from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute


class SFNStore(BaseStore):
-    sm_by_arn: dict[str, StateMachine] = LocalAttribute(default=dict)
-    execs_by_exec_arn: dict[str, Execution] = LocalAttribute(
        default=OrderedDict
    )  # TODO: when snapshot to pods stop execution(?)

nit: The more idiomatic naming would be `state_machines` here - and adding a comment/docstring that this variable maps ARNs to state machines. (same for `execs_by_exec_arn` -> `executions` below)

from collections import OrderedDict
from typing import Final

+from localstack.aws.api.stepfunctions import Arn
from localstack.services.stepfunctions.backend.execution import Execution
from localstack.services.stepfunctions.backend.state_machine import StateMachine
from localstack.services.stores import AccountRegionBundle, BaseStore, LocalAttribute


class SFNStore(BaseStore):
+    # Maps ARNs to state machines.
+    state_machines: Final[dict[Arn, StateMachine]] = LocalAttribute(default=dict)
+    # Maps execution ARNs to executions.
+    executions: Final[dict[Arn, Execution]] = LocalAttribute(
        default=OrderedDict
    )  # TODO: when snapshot to pods stop execution(?)
codereview_new_python_data_11590
from localstack.utils.files import load_file
-from tests.integration.apigateway import OPENAPI_SPEC_PULUMI_JSON


def test_import_rest_api(import_apigw, snapshot):

Not too exhaustive for now because we will be moving "import api" tests from existing modules.

+import os
+
from localstack.utils.files import load_file
+
+# parent directory of this file
+PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+OPENAPI_SPEC_PULUMI_JSON = os.path.join(PARENT_DIR, "files", "openapi.spec.pulumi.json")


def test_import_rest_api(import_apigw, snapshot):
codereview_new_python_data_11591
def check_invocations():

    cleanup(sm_arn, state_machines_before, stepfunctions_client)
    events.delete_event_bus(Name=bus_name)

-def test_create_state_machines_in_parallel(self, stepfunctions_client):
    """
    Perform a test that creates a series of state machines in parallel. Without concurrency control, using
    StepFunctions-Local, the following error is pretty consistently reproducible:

```suggestion
def test_create_state_machines_in_parallel(self, stepfunctions_client, cleanups):
```

def check_invocations():

    cleanup(sm_arn, state_machines_before, stepfunctions_client)
    events.delete_event_bus(Name=bus_name)

+def test_create_state_machines_in_parallel(self, stepfunctions_client, cleanups):
    """
    Perform a test that creates a series of state machines in parallel. Without concurrency control, using
    StepFunctions-Local, the following error is pretty consistently reproducible:
codereview_new_python_data_11592
# Default bucket name of the s3 bucket used for local lambda development
# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
-DEFAULT_BUCKET_MARKER_LOCAL = "hot-reloading-bucket"
-OLD_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"

# user that starts the opensearch process if the current user is root
OS_USER_OPENSEARCH = "localstack"

```suggestion
LEGACY_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"
```
Would be a bit more in line with the conventions in config.py for example

# Default bucket name of the s3 bucket used for local lambda development
# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
+DEFAULT_BUCKET_MARKER_LOCAL = "hot-reload"
+LEGACY_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"

# user that starts the opensearch process if the current user is root
OS_USER_OPENSEARCH = "localstack"
codereview_new_python_data_11593
def start(self, env_vars: dict[str, str]) -> None:
    )
    if self.function_version.config.package_type == PackageType.Zip:
        if self.function_version.config.code.is_hot_reloading():
-            # this basically means hot reloading
            container_config.env_vars["LOCALSTACK_HOT_RELOADING_ENABLED"] = "1"
            if container_config.volumes is None:
                container_config.volumes = VolumeMappings()

this seems a bit redundant in the `is_hot_reloading()` branch

def start(self, env_vars: dict[str, str]) -> None:
    )
    if self.function_version.config.package_type == PackageType.Zip:
        if self.function_version.config.code.is_hot_reloading():
            container_config.env_vars["LOCALSTACK_HOT_RELOADING_ENABLED"] = "1"
            if container_config.volumes is None:
                container_config.volumes = VolumeMappings()
codereview_new_python_data_11594
def _subresources(resource_id, resources, resource_type, func, stack_name):
    kwargs["uri"] = uri
-    integration_responses = []
-    if kwargs.get("integrationResponses"):
-        integration_responses = kwargs["integrationResponses"]
-        del kwargs["integrationResponses"]

    apigateway.put_integration(
        restApiId=api_id,

nit: could this work? Then you don't have to check and delete from `kwargs`
```suggestion
    integration_responses = kwargs.pop("integrationResponses", [])
```

def _subresources(resource_id, resources, resource_type, func, stack_name):
    kwargs["uri"] = uri
+    integration_responses = kwargs.pop("integrationResponses", [])

    apigateway.put_integration(
        restApiId=api_id,
codereview_new_python_data_11595
def test_generic_destination_routing(
    self, lambda_client, logs_client, deploy_cfn_template, cfn_client, on_success, on_failure
):
    """
-    This fairly simple template lets us choose between the 4 different destinations for both OnSuccess as well as OnSuccess.
    The template chooses between one of 4 ARNs via indexed access according to this mapping:
    0: SQS

Both OnSuccess and OnFailure, right?
```suggestion
    This fairly simple template lets us choose between the 4 different destinations for both OnSuccess as well as OnFailure.
```

def test_generic_destination_routing(
    self, lambda_client, logs_client, deploy_cfn_template, cfn_client, on_success, on_failure
):
    """
+    This fairly simple template lets us choose between the 4 different destinations for both OnSuccess as well as OnFailure.
    The template chooses between one of 4 ARNs via indexed access according to this mapping:
    0: SQS
codereview_new_python_data_11596
def test_render_template_values():
        ('"""', '\\"\\"\\"'),
        ('{"foo": 123}', '{\\"foo\\": 123}'),
        ('{"foo"": 123}', '{\\"foo\\"\\": 123}'),
-        (1, 1),
-        (None, None),
    )
    for string, expected in escape_tests:
        escaped = util.escapeJavaScript(string)

Shouldn't this be
```suggestion
        (None, "null"),
```
? Ideally we'd parity test this against real AWS, but I would expect that the VTL template should not render the literal string "None" (as this is Python specific), but rather "null" (Java- or JSON-specific). Should we fire up a quick parity test (or manual check in AWS) to confirm this?

def test_render_template_values():
        ('"""', '\\"\\"\\"'),
        ('{"foo": 123}', '{\\"foo\\": 123}'),
        ('{"foo"": 123}', '{\\"foo\\"\\": 123}'),
+        (1, "1"),
+        (None, "null"),
    )
    for string, expected in escape_tests:
        escaped = util.escapeJavaScript(string)
codereview_new_python_data_11597
def factory(name: str) -> None:

@pytest.fixture
-def ses_configuration_set_event_destination(ses_client):
    event_destinations = []

    def factory(config_set_name: str, event_destination_name: str, topic_arn: str) -> None:

There seem to be 4 different destinations possible, maybe this should have `sns` in the name to make it clear we would configure only SNS as an event destination?

It also seems SES needs to be allowed to publish to the SNS topic? (not currently enforced in LocalStack, but to be able to AWS-test it)
https://docs.aws.amazon.com/ses/latest/dg/event-publishing-add-event-destination-sns.html
Maybe we could add this policy to the topic while creating the configuration set event destination.

def factory(name: str) -> None:

@pytest.fixture
+def ses_configuration_set_sns_event_destination(ses_client):
    event_destinations = []

    def factory(config_set_name: str, event_destination_name: str, topic_arn: str) -> None:
codereview_new_python_data_11598
def add_xray_header(request, **kwargs):
    ] = "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"

    try:
-        # TODO this needs to be removed again!
        sns_client.meta.events.register("before-send.sns.Publish", add_xray_header)
        topic = sns_create_topic()

what does this depend on? (when can we remove it?)

def add_xray_header(request, **kwargs):
    ] = "Root=1-3152b799-8954dae64eda91bc9a23a7e8;Parent=7fa8c0f79203be72;Sampled=1"

    try:
        sns_client.meta.events.register("before-send.sns.Publish", add_xray_header)
        topic = sns_create_topic()
codereview_new_python_data_11599
def get_docker_image_to_start():
        "The localstack/localstack-full image is deprecated. Please remove this environment variable."
    )
    image_name = constants.DOCKER_IMAGE_NAME_FULL
-    if os.environ.get("LOCALSTACK_API_KEY") and os.environ.get("LOCALSTACK_API_KEY").strip():
        image_name = constants.DOCKER_IMAGE_NAME_PRO
    return image_name

nit:
```suggestion
    if os.environ.get("LOCALSTACK_API_KEY", "").strip():
```

def get_docker_image_to_start():
        "The localstack/localstack-full image is deprecated. Please remove this environment variable."
    )
    image_name = constants.DOCKER_IMAGE_NAME_FULL
+    if is_api_key_configured():
        image_name = constants.DOCKER_IMAGE_NAME_PRO
    return image_name
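The revised hunk calls `is_api_key_configured()`, a helper that is not shown in the record. A minimal sketch of what such a helper might look like, folding in the reviewer's suggestion (the helper's name comes from the hunk; its module and exact body are assumptions):

```python
import os


def is_api_key_configured() -> bool:
    # Hypothetical helper: treat a missing or whitespace-only
    # LOCALSTACK_API_KEY as "not configured".
    return bool(os.environ.get("LOCALSTACK_API_KEY", "").strip())
```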
codereview_new_python_data_11600
def _clear_bucket_from_store(self, bucket: BucketName):

def on_after_init(self):
    apply_moto_patches()
-    # registering of virtual host routes happens with the hook on_infra_ready
    register_website_hosting_routes(router=ROUTER)
    register_custom_handlers()

def __init__(self) -> None:
    super().__init__()

nit: I think this comment might be a bit confusing (since it seems you are commenting on the `register_website_hosting_routes`). Maybe you could move this down to L168?

def _clear_bucket_from_store(self, bucket: BucketName):

def on_after_init(self):
    apply_moto_patches()
    register_website_hosting_routes(router=ROUTER)
    register_custom_handlers()
+    # registering of virtual host routes happens with the hook on_infra_ready in virtual_host.py

def __init__(self) -> None:
    super().__init__()
codereview_new_python_data_11601
def generate_random(
    self, context: RequestContext, request: GenerateRandomRequest
) -> GenerateRandomResponse:
    number_of_bytes = request.get("NumberOfBytes")
-    if not number_of_bytes:
        raise ValidationException("NumberOfBytes is required.")
-    if number_of_bytes > 1024:
        raise ValidationException(
            f"1 validation error detected: Value '{number_of_bytes}' at 'numberOfBytes' failed "
            "to satisfy constraint: Member must have value less than or "

The `not` here is problematic, as this condition evaluates to true for `number_of_bytes` being `0` and `None`. The exceptions raised by AWS do differ for these cases though.

def generate_random(
    self, context: RequestContext, request: GenerateRandomRequest
) -> GenerateRandomResponse:
    number_of_bytes = request.get("NumberOfBytes")
+    if number_of_bytes is None:
        raise ValidationException("NumberOfBytes is required.")
+    if number_of_bytes < 1 or number_of_bytes > 1024:
        raise ValidationException(
            f"1 validation error detected: Value '{number_of_bytes}' at 'numberOfBytes' failed "
            "to satisfy constraint: Member must have value less than or "
codereview_new_python_data_11602
-from localstack.services.awslambda.invocation.runtime_executor import RuntimeExecutorPlugin


class DockerRuntimeExecutorPlugin(RuntimeExecutorPlugin):

we're still importing this entire import path when we start localstack, which includes the lambda API and everything under `localstack.services.awslambda.invocation.lambda_models`. could we just move the `RuntimeExecutorPlugin` class here as well, to make sure we have no other imports?

+from plugin import Plugin
+
+
+class RuntimeExecutorPlugin(Plugin):
+    namespace = "localstack.lambda.runtime_executor"


class DockerRuntimeExecutorPlugin(RuntimeExecutorPlugin):
codereview_new_python_data_11603
def test_publish_get_delete_message_batch(self, sqs_client, sqs_create_queue):
    assert "Messages" not in confirmation.keys()

@pytest.mark.aws_validated
-def test_delete_message_batch_invalid_msg_id(self, sqs_create_queue, sqs_client, snapshot):
    self._add_error_detail_transformer(snapshot)
    queue_name = f"queue-{short_uid()}"
    queue_url = sqs_create_queue(QueueName=queue_name)
-    invalid_ids = ["", "testLongId" * 10, "invalid:id"]
-
-    for idx, invalid_id in enumerate(invalid_ids):
-        delete_entries = [{"Id": invalid_id, "ReceiptHandle": "testHandle1"}]
-        with pytest.raises(ClientError) as e:
-            sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=delete_entries)
-        snapshot.match(f"error_response_{idx}", e.value.response)

@pytest.mark.aws_validated
def test_create_and_send_to_fifo_queue(self, sqs_client, sqs_create_queue):

nit: i guess we could use parameterization here:
```python
@pytest.mark.parametrize(argnames="invalid_message_id", argvalues=["", "testLongId"*10, "invalid:id"])
def test_delete_message_batch_invalid_msg_id(self, invalid_message_id, sqs_create_queue, sqs_client, snapshot):
    # ... proceed without loop code
    snapshot.match("error_response", e.value.response)
```

def test_publish_get_delete_message_batch(self, sqs_client, sqs_create_queue):
    assert "Messages" not in confirmation.keys()

@pytest.mark.aws_validated
+@pytest.mark.parametrize(
+    argnames="invalid_message_id", argvalues=["", "testLongId" * 10, "invalid:id"]
+)
+def test_delete_message_batch_invalid_msg_id(
+    self, invalid_message_id, sqs_create_queue, sqs_client, snapshot
+):
    self._add_error_detail_transformer(snapshot)
    queue_name = f"queue-{short_uid()}"
    queue_url = sqs_create_queue(QueueName=queue_name)
+    delete_entries = [{"Id": invalid_message_id, "ReceiptHandle": "testHandle1"}]
+    with pytest.raises(ClientError) as e:
+        sqs_client.delete_message_batch(QueueUrl=queue_url, Entries=delete_entries)
+    snapshot.match("error_response", e.value.response)

@pytest.mark.aws_validated
def test_create_and_send_to_fifo_queue(self, sqs_client, sqs_create_queue):
codereview_new_python_data_11604
def delete_lambda_function(function_name: str) -> Dict[None, None]:

def get_lambda_url_config(api_id: str, region: str = None):
-    store = get_awslambda_store()
    url_configs = store.url_configs.values()
    lambda_url_configs = [config for config in url_configs if config.get("CustomId") == api_id]
    return lambda_url_configs[0]

We are not using the region parameter here at all anymore. Even though it might be extractable from the contextvars (or thread locals, I think), it would be better to explicitly pass it here, I think.

def delete_lambda_function(function_name: str) -> Dict[None, None]:

def get_lambda_url_config(api_id: str, region: str = None):
+    store = get_awslambda_store(region=region)
    url_configs = store.url_configs.values()
    lambda_url_configs = [config for config in url_configs if config.get("CustomId") == api_id]
    return lambda_url_configs[0]
codereview_new_python_data_11605
from localstack.utils.strings import short_uid

-# Domain deployment in AWS takes too much time
-@pytest.mark.only_localstack
def test_domain(deploy_cfn_template, opensearch_client, cfn_client):
    name = f"domain-{short_uid()}"

```suggestion
```
the tests aren't run against AWS by default anyway

from localstack.utils.strings import short_uid

def test_domain(deploy_cfn_template, opensearch_client, cfn_client):
    name = f"domain-{short_uid()}"
codereview_new_python_data_11606
def execute(event, context):
    lambda_handler = execute

if runtime.startswith("go1") and not use_docker():
-    go_installer = awslambda_go_runtime_package.get_installer()
-    if not go_installer.is_installed():
-        go_installer.install()
    ensure_readable(main_file)

You can just call `awslambda_go_runtime_package.install()` here.

def execute(event, context):
    lambda_handler = execute

if runtime.startswith("go1") and not use_docker():
+    awslambda_go_runtime_package.install()
    ensure_readable(main_file)
codereview_new_python_data_11607
class LambdaStore(BaseStore):
    functions: dict[str, Function] = LocalAttribute(default=dict)
    event_source_mappings: dict[str, EventSourceMapping] = LocalAttribute(default=dict)
    code_signing_configs: dict[str, CodeSigningConfig] = LocalAttribute(default=dict)
    layers: dict[str, Layer] = LocalAttribute(default=dict)
    settings: AccountSettings = LocalAttribute(default=AccountSettings)

Maybe we can add a short docstring to these fields, for clarity. For example, `functions` seems to use the function name as index, whereas `code_signing_configs` uses the code signing ARN as index.

class LambdaStore(BaseStore):
+    # maps function names to the respective Function
    functions: dict[str, Function] = LocalAttribute(default=dict)
+
+    # maps EventSourceMapping ARNs to the respective EventSourceMapping
    event_source_mappings: dict[str, EventSourceMapping] = LocalAttribute(default=dict)
+
+    # maps CodeSigningConfig ARNs to the respective CodeSigningConfig
    code_signing_configs: dict[str, CodeSigningConfig] = LocalAttribute(default=dict)
+
+    # maps Layer ARNs to the respective Layer
    layers: dict[str, Layer] = LocalAttribute(default=dict)
+
+    # region-specific account settings/limits
    settings: AccountSettings = LocalAttribute(default=AccountSettings)
codereview_new_python_data_11608
def prepare_version(function_version: FunctionVersion) -> None:
    target_code = get_code_path_for_function(function_version)
    target_code.mkdir(parents=True, exist_ok=True)
    with NamedTemporaryFile() as file:
        file.write(function_version.config.code.get_lambda_archive())
        file.flush()
        unzip(file.name, str(target_code))

i can see that `get_lambda_archive` reads the entire archive file into memory before writing it to disk again. will this be a problem for large lambda archives? what are the largest files we've seen? s3 has a `download_file` operation that handles streaming IO, maybe we could use that instead? that saves us the extra in-memory roundtrip. OK to add a todo, seems this was part of the old code already.

def prepare_version(function_version: FunctionVersion) -> None:
    target_code = get_code_path_for_function(function_version)
    target_code.mkdir(parents=True, exist_ok=True)
    with NamedTemporaryFile() as file:
+        # TODO use streaming to avoid heavy memory impact of loading zip file, e.g. via s3.download_file
        file.write(function_version.config.code.get_lambda_archive())
        file.flush()
        unzip(file.name, str(target_code))
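For reference, boto3's `download_file` streams the object to disk in chunks rather than buffering it in memory, which is the alternative the TODO points at. A minimal sketch, under the assumption that the bucket and key of the code archive are known (the function and parameter names are illustrative, not from the record):

```python
import boto3

s3 = boto3.client("s3")


def download_archive_streaming(bucket: str, key: str, target_path: str) -> None:
    # download_file streams the object to disk in chunks,
    # avoiding an in-memory copy of the entire archive.
    s3.download_file(bucket, key, target_path)
```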
codereview_new_python_data_11609
"sqs", "ssm", "stepfunctions", - "stores", ] CloudFormation is a bit of a special case - the `models` package is more intended for the CFn resource model implementations (e.g., `AWS::Lambda::Function`, etc). I understand that the name clashes with the general convention of putting stores into `models.py` - but perhaps we could move the file to `services/cloudformation/stores.py`? "sqs", "ssm", "stepfunctions", ]
codereview_new_python_data_11610
def stop(self, quiet: bool = False) -> None:

def start_thread(method, *args, **kwargs) -> FuncThread:  # TODO: find all usages and add names...
    """Start the given method in a background thread, and add the thread to the TMP_THREADS shutdown hook"""
    _shutdown_hook = kwargs.pop("_shutdown_hook", True)
-    # if not kwargs.get("name"):
-    #     print("oh no")
    kwargs.setdefault("name", method.__name__)
    thread = FuncThread(method, *args, **kwargs)
    thread.start()

Should be removed / proper warning?

def stop(self, quiet: bool = False) -> None:

def start_thread(method, *args, **kwargs) -> FuncThread:  # TODO: find all usages and add names...
    """Start the given method in a background thread, and add the thread to the TMP_THREADS shutdown hook"""
    _shutdown_hook = kwargs.pop("_shutdown_hook", True)
+    if not kwargs.get("name"):
+        # technically we should add a new level here for *internal* warnings
+        LOG.debug("start_thread called without providing a custom name")
    kwargs.setdefault("name", method.__name__)
    thread = FuncThread(method, *args, **kwargs)
    thread.start()
codereview_new_python_data_11611
def send_events():
        record["Data"] = to_str(base64.b64encode(record["Data"]))
        last_sequence_number = record["SequenceNumber"]
    if not records:
-        # on AWS this is approximately 5 sec, however since this is not async, it's a blocking call
-        # putting temporarily to 3 seconds until ASF migration or an async call
        time.sleep(3)

    response = {

Could you elaborate on that comment? The `send_events` generator is executed async by the HTTP framework already, isn't it?

def send_events():
        record["Data"] = to_str(base64.b64encode(record["Data"]))
        last_sequence_number = record["SequenceNumber"]
    if not records:
+        # On AWS there is *at least* 1 event every 5 seconds,
+        # but this is not possible in this structure.
+        # In order to avoid a 5-second blocking call, we make the compromise of 3 seconds.
        time.sleep(3)

    response = {
codereview_new_python_data_11612
def create_cluster(
    # FIXME: in AWS, the Endpoint is set once the cluster is running, not before (like here), but our tests and
    # in particular cloudformation currently rely on the assumption that it is set when the domain is created.
    status = region.opensearch_domains[domain_key.domain_name]
    status["Endpoint"] = cluster.url.split("://")[-1].replace("0.0.0.0", LOCALSTACK_HOSTNAME)
    status["EngineVersion"] = engine_version

I think the content of `cluster.url` here depends on the `EDGE_BIND_HOST` (as implemented in `cluster_manager.py`). So this won't work if the `EDGE_BIND_HOST` is not `0.0.0.0`, right?

def create_cluster(
    # FIXME: in AWS, the Endpoint is set once the cluster is running, not before (like here), but our tests and
    # in particular cloudformation currently rely on the assumption that it is set when the domain is created.
    status = region.opensearch_domains[domain_key.domain_name]
+    # Replacing only 0.0.0.0 here, as usage of this bind address mostly means running in docker, which is used locally.
+    # If another bind address is used, we want to keep it in the endpoint, as this is a conscious user decision to
+    # access it from another device on the network.
    status["Endpoint"] = cluster.url.split("://")[-1].replace("0.0.0.0", LOCALSTACK_HOSTNAME)
    status["EngineVersion"] = engine_version
codereview_new_python_data_11613
def handler(event, context):
    protocol = "https" if os.environ.get("USE_SSL") else "http"
    endpoint_url = "{}://{}:{}".format(protocol, os.environ["LOCALSTACK_HOSTNAME"], EDGE_PORT)
    s3 = boto3.client("s3", endpoint_url=endpoint_url, verify=False)
-    # print(f"{os.environ['BUCKET_NAME']}")
    s3.download_file(
        os.environ["BUCKET_NAME"],
        os.environ["OBJECT_NAME"],

This is probably a leftover from debugging?

def handler(event, context):
    protocol = "https" if os.environ.get("USE_SSL") else "http"
    endpoint_url = "{}://{}:{}".format(protocol, os.environ["LOCALSTACK_HOSTNAME"], EDGE_PORT)
    s3 = boto3.client("s3", endpoint_url=endpoint_url, verify=False)
    s3.download_file(
        os.environ["BUCKET_NAME"],
        os.environ["OBJECT_NAME"],
codereview_new_python_data_11614
def cmd_logs(follow: bool):
    if not DOCKER_CLIENT.is_container_running(container_name):
        console.print("localstack container not running")
-        with open(logfile) as fd:
-            for line in fd:
-                console.print(line.rstrip("\n\r"))
        sys.exit(1)

    if follow:

We could improve the message here by indicating, when the logs are available, that these are the logs from the previous run

def cmd_logs(follow: bool):
    if not DOCKER_CLIENT.is_container_running(container_name):
        console.print("localstack container not running")
+        if os.path.exists(logfile):
+            console.print("printing logs from previous run")
+            with open(logfile) as fd:
+                for line in fd:
+                    click.echo(line, nl=False)
        sys.exit(1)

    if follow:
codereview_new_python_data_11616
def _entity_as_text(self):

@property
def _boolean_as_text(self):
    if self.value:
-        return (_("Yes"))
-    return (_("No"))

@property
def value_as_html(self):

why not `_("Yes")`?

def _entity_as_text(self):

@property
def _boolean_as_text(self):
    if self.value:
+        return _("Yes")
+    return _("No")

@property
def value_as_html(self):
codereview_new_python_data_11618
def test_choose_param_value_objective(objective_alias):

@pytest.mark.parametrize('collection', ['1d_np', '2d_np', 'pd_float', 'pd_str', '1d_list', '2d_list'])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
-def test__list_to_1d_numpy(collection, dtype):
    collection2y = {
        '1d_np': np.random.rand(10),
        '2d_np': np.random.rand(10, 1),

```suggestion
def test_list_to_1d_numpy(collection, dtype):
```
Thanks for the thoroughness, but please revert this line. It isn't necessary for the test name to exactly match the function name, and I'd prefer to leave this line unchanged so that the [git blame](https://docs.github.com/en/repositories/working-with-files/using-files/viewing-a-file#viewing-the-line-by-line-revision-history-for-a-file) refers to the last time the test definition line was materially changed.

def test_choose_param_value_objective(objective_alias):

@pytest.mark.parametrize('collection', ['1d_np', '2d_np', 'pd_float', 'pd_str', '1d_list', '2d_list'])
@pytest.mark.parametrize('dtype', [np.float32, np.float64])
+def test_list_to_1d_numpy(collection, dtype):
    collection2y = {
        '1d_np': np.random.rand(10),
        '2d_np': np.random.rand(10, 1),
codereview_new_python_data_11619
def fit(
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_init_score: Optional[List[_DaskVectorLike]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
-    eval_metric: Optional[Union[_LGBM_ScikitCustomEvalFunction, str, List[Union[_LGBM_ScikitCustomEvalFunction, str]]]] = None,
    eval_at: Union[List[int], Tuple[int]] = (1, 2, 3, 4, 5),
    **kwargs: Any
) -> "DaskLGBMRanker":

```suggestion
    eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
```

def fit(
    eval_sample_weight: Optional[List[_DaskVectorLike]] = None,
    eval_init_score: Optional[List[_DaskVectorLike]] = None,
    eval_group: Optional[List[_DaskVectorLike]] = None,
+    eval_metric: Optional[_LGBM_ScikitEvalMetricType] = None,
    eval_at: Union[List[int], Tuple[int]] = (1, 2, 3, 4, 5),
    **kwargs: Any
) -> "DaskLGBMRanker":
codereview_new_python_data_11622
def train_fn():
        assert eval_result['valid']['l2'][1] > eval_result['valid']['l2'][0]  # valid didn't
    else:
        with pytest.warns(UserWarning, match='Only training set found, disabling early stopping.'):
-            train_fn()

@pytest.mark.parametrize('first_metric_only', [True, False])

```suggestion
            bst = train_fn()
        assert bst.current_iteration() == 2
        assert bst.best_iteration == 0
```
Instead of only checking that this warning is raised, can you also please add some checks that early stopping was not actually triggered? That would catch cases where this warning is raised but early stopping is still accidentally enabled.

def train_fn():
        assert eval_result['valid']['l2'][1] > eval_result['valid']['l2'][0]  # valid didn't
    else:
        with pytest.warns(UserWarning, match='Only training set found, disabling early stopping.'):
+            bst = train_fn()
+        assert bst.current_iteration() == 2
+        assert bst.best_iteration == 0

@pytest.mark.parametrize('first_metric_only', [True, False])
codereview_new_python_data_11626
def c_str(string: str) -> ctypes.c_char_p:
    return ctypes.c_char_p(string.encode('utf-8'))

-def c_array(ctype: type, values: List[ctypes.c_char_p]) -> ctypes.Array:
    """Convert a Python array to C array."""
    return (ctype * len(values))(*values)

I believe this is a universal function, not just for strings. For example:
https://github.com/dmlc/xgboost/blob/ff1c559084cf2b3732b8406109f2b39e9b665528/python-package/xgboost/core.py#L371-L377
https://github.com/dmlc/xgboost/blob/ff1c559084cf2b3732b8406109f2b39e9b665528/python-package/xgboost/core.py#L1427
https://github.com/dmlc/xgboost/blob/ff1c559084cf2b3732b8406109f2b39e9b665528/python-package/xgboost/core.py#L1552
```suggestion
def c_array(ctype: type, values: List[Any]) -> ctypes.Array:
```

def c_str(string: str) -> ctypes.c_char_p:
    return ctypes.c_char_p(string.encode('utf-8'))

+def c_array(ctype: type, values: List[Any]) -> ctypes.Array:
    """Convert a Python array to C array."""
    return (ctype * len(values))(*values)
codereview_new_python_data_11627
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_va
    params = deepcopy(params)

    aliases = _ConfigAliases.get_sorted(main_param_name)
-    aliases = [a for a in aliases if a in params.keys() and a != main_param_name]

    # if main_param_name was provided, keep that value and remove all aliases
    if main_param_name in params.keys():

Why is this part `if a in params.keys()` needed here?

def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_va
    params = deepcopy(params)

    aliases = _ConfigAliases.get_sorted(main_param_name)
+    aliases = [a for a in aliases if a != main_param_name]

    # if main_param_name was provided, keep that value and remove all aliases
    if main_param_name in params.keys():
codereview_new_python_data_11665
def set_have_nest():

def have_computronium() -> bool:
-    """Do we have a planet with a computronium moon, which is set to research focus?"""
    return _get_planet_catalog().have_computronium


def computronium_candidates() -> List[PlanetId]:
-    """Returns list of own planets that have a computronium moon and a species capable of research."""
    return _get_planet_catalog().pids_computronium


def have_honeycomb() -> bool:
-    """Do we have a planet with the honeycomb special, which is set to industry focus?"""
    return _get_planet_catalog().have_honeycomb


def honeycomb_candidates() -> List[PlanetId]:
-    """Returns list of own planets that have the honeycomb special and a species capable of production."""
    return _get_planet_catalog().pids_honeycomb

```suggestion
    """Return True if we have a planet with a computronium moon, which is set to research focus."""
```

def set_have_nest():

def have_computronium() -> bool:
+    """Return True if we have a planet with a computronium moon, which is set to research focus."""
    return _get_planet_catalog().have_computronium


def computronium_candidates() -> List[PlanetId]:
+    """Return list of own planets that have a computronium moon and a species capable of research."""
    return _get_planet_catalog().pids_computronium


def have_honeycomb() -> bool:
+    """Return True if we have a planet with a honeycomb special, which is set to production focus."""
    return _get_planet_catalog().have_honeycomb


def honeycomb_candidates() -> List[PlanetId]:
+    """Return list of own planets that have the honeycomb special and a species capable of production."""
    return _get_planet_catalog().pids_honeycomb
codereview_new_python_data_11671
def systems_connected_to_system(system_id: SystemId) -> Set[SystemId]:

@cache_for_current_turn
def within_n_jumps(system_id: SystemId, n: int) -> FrozenSet[SystemId]:
    if n < 1:
        return frozenset({system_id})
-    inner_tier = within_n_jumps(system_id, n - 1)
-    result = set(inner_tier)
-    for sys_id in inner_tier:
        result.update(get_neighbors(sys_id))
    return frozenset(result)

We will call `get_neighbors` for system_id n times, and for each neighbor n-1 times. We use memoization for `get_neighbors`, so it won't affect performance very much. Some experiments: if we see 11 systems around, it will be 90 calls; when we see 78, it will be 354 calls. A classical BFS without recursion, with a check of visited systems, would be a bit faster. It's still pretty fast, and 10 should be an extreme number.

def systems_connected_to_system(system_id: SystemId) -> Set[SystemId]:

@cache_for_current_turn
def within_n_jumps(system_id: SystemId, n: int) -> FrozenSet[SystemId]:
+    """
+    Returns a frozenset of all systems within n jumps from the given system.
+    """
    if n < 1:
        return frozenset({system_id})
+    elif n == 1:
+        return frozenset({system_id} | get_neighbors(system_id))
+    tier_minus_2 = within_n_jumps(system_id, n - 2)
+    tier_minus_1 = within_n_jumps(system_id, n - 1)
+    result = set(tier_minus_1)
+    for sys_id in tier_minus_1 - tier_minus_2:
        result.update(get_neighbors(sys_id))
    return frozenset(result)
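The reviewer's suggested alternative — a classical BFS without recursion and with a visited set — would look roughly like the sketch below. It assumes the same `SystemId`, `FrozenSet`, and `get_neighbors` names as the surrounding module and queries each reached system's neighbors exactly once.

```python
def within_n_jumps_bfs(system_id: SystemId, n: int) -> FrozenSet[SystemId]:
    # Classical BFS: expand one tier at a time and never revisit a system,
    # so get_neighbors is called once per reached system.
    visited = {system_id}
    frontier = {system_id}
    for _ in range(n):
        next_frontier = set()
        for sys_id in frontier:
            next_frontier.update(get_neighbors(sys_id))
        frontier = next_frontier - visited
        if not frontier:
            break
        visited.update(frontier)
    return frozenset(visited)
```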
codereview_new_python_data_11676
def test_s2nd_falls_back_to_full_connection(managed_process, tmp_path, cipher, c

@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
-@pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p != Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL, S2N], ids=get_parameter_name)
@pytest.mark.parametrize("other_provider", [S2N], ids=get_parameter_name)
def test_session_resumption_s2n_client_tls13_server_not_tls13(managed_process, cipher, curve, protocol, provider, other_provider, certificate):

I'm not sure this scenario warrants an integ test. My first thought was that you could just recreate it using a self talk test. s2n_self_talk_resumption test.c has a lot of complex session resumption tests, you could add it there. You would just need a client with a tls13-capable config and a server with a tls12-capable config.

def test_s2nd_falls_back_to_full_connection(managed_process, tmp_path, cipher, c

@pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name)
@pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name)
@pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name)
+@pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p < Protocols.TLS13], ids=get_parameter_name)
@pytest.mark.parametrize("provider", [OpenSSL, S2N], ids=get_parameter_name)
@pytest.mark.parametrize("other_provider", [S2N], ids=get_parameter_name)
def test_session_resumption_s2n_client_tls13_server_not_tls13(managed_process, cipher, curve, protocol, provider, other_provider, certificate):
codereview_new_python_data_11695
def test_s2n_server_tls12_signature_algorithm_fallback(managed_process, cipher,
    # extension.
    #
    # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.4.1
-    expected_signature_algorithm_tls12 = (Signatures.ECDSA_SHA1, Signatures.RSA_SHA1)[
-        signature == Signatures.RSA_SHA224]

    for results in server.get_results():
        results.assert_success()
        assert to_bytes("Actual protocol version: {}".format(
            expected_version)) in results.stdout
        assert signature_marker(Provider.ServerMode,
-                               expected_signature_algorithm_tls12) in results.stdout
        assert random_bytes in results.stdout

I see what you're doing here, but it seems like this could be written in a less confusing way. I would maybe consider just if/elses here to set the expected signature algorithm.

def test_s2n_server_tls12_signature_algorithm_fallback(managed_process, cipher,
    # extension.
    #
    # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.4.1
+    expected_sig_alg_tls12 = None
+    if signature == Signatures.RSA_SHA224:
+        expected_sig_alg_tls12 = Signatures.RSA_SHA1
+    else:
+        expected_sig_alg_tls12 = Signatures.ECDSA_SHA1

    for results in server.get_results():
        results.assert_success()
        assert to_bytes("Actual protocol version: {}".format(
            expected_version)) in results.stdout
        assert signature_marker(Provider.ServerMode,
+                               expected_sig_alg_tls12) in results.stdout
        assert random_bytes in results.stdout
codereview_new_python_data_11696
def _zip(x): return L(x).zip()

@docs
@funcs_kwargs
class DataBlock():
-    """
-    Generic container to quickly build `Datasets` and `DataLoaders`.
-    """
    get_x=get_items=splitter=get_y = None
    blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()

Please don't change the formatting of existing fastai code, and when you add code please try to follow fastai standards. Our standard for docstrings is to use a single line and a single pair of double quotes.

def _zip(x): return L(x).zip()

@docs
@funcs_kwargs
class DataBlock():
+    "Generic container to quickly build `Datasets` and `DataLoaders`."
    get_x=get_items=splitter=get_y = None
    blocks,dl_type = (TransformBlock,TransformBlock),TfmdDL
    _methods = 'get_items splitter get_y get_x'.split()
codereview_new_python_data_11700
SEARCH_USER_LIMIT = 10

-@api_bp.route('/playlist/search/users/', methods=['GET', 'OPTIONS'])
@crossdomain
@ratelimit()
def search_user():
    search_term = request.args.get("search_term")
    user = validate_auth_header()
    user_id = user.id
    if search_term:
-        users = db_user.search(search_term, SEARCH_USER_LIMIT, user_id)
    else:
        users = []
    return jsonify(

Let us do this, as there can be a need in the frontend/client to get more users than the default count value:
```suggestion
        count = get_non_negative_param('count', SEARCH_USER_LIMIT)
        users = db_user.search(search_term, count, user_id)
```

SEARCH_USER_LIMIT = 10

+@api_bp.route('/search/users/', methods=['GET', 'OPTIONS'])
@crossdomain
@ratelimit()
def search_user():
    search_term = request.args.get("search_term")
    user = validate_auth_header()
    user_id = user.id
    if search_term:
+        users = db_user.search_user_name(search_term, SEARCH_USER_LIMIT, user_id)
    else:
        users = []
    return jsonify(
codereview_new_python_data_11701
def fetch_playlist_recording_metadata(playlist: Playlist):
    caa_id = row.get("caa_id")
    caa_release_mbid = row.get("caa_release_mbid")
    if caa_id and caa_release_mbid:
-        # TODO: Discuss image vs additional_metadata, what to permit caa.org link archive.org link etc?
        rec.additional_metadata = {"caa_id": caa_id, "caa_release_mbid": caa_release_mbid}

Remove or ticket?

def fetch_playlist_recording_metadata(playlist: Playlist):
    caa_id = row.get("caa_id")
    caa_release_mbid = row.get("caa_release_mbid")
    if caa_id and caa_release_mbid:
        rec.additional_metadata = {"caa_id": caa_id, "caa_release_mbid": caa_release_mbid}
codereview_new_python_data_11702
USERS_PER_MESSAGE = 5
-FRESH_RELEASES_ENDPOINT = "https://test-api.listenbrainz.org/1/explore/fresh-releases/"


def load_all_releases():

Is this still desired?

USERS_PER_MESSAGE = 5
+FRESH_RELEASES_ENDPOINT = "https://api.listenbrainz.org/1/explore/fresh-releases/"


def load_all_releases():
codereview_new_python_data_11703
def test_create_full_db(self):
    # make sure that the dump contains a full listens dump, a public and private dump (postgres),
    # a public and private dump (timescale) and a spark dump.
    archive_count = 0
-    print(os.listdir(os.path.join(self.tempdir, dump_name)))
    for file_name in os.listdir(os.path.join(self.tempdir, dump_name)):
        if file_name.endswith('.tar.xz') or file_name.endswith(".tar"):
            archive_count += 1

intended or leftover?

def test_create_full_db(self):
    # make sure that the dump contains a full listens dump, a public and private dump (postgres),
    # a public and private dump (timescale) and a spark dump.
    archive_count = 0
    for file_name in os.listdir(os.path.join(self.tempdir, dump_name)):
        if file_name.endswith('.tar.xz') or file_name.endswith(".tar"):
            archive_count += 1
codereview_new_python_data_11704
def test_index(self):

def test_downloads(self):
    resp = self.client.get(url_for('index.downloads'))
-    self.assertEqual(resp.status_code, 302)
-    self.assertEqual(resp.location, url_for('index.data'))

def test_data(self):
    resp = self.client.get(url_for('index.data'))

we didn't do it in CB, but I realised that we have a base test class (ServerTestCase) where we could implement our own assertRedirects that checks both the status code and the location without the full protocol/hostname. Do you think it's worth it? Note that we have a "variant" of this test also in https://github.com/metabrainz/listenbrainz-server/blob/981e2fe6413e344e78f255a2d605fd5045d5fb11/listenbrainz/webserver/views/test/test_user.py#L48-L57

def test_index(self):

def test_downloads(self):
    resp = self.client.get(url_for('index.downloads'))
+    self.assertRedirects(resp, url_for('index.data'))

def test_data(self):
    resp = self.client.get(url_for('index.data'))
codereview_new_python_data_11705
def assertRedirects(self, response, location, message=None, permanent=False):
    not_redirect = "HTTP Status %s expected but got %d" % (valid_status_code_str, response.status_code)
    self.assertIn(response.status_code, valid_status_codes, message or not_redirect)

-    self.assertTrue(response.location.endswith(location), message)

assert_redirects = assertRedirects

because we have to use assertTrue here, it might be nice to also have a default message that includes the expected and actual locations to help with debugging

def assertRedirects(self, response, location, message=None, permanent=False):
    not_redirect = "HTTP Status %s expected but got %d" % (valid_status_code_str, response.status_code)
    self.assertIn(response.status_code, valid_status_codes, message or not_redirect)

+    location_mismatch = "Expected redirect location %s but got %s" % (location, response.location)
+    self.assertTrue(response.location.endswith(location), message or location_mismatch)

assert_redirects = assertRedirects
codereview_new_python_data_11706
def select_timezone():
    try:
        update_timezone = str(form.timezone.data)
        db_usersetting.set_timezone(current_user.id, update_timezone)
-        flash.info("timezone reset")
    except DatabaseException:
        flash.error("Something went wrong! Unable to update timezone right now.")
        return redirect(url_for("profile.info"))

    if form.csrf_token.errors:
-        flash.error('Cannot update timezone due to error during authentication, please try again later.')
        return redirect(url_for('profile.info'))

    return render_template(

This should say: "Your timezone has been saved."

def select_timezone():
    try:
        update_timezone = str(form.timezone.data)
        db_usersetting.set_timezone(current_user.id, update_timezone)
+        flash.info("Your timezone has been saved.")
    except DatabaseException:
        flash.error("Something went wrong! Unable to update timezone right now.")
        return redirect(url_for("profile.info"))

    if form.csrf_token.errors:
+        flash.error('Unable to update timezone.')
        return redirect(url_for('profile.info'))

    return render_template(
codereview_new_python_data_11782
"-DUSES_CHART_JS", "-DUSE_SETTINGS_ARCHIVE", "-DUSE_CUSTOM_PROVISIONING" ] This should be enabled by default to be backward compatible with current feature set, and be undefined at will. The regular builds should still include the P2P feature. Maybe it would be better to change this from a `bool` to an `integer` define, like this: ``` #ifndef FEATURE_ESP_P2P #define FEATURE_ESP_P2P 1 // Defaults to on #endif // ifndef FEATURE_ESP_P2P ``` In your special builds or `Custom.h` you could use: ``` #if defined(FEATURE_ESP_P2P) && FEATURE_ESP_P2P #undef FEATURE_ESP_P2P #define FEATURE_ESP_P2P 0 // Disable P2P feature #endif // if defined(FEATURE_ESP_P2P) && FEATURE_ESP_P2P ``` And then test the setting with: ``` #if FEATURE_ESP_P2P // Enabled? ... ``` To turn it off in the pre_custom* Python scripts: ``` "-DFEATURE_ESP_P2P=0" ``` "-DUSES_CHART_JS", "-DUSE_SETTINGS_ARCHIVE", + "-DFEATURE_ESPEASY_P2P=1", "-DUSE_CUSTOM_PROVISIONING" ]
codereview_new_python_data_11786
def test_reduce_prod():
        [2, -7, 65]]


def test_reduce_prod_same_column():
    # See issue #3390
-    f0 = dt.Frame({"ints" : [0, 1, 0, 0, 1, 2]})
-    f1 = f0[:, {"prod" : prod(f.ints)}, f.ints]
-    frame_integrity_check(f1)
-    assert_equals(f1, dt.Frame({"ints" : [0, 1, 2], "prod" : [0, 1, 2]/dt.int64}))
-
-
-def test_reduce_prod_same_column1():
    # See issue #3390
    f0 = dt.Frame({"ints" : [0, 1, 2, 2, 1, 2]})
    f1 = f0[:, {"prod" : prod(f.ints)}, f.ints]

I guess this test is enough, as the one above only tests a very simple case when `prod(ints) == ints`.

def test_reduce_prod():
        [2, -7, 65]]


def test_reduce_prod_same_column():
    # See issue #3390
    f0 = dt.Frame({"ints" : [0, 1, 2, 2, 1, 2]})
    f1 = f0[:, {"prod" : prod(f.ints)}, f.ints]
codereview_new_python_data_11791
def flatc_annotate(schema, include=None, data=None, cwd=tests_path):
    )

# Run the generate_grpc_examples script
-# generate_grpc_examples.GenerateGRPCExamples()

Can you undo this?

def flatc_annotate(schema, include=None, data=None, cwd=tests_path):
    )

# Run the generate_grpc_examples script
+generate_grpc_examples.GenerateGRPCExamples()
codereview_new_python_data_11911
-from unittest import TestCase
-
-from pyk.kast import KApply, KSequence, KVariable
-from pyk.kastManip import splitConfigFrom
-
-
-class SplitConfigTest(TestCase):
-    def test_splitConfigFrom(self):
-        k_cell = KSequence([KApply('foo'), KApply('bar')])
-        term = KApply('<k>', [k_cell])
-        config, subst = splitConfigFrom(term)
-        self.assertEqual(config, KApply('<k>', [KVariable('K_CELL')]))
-        self.assertEqual(subst, {'K_CELL': k_cell})
-
-        map_item_cell = KApply('<mapItem>', [KApply('foo')])
-        map_cell = KApply('<mapCell>', [KApply('map_join', [map_item_cell, map_item_cell])])
-        config, subst = splitConfigFrom(map_cell)
-        self.assertEqual(config, KApply('<mapCell>', [KVariable('MAPCELL_CELL')]))
-        self.assertEqual(subst, {'MAPCELL_CELL': KApply('map_join', [map_item_cell, map_item_cell])})

Can this test be moved into `test_kastManip.py`?
codereview_new_python_data_11912
def setUp(self):
    self.kprove = KProve(self.kompiled_dir, kprove_main_file, Path(self.KPROVE_USE_DIR))
    self.kprove.prover_args += list(chain.from_iterable(['-I', include_dir] for include_dir in kprove_include_dirs))
-    # force computation of the symbol_table before updating it
-    if self.kprove.symbol_table:
-        self._update_symbol_table(self.kprove._symbol_table)

def tearDown(self):
    shutil.rmtree(self.KPROVE_USE_DIR, ignore_errors=True)

To me it looks like the original version achieves the same thing. Did you run into issues with that?

def setUp(self):
    self.kprove = KProve(self.kompiled_dir, kprove_main_file, Path(self.KPROVE_USE_DIR))
    self.kprove.prover_args += list(chain.from_iterable(['-I', include_dir] for include_dir in kprove_include_dirs))
+    self._update_symbol_table(self.kprove.symbol_table)

def tearDown(self):
    shutil.rmtree(self.KPROVE_USE_DIR, ignore_errors=True)
codereview_new_python_data_11913
def vattr(sort: str) -> KAtt:
        '==Int',
        KToken('N ==Int 1', 'Bool'),
        KApply('_==Int_', KVariable('N', vattr('Int')), intToken(1)),
-    ),  # noqa
)

for (name, token, expected) in tests:

`# noqa` can be removed here.

def vattr(sort: str) -> KAtt:
        '==Int',
        KToken('N ==Int 1', 'Bool'),
        KApply('_==Int_', KVariable('N', vattr('Int')), intToken(1)),
+    ),
)

for (name, token, expected) in tests:
codereview_new_python_data_11914
def vattr(sort: str) -> KAtt:
    return KAtt(FrozenDict({'org.kframework.kore.Sort': FrozenDict({'node': 'KSort', 'name': sort})}))

tests: Iterable[Tuple[str, KToken, KInner]] = (
-    ('variable', KToken('N', 'Int'), KVariable('N', vattr('K'))),  # TODO: This should parse as an int. # noqa
    (
        '==Int',
        KToken('N ==Int 1', 'Bool'),

```suggestion
    ('variable', KToken('N', 'Int'), KVariable('N', vattr('K'))),  # TODO: This should parse as an int.
```

def vattr(sort: str) -> KAtt:
    return KAtt(FrozenDict({'org.kframework.kore.Sort': FrozenDict({'node': 'KSort', 'name': sort})}))

tests: Iterable[Tuple[str, KToken, KInner]] = (
+    ('variable', KToken('N', 'Int'), KVariable('N', vattr('K'))),  # TODO: This should parse as an int.
    (
        '==Int',
        KToken('N ==Int 1', 'Bool'),
codereview_new_python_data_11915
def test(self):

class BooleanTest(TestCase):
-    def test_bool_simplify(self):
        # Given
        bool_test_1 = Bool.andBool([Bool.false, Bool.true])
        bool_test_2 = Bool.andBool([KApply('_==Int_', [intToken(3), intToken(4)]), Bool.true])

```suggestion
    def test_simplify_bool(self):
```

def test(self):

class BooleanTest(TestCase):
+    def test_simplify_bool(self):
        # Given
        bool_test_1 = Bool.andBool([Bool.false, Bool.true])
        bool_test_2 = Bool.andBool([KApply('_==Int_', [intToken(3), intToken(4)]), Bool.true])
codereview_new_python_data_11916
-from typing import cast
-
-from ..kast import KAst
-from ..ktool.kprint import KPrint, prettyPrintKast
-
-
-class MockKPrint:
-    def pretty_print(self, term: KAst) -> str:
-        return prettyPrintKast(term, symbol_table={})
-
-
-def mock_kprint() -> KPrint:
-    return cast(KPrint, MockKPrint())

Did you consider implementing this with inheritance? Also, if this class is used in only a single test, let's make it local to that module.

+from ..ktool.kprint import KPrint
+
+
+class MockKPrint(KPrint):
+    def __init__(self):
+        self.symbol_table = {}
codereview_new_python_data_11917
def nonempty_str(x: Any) -> str:

def add_indent(indent: str, lines: Iterable[str]) -> List[str]:
-    return list(map(lambda line: indent + line, lines))

def is_hexstring(x: str) -> bool:

```suggestion
    return [indent + line for line in lines]
```

def nonempty_str(x: Any) -> str:

def add_indent(indent: str, lines: Iterable[str]) -> List[str]:
+    return [indent + line for line in lines]

def is_hexstring(x: str) -> bool:
codereview_new_python_data_11918
def syntax_productions(self) -> List[KProduction]:

@staticmethod
def _is_non_free_constructor(label: str) -> bool:
    is_cell_map_constructor = label.endswith('CellMapItem') or label.endswith('CellMap_')
-    is_builtin_data_constructor = label in ['_Set_', '_List_', '_Map_', 'SetItem', 'ListItem', '_|->_']
    return is_cell_map_constructor or is_builtin_data_constructor

@property

Consider moving this method under the `functions` property.

def syntax_productions(self) -> List[KProduction]:

@staticmethod
def _is_non_free_constructor(label: str) -> bool:
    is_cell_map_constructor = label.endswith('CellMapItem') or label.endswith('CellMap_')
+    is_builtin_data_constructor = label in {'_Set_', '_List_', '_Map_', 'SetItem', 'ListItem', '_|->_'}
    return is_cell_map_constructor or is_builtin_data_constructor

@property
codereview_new_python_data_11919
def buildRule(ruleId, initConstrainedTerm, finalConstrainedTerm, claim=False, pr
    return (minimizeRule(rule, keepVars=newKeepVars), vremapSubst)


-def abstract_term_safely(kast, base_name='V'):
    vname = hash_str(kast)[0:8]
    return KVariable(base_name + '_' + vname)

Please add type hints.

def buildRule(ruleId, initConstrainedTerm, finalConstrainedTerm, claim=False, pr
    return (minimizeRule(rule, keepVars=newKeepVars), vremapSubst)


+def abstract_term_safely(kast: KInner, base_name: str = 'V') -> KVariable:
    vname = hash_str(kast)[0:8]
    return KVariable(base_name + '_' + vname)
codereview_new_python_data_11920
def _print_subgraph(indent: str, curr_node: KCFG.Node, prior_on_trace: List[KCFG
    edges_from = sorted(self.edge_likes(source_id=curr_node.id))
    if curr_node in processed_nodes:
-        if len(edges_from) == 0:
            return ret
        if curr_node in prior_on_trace:
            ret.append(indent + '┊ (looped back)')

```suggestion
        if not edges_from:
```

def _print_subgraph(indent: str, curr_node: KCFG.Node, prior_on_trace: List[KCFG
    edges_from = sorted(self.edge_likes(source_id=curr_node.id))
    if curr_node in processed_nodes:
+        if not edges_from:
            return ret
        if curr_node in prior_on_trace:
            ret.append(indent + '┊ (looped back)')
codereview_new_python_data_11921
def deconstruct_short_hash(h: str) -> Tuple[str, str]:
    x = h.lower()
    if is_hash(x):
        return (x, x)
-    (l, sep, r) = x.partition('...')
-    if sep == '...' and is_hexstring(l) and is_hexstring(r):
-        return (l, r)
    (l, sep, r) = x.partition('..')
    if sep == '..' and is_hexstring(l) and is_hexstring(r):
        return (l, r)

Shouldn't it be:
```suggestion
        return (x, '')
```
Also, I think we should support short hashes without the separator (e.g. `'abcd'`), interpreted as the hash prefix (e.g. `'abcd..'`).

def deconstruct_short_hash(h: str) -> Tuple[str, str]:
    x = h.lower()
    if is_hash(x):
        return (x, x)
    (l, sep, r) = x.partition('..')
    if sep == '..' and is_hexstring(l) and is_hexstring(r):
        return (l, r)
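The second half of the review — accepting a separator-free prefix such as `'abcd'` — is not implemented in the revised hunk. A hedged sketch of how the function might incorporate it, reusing `is_hash`/`is_hexstring` from the record (the trailing `ValueError` is an assumption about how invalid input is handled):

```python
from typing import Tuple


def deconstruct_short_hash(h: str) -> Tuple[str, str]:
    x = h.lower()
    if is_hash(x):
        return (x, x)
    (l, sep, r) = x.partition('..')
    if sep == '..' and is_hexstring(l) and is_hexstring(r):
        return (l, r)
    if is_hexstring(x):
        # a bare hexstring is treated as a prefix, i.e. 'abcd' acts like 'abcd..'
        return (x, '')
    raise ValueError(f'Bad short hash: {h}')
```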
codereview_new_python_data_11922
def node_dicts(n: int) -> List[Dict[str, Any]]:

def edge_dicts(*edges: Tuple[int, int]) -> List[Dict[str, Any]]:
-    def make_edge_dict(i, j, depth=1):
        return {'source': nid(i), 'target': nid(j), 'condition': TRUE.to_dict(), 'depth': depth}
-    return [make_edge_dict(*edge) for edge in edges]

def cover_dicts(*edges: Tuple[int, int]) -> List[Dict[str, Any]]:

```suggestion
    def _make_edge_dict(i, j, depth=1):
        return {'source': nid(i), 'target': nid(j), 'condition': TRUE.to_dict(), 'depth': depth}

    return [_make_edge_dict(*edge) for edge in edges]
```
I'm pretty sure our codestyle enforces the space around internally defined functions (maybe this repo isn't as strict as ERC20s?). But we have been naming internally defined functions with `_...` prefixes as a convention.

def node_dicts(n: int) -> List[Dict[str, Any]]:

def edge_dicts(*edges: Tuple[int, int]) -> List[Dict[str, Any]]:

+    def _make_edge_dict(i, j, depth=1):
        return {'source': nid(i), 'target': nid(j), 'condition': TRUE.to_dict(), 'depth': depth}

+    return [_make_edge_dict(*edge) for edge in edges]

def cover_dicts(*edges: Tuple[int, int]) -> List[Dict[str, Any]]:
codereview_new_python_data_11923
def applyByNode(requestContext, seriesList, nodeNum, templateFunction, newName=N
    """
    prefixes = set()
    for series in seriesList:
-        prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
        prefixes.add(prefix)
    results = []
    newContext = requestContext.copy()

```suggestion
        if nodeNum >= len(nodes):
```

def applyByNode(requestContext, seriesList, nodeNum, templateFunction, newName=N
    """
    prefixes = set()
    for series in seriesList:
+        nodes = series.name.split('.')
+        if nodeNum >= len(nodes):
+            raise InputParameterError("{} does not contain {} nodes".format(series.name, nodeNum))
+        prefix = '.'.join(nodes[:nodeNum + 1])
        prefixes.add(prefix)
    results = []
    newContext = requestContext.copy()
codereview_new_python_data_11924
def get_value_stats(
    bindings: list[Any],
) -> tuple[FVal, list[tuple[str, FVal, FVal]]]:
    """Returns the sum of the USD value at the time of acquisition and the amount received
-    by asset"""
    usd_value = ZERO
    try:
        query = 'SELECT SUM(CAST(usd_value AS REAL)) FROM history_events ' + query_filters

Can you edit the docstring and add that this is only used by liquity for now? And also give some information on the `query_filters` and `bindings` arguments, since you removed what a normal person would expect, which is a normal query? Or the more I write about this ... the more I don't like this approach.
1. If this is supposed to be used by multiple places, a common interface with a filter needs to be created.
2. If not, then this should not be in db/history_events.py and instead should be under liquity/db.py if it's only for liquity

def get_value_stats(
    bindings: list[Any],
) -> tuple[FVal, list[tuple[str, FVal, FVal]]]:
    """Returns the sum of the USD value at the time of acquisition and the amount received
+    by asset
+
+    TODO: At the moment this function is used by liquity and kraken. Change it to use a filter
+    instead of a query string and bindings when the refactor of the history events is made.
+    """
    usd_value = ZERO
    try:
        query = 'SELECT SUM(CAST(usd_value AS REAL)) FROM history_events ' + query_filters
codereview_new_python_data_11925
-from typing import TYPE_CHECKING
-
-from rotkehlchen.constants.timing import DATA_UPDATES_REFRESH
-from rotkehlchen.db.updates import LAST_DATA_UPDATES_KEY
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now

if TYPE_CHECKING:
    from rotkehlchen.db.dbhandler import DBHandler


-def should_check_data_updates(database: 'DBHandler') -> bool:
    """
-    Checks if the last time we checked data updates is far enough to trigger
-    the process of querying it again.
    """
    with database.conn.read_ctx() as cursor:
-        cursor.execute('SELECT value FROM settings WHERE name=?', (LAST_DATA_UPDATES_KEY,))
        timestamp_in_db = cursor.fetchone()
        if timestamp_in_db is None:
            return True
        last_update_ts = deserialize_timestamp(timestamp_in_db)
-        return ts_now() - last_update_ts >= DATA_UPDATES_REFRESH

```suggestion
    Checks if enough time has elapsed since the last run of a periodic task in order to run it again.
```

+from typing import TYPE_CHECKING, Literal
+
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now

if TYPE_CHECKING:
    from rotkehlchen.db.dbhandler import DBHandler


+def should_run_periodic_task(
+    database: 'DBHandler',
+    key_name: Literal['last_data_updates_ts', 'last_evm_accounts_detect_ts'],
+    refresh_period: int,
+) -> bool:
    """
+    Checks if enough time has elapsed since the last run of a periodic task in order to run
+    it again.
    """
    with database.conn.read_ctx() as cursor:
+        cursor.execute('SELECT value FROM settings WHERE name=?', (key_name,))
        timestamp_in_db = cursor.fetchone()
        if timestamp_in_db is None:
            return True
        last_update_ts = deserialize_timestamp(timestamp_in_db)
+        return ts_now() - last_update_ts >= refresh_period
codereview_new_python_data_11926
def test_curve_remove_imbalanced(database, ethereum_transaction_decoder): @pytest.mark.vcr() @pytest.mark.parametrize('ethereum_accounts', [['0x6Bb553FFC5716782051f51b564Bb149D9946f0d2']]) def test_deposit_multiple_tokens(ethereum_transaction_decoder, ethereum_accounts): _populate_curve_pools(ethereum_transaction_decoder) tx_hex = deserialize_evm_tx_hash('0xe954a396a02ebbea45a1d206c9918f717c55509c8138fccc63155d0262ef4dc4 ') # noqa: E501 evmhash = deserialize_evm_tx_hash(tx_hex) Need a docstring here. Also this seems to fail in the tests. def test_curve_remove_imbalanced(database, ethereum_transaction_decoder): @pytest.mark.vcr() @pytest.mark.parametrize('ethereum_accounts', [['0x6Bb553FFC5716782051f51b564Bb149D9946f0d2']]) def test_deposit_multiple_tokens(ethereum_transaction_decoder, ethereum_accounts): + """Check the case for a pool where multiple deposit events appear in the transaction""" _populate_curve_pools(ethereum_transaction_decoder) tx_hex = deserialize_evm_tx_hash('0xe954a396a02ebbea45a1d206c9918f717c55509c8138fccc63155d0262ef4dc4 ') # noqa: E501 evmhash = deserialize_evm_tx_hash(tx_hex)
codereview_new_python_data_11940
def xfail( condition: bool = True, *, reason: str = "", - raises: Union[Type[BaseException], Tuple[BaseException, ...]] = BaseException, ) -> "example": """Mark this example as an expected failure, like pytest.mark.xfail(). ```suggestion raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = BaseException, ``` def xfail( condition: bool = True, *, reason: str = "", + raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = BaseException, ) -> "example": """Mark this example as an expected failure, like pytest.mark.xfail().
codereview_new_python_data_11941
def _boolean_dtypes(xp: Any) -> st.SearchStrategy[DataType]: def _real_dtypes(xp: Any) -> st.SearchStrategy[DataType]: - """Return a strategy for all real dtype objects.""" return st.one_of( _integer_dtypes(xp), _unsigned_integer_dtypes(xp), ```suggestion """Return a strategy for all real-valued dtype objects.""" ``` To make it clear that we're excluding complex numbers, not e.g. mock dtype objects. def _boolean_dtypes(xp: Any) -> st.SearchStrategy[DataType]: def _real_dtypes(xp: Any) -> st.SearchStrategy[DataType]: + """Return a strategy for all real-valued dtype objects.""" return st.one_of( _integer_dtypes(xp), _unsigned_integer_dtypes(xp),
codereview_new_python_data_11942
def complex_dtypes( namespace = StrategiesNamespace(**kwargs) try: - _args_to_xps[(xp, api_version)] = namespace except TypeError: pass We want to cache the `api_version=None` case too, so let's add: ```python if inferred_version: _args_to_xps[(xp, None)] = namespace ``` plus corresponding test cases to check both that None is cached, and that passing the inferred version hits the newly-cached version (and vice-versa). def complex_dtypes( namespace = StrategiesNamespace(**kwargs) try: + _args_to_xps[(xp, None if inferred_version else api_version)] = namespace except TypeError: pass
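A sketch of one of the requested test cases, assuming `make_strategies_namespace` is the public entry point that consults `_args_to_xps` before building a namespace, and that `xp` (an Array API namespace fixture) is hashable so the `TypeError` branch is not taken:

```python
def test_inferring_api_version_caches_under_none_key(xp):
    # Two calls without api_version should return the exact same cached
    # namespace object, since the (xp, None) key was stored on the first call.
    first = make_strategies_namespace(xp)
    assert make_strategies_namespace(xp) is first
```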
codereview_new_python_data_11943
from hypothesistooling.projects.hypothesispython import PYTHON_SRC from hypothesistooling.scripts import pip_tool, tool_path -PYTHON_VERSIONS = [f"3.{v}" for v in range(7, 11)] def test_mypy_passes_on_hypothesis(): I think I'd prefer to write these out as literals, unless we can pull them out of the autoupdated CI config? Just thinking about how they'll stay up to date. I think we can also test against 3.11? from hypothesistooling.projects.hypothesispython import PYTHON_SRC from hypothesistooling.scripts import pip_tool, tool_path +PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] def test_mypy_passes_on_hypothesis():
codereview_new_python_data_11944
-# SPDX-License-Identifier: GPL-2.0-only # This file is part of Scapy # See https://scapy.net/ for more information # Copyright (C) 2009 Jochen Bartl This should be GPL-2.0-or-later +# SPDX-License-Identifier: GPL-2.0-or-later # This file is part of Scapy # See https://scapy.net/ for more information # Copyright (C) 2009 Jochen Bartl
codereview_new_python_data_11977
def __init__(self, webelement) -> None: Select(driver.find_element(By.TAG_NAME, "select")).select_by_index(2) """ if webelement.tag_name.lower() != "select": - raise UnexpectedTagNameException( - "Select only works on <select> elements, not on <%s>" % - webelement.tag_name) self._el = webelement multi = self._el.get_dom_attribute("multiple") self.is_multiple = multi and multi != "false" minor: can we use `f"... not on {webelement.tag_name}"` strings here? def __init__(self, webelement) -> None: Select(driver.find_element(By.TAG_NAME, "select")).select_by_index(2) """ if webelement.tag_name.lower() != "select": + raise UnexpectedTagNameException(f"Select only works on <select> elements, not on {webelement.tag_name}") self._el = webelement multi = self._el.get_dom_attribute("multiple") self.is_multiple = multi and multi != "false"
codereview_new_python_data_11978
setup_args = { 'cmdclass': {'install': install}, 'name': 'selenium', - 'version': "4.4.4", 'license': 'Apache 2.0', 'description': 'Python bindings for Selenium', 'long_description': open(join(abspath(dirname(__file__)), "README.rst")).read(), Please do not bump the version as we do not know if this will be 4.4.4 or 4.5.0 setup_args = { 'cmdclass': {'install': install}, 'name': 'selenium', + 'version': "4.4.3", 'license': 'Apache 2.0', 'description': 'Python bindings for Selenium', 'long_description': open(join(abspath(dirname(__file__)), "README.rst")).read(),
codereview_new_python_data_12060
'3rdParty/mingw/vcpkg/buildtrees/*.log', '3rdParty/Windows/vcpkg/buildtrees/*.log', 'android/BOINC/app/build/reports/', ] def prepare_7z_archive(archive_name, target_directory, files_list): @talregev, please remove all changes but this line and I'll merge this PR '3rdParty/mingw/vcpkg/buildtrees/*.log', '3rdParty/Windows/vcpkg/buildtrees/*.log', 'android/BOINC/app/build/reports/', + 'mac_build/xcodebuild_*.log', ] def prepare_7z_archive(archive_name, target_directory, files_list):
codereview_new_python_data_12062
def dest(*dirs):
    'run_in_ops',
    'sign_executable',
    'stage_file',
-    'run_stage_file',
    'update_versions',
    'xadd',
]
I think `stage_file_native` is a better name
def dest(*dirs):
    'run_in_ops',
    'sign_executable',
    'stage_file',
+    'stage_file_native',
    'update_versions',
    'xadd',
]
codereview_new_python_data_12065
class TestPluginCanHandleUrlTelemadrid(PluginCanHandleUrl): should_match = [ "https://www.telemadrid.es/", "https://www.telemadrid.es/emision-en-directo/", ] I forgot about VODs. Please add one VOD URL to the matcher tests, so that it's clear that this is supported by the plugin. For example this one, which isn't a particularly long URL string: `https://www.telemadrid.es/programas/telenoticias-1/Telenoticias-1-02032023-2-2538066218--20230302042202.html` class TestPluginCanHandleUrlTelemadrid(PluginCanHandleUrl): should_match = [ "https://www.telemadrid.es/", "https://www.telemadrid.es/emision-en-directo/", + "https://www.telemadrid.es/programas/telenoticias-1/Telenoticias-1-02032023-2-2538066218--20230302042202.html", ]
codereview_new_python_data_12066
| bloomberght.com/tv | - showmax\.com\.tr/(canli-yayin|canliyayin) | - showturk\.com\.tr/(canli-yayin|canliyayin)(/showtv)? )/? """, re.VERBOSE)) class CinerGroup(Plugin): The regex should be simplified and unnecessary capture groups be avoided. ```suggestion showmax\.com\.tr/canli-?yayin | showturk\.com\.tr/canli-?yayin(?:/showtv)? ``` | bloomberght.com/tv | + showmax\.com\.tr/canli-?yayin | + showturk\.com\.tr/canli-?yayin(?:/showtv)? )/? """, re.VERBOSE)) class CinerGroup(Plugin):
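The `-?` quantifier is what lets a single branch replace each old `(canli-yayin|canliyayin)` group; a quick standalone check:

```python
import re

pattern = re.compile(r"canli-?yayin")
# The optional hyphen covers both spellings with one alternative.
assert pattern.fullmatch("canliyayin")
assert pattern.fullmatch("canli-yayin")
```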
codereview_new_python_data_12067
def _get_live_url2(self):
        return live_url
    def _get_streams(self):
-        root = self.session.http.get(self.url, schema=validate.Schema(
-            validate.parse_html(),
-        ))
-        live_url = self._get_live_url(root) or self._get_live_url2(root)
        if not live_url:
            return
        return HLSStream.parse_variant_playlist(self.session, live_url)
The `root` variable is causing problems.
def _get_live_url2(self):
        return live_url
    def _get_streams(self):
+        live_url = self._get_live_url() or self._get_live_url2()
        if not live_url:
            return
        return HLSStream.parse_variant_playlist(self.session, live_url)
codereview_new_python_data_12068
class Picarto(Plugin): def get_live(self, username): channel, multistreams, loadbalancer = self.session.http.get( - self.API_URL_LIVE.format(username=username), schema=validate.Schema( validate.parse_json(), { "channel": validate.any(None, { Please move the `schema` keyword to a new line class Picarto(Plugin): def get_live(self, username): channel, multistreams, loadbalancer = self.session.http.get( + self.API_URL_LIVE.format(username=username), + schema=validate.Schema( validate.parse_json(), { "channel": validate.any(None, {
codereview_new_python_data_12069
class TestPluginCanHandleUrlAtpChallenger(PluginCanHandleUrl):
    __plugin__ = AtpChallengerTour
    should_match = [
-        'https://www.atptour.com/en/atp-challenger-tour/challenger-tv/challenger-tv-search-results/'
-        + '2022-2785-ms005-zug-alexander-ritschard-vs-dominic-stricker/2022/2785/all',
-        'https://www.atptour.com/en/atp-challenger-tour/challenger-tv'
    ]
Don't use single-quote strings and always add a trailing comma, so that adding more URLs doesn't cause unnecessary diffs. This also prevents strings in a sequence from accidentally being concatenated implicitly.
class TestPluginCanHandleUrlAtpChallenger(PluginCanHandleUrl):
    __plugin__ = AtpChallengerTour
    should_match = [
+        "https://www.atptour.com/en/atp-challenger-tour/challenger-tv",
+        "https://www.atptour.com/es/atp-challenger-tour/challenger-tv",
+        "https://www.atptour.com/en/atp-challenger-tour/challenger-tv/challenger-tv-search-results/"
+        + "2022-2785-ms005-zug-alexander-ritschard-vs-dominic-stricker/2022/2785/all",
    ]
codereview_new_python_data_12070
$type live, vod """ -import logging import re from streamlink.plugin import Plugin, pluginmatcher from streamlink.plugin.api import validate -log = logging.getLogger(__name__) - @pluginmatcher(re.compile( r"https?://(?:www\.)?atptour\.com/(?:en|es)/atp-challenger-tour/challenger-tv" Please remove the logging stuff (import and `log` definition), it's not needed. Forgot to include this in my diff. $type live, vod """ import re from streamlink.plugin import Plugin, pluginmatcher from streamlink.plugin.api import validate @pluginmatcher(re.compile( r"https?://(?:www\.)?atptour\.com/(?:en|es)/atp-challenger-tour/challenger-tv"
codereview_new_python_data_12087
class SpecsParser: class BadSpecError(Exception): """Indicates an unparseable command line selector.""" - def __init__(self, root_dir: str | None = None, work_dir: str | None = None) -> None: self._root_dir = os.path.realpath(root_dir or get_buildroot()) self._work_dir = ( os.path.relpath(os.path.join(self._root_dir, work_dir), self._root_dir) ```suggestion def __init__(self, *, root_dir: str | None = None, work_dir: str | None = None) -> None: ``` Since these args are both "dirs" and not distinguished by type, let's at least force the caller to name them, for clarity to the reader, and to avoid bugs incurred by swapping them accidentally. class SpecsParser: class BadSpecError(Exception): """Indicates an unparseable command line selector.""" + def __init__(self, *, root_dir: str | None = None, work_dir: str | None = None) -> None: self._root_dir = os.path.realpath(root_dir or get_buildroot()) self._work_dir = ( os.path.relpath(os.path.join(self._root_dir, work_dir), self._root_dir)
codereview_new_python_data_12088
async def export_codegen( { tgt_type.alias for tgt_type in registered_target_types.types - for input_source, _ in inputs_to_outputs if tgt_type.class_has_field(input_source, union_membership=union_membership) } ) not a super big thing, as there's likely few entries and even fewer duplicated entries, but still, the `Target.class_has_field` is not a simple lookup, so would prefer to eliminate redundant calls to it. ```suggestion for input_source in {input_source for input_source, _ in inputs_to_outputs} ``` async def export_codegen( { tgt_type.alias for tgt_type in registered_target_types.types + for input_source in {input_source for input_source, _ in inputs_to_outputs} if tgt_type.class_has_field(input_source, union_membership=union_membership) } )
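A toy illustration (stub names, not the real pants types) of why deduplicating `input_source` first matters when the predicate is expensive:

```python
calls = 0

def expensive_check(field):
    # Stand-in for Target.class_has_field, which is not a simple lookup.
    global calls
    calls += 1
    return True

inputs_to_outputs = [("sources", "a"), ("sources", "b"), ("sources", "c")]

_ = [s for s, _ in inputs_to_outputs if expensive_check(s)]
assert calls == 3  # one call per pair, duplicates included

calls = 0
_ = [s for s in {s for s, _ in inputs_to_outputs} if expensive_check(s)]
assert calls == 1  # one call per distinct input_source
```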
codereview_new_python_data_12089
async def pyupgrade_fix(request: PyUpgradeRequest.Batch, pyupgrade: PyUpgrade) - # (Technically we could not do this. It doesn't break Pants since the next run on the CLI would # use the new file with the new digest. However that isn't the UX we want for our users.) input_digest = request.snapshot.digest - for _ in range(10): # Give the loop an upper bound to guard against inifite runs result = await Get( FallibleProcessResult, VenvPexProcess( typo in comment... if you have more changes coming.. ```suggestion for _ in range(10): # Give the loop an upper bound to guard against infinite runs ``` async def pyupgrade_fix(request: PyUpgradeRequest.Batch, pyupgrade: PyUpgrade) - # (Technically we could not do this. It doesn't break Pants since the next run on the CLI would # use the new file with the new digest. However that isn't the UX we want for our users.) input_digest = request.snapshot.digest + for _ in range(10): # Give the loop an upper bound to guard against infinite runs result = await Get( FallibleProcessResult, VenvPexProcess(
codereview_new_python_data_12090
async def build_go_package(
    # Put the source file paths into a file and pass that to `go tool compile` via a config file using the
    # `@CONFIG_FILE` syntax. This is necessary to avoid command-line argument limits on macOS. The arguments
    # may end up exceeding those limits when compiling standard library packages where we append a very long GOROOT
-    # path to each file name or in packages with larges numbers of files.
    go_source_file_paths_config = "\n".join([*go_file_paths, *generated_cgo_file_paths])
    go_sources_file_paths_digest = await Get(
        Digest, CreateDigest([FileContent("__sources__.txt", go_source_file_paths_config.encode())])
```suggestion
    # path to each file name or in packages with large numbers of files.
```
async def build_go_package(
    # Put the source file paths into a file and pass that to `go tool compile` via a config file using the
    # `@CONFIG_FILE` syntax. This is necessary to avoid command-line argument limits on macOS. The arguments
    # may end up exceeding those limits when compiling standard library packages where we append a very long GOROOT
+    # path to each file name or in packages with large numbers of files.
    go_source_file_paths_config = "\n".join([*go_file_paths, *generated_cgo_file_paths])
    go_sources_file_paths_digest = await Get(
        Digest, CreateDigest([FileContent("__sources__.txt", go_source_file_paths_config.encode())])
codereview_new_python_data_12091
class PythonDependencyVisitorRequest: class PythonDependencyVisitor: """Wraps a subclass of _pants_dep_parser.DependencyVisitorBase.""" - digest: Digest # The content of the subclass classname: str # The full classname, e.g., _my_custom_dep_parser.MyCustomVisitor env: FrozenDict[str, str] # Set these env vars when invoking the visitor ```suggestion digest: Digest # The file contents for the visitor ``` or something to that effect class PythonDependencyVisitorRequest: class PythonDependencyVisitor: """Wraps a subclass of _pants_dep_parser.DependencyVisitorBase.""" + digest: Digest # The file contents for the visitor classname: str # The full classname, e.g., _my_custom_dep_parser.MyCustomVisitor env: FrozenDict[str, str] # Set these env vars when invoking the visitor
codereview_new_python_data_12092
def error_on_imports(build_file_content: str, filepath: str) -> None:
            continue
        raise ParseError(
            f"Import used in {filepath} at line {lineno}. Import statements are banned in "
-            "BUILD files because they can easily break Pants caching and lead to stale results. "
-            f"\n\nInstead, consider writing a macro ({doc_url('macros')}) or "
-            f"writing a plugin ({doc_url('plugins-overview')}."
        )
https://www.pantsbuild.org/docs/macros suggests that the file must be named `macros.py` so I reckon it's safe to hardcode it here? Can't find any usages of `macros.py` in the repo. :/
def error_on_imports(build_file_content: str, filepath: str) -> None:
            continue
        raise ParseError(
            f"Import used in {filepath} at line {lineno}. Import statements are banned in "
+            "BUILD files and macros (that act like a normal BUILD file) because they can easily "
+            "break Pants caching and lead to stale results. "
+            f"\n\nInstead, consider writing a plugin ({doc_url('plugins-overview')})."
        )
codereview_new_python_data_12093
async def prepare_shell_command_process( def _output_at_build_root(process: Process, bash: BashBinary) -> Process: - working_directory = process.working_directory - if working_directory is None: - working_directory = "" output_directories = process.output_directories output_files = process.output_files if working_directory: output_directories = tuple(os.path.join(working_directory, d) for d in output_directories) output_files = tuple(os.path.join(working_directory, d) for d in output_files) - cd = f"cd {shlex.quote(working_directory)} &&" if working_directory else "" shlexed_argv = " ".join(shlex.quote(arg) for arg in process.argv) - new_argv = (bash.path, "-c", f"{cd} {shlexed_argv}") return dataclasses.replace( process, saving some line-noise, this could just be: ```python working_directory = process.working_directory or "" ``` async def prepare_shell_command_process( def _output_at_build_root(process: Process, bash: BashBinary) -> Process: + working_directory = process.working_directory or "" output_directories = process.output_directories output_files = process.output_files if working_directory: output_directories = tuple(os.path.join(working_directory, d) for d in output_directories) output_files = tuple(os.path.join(working_directory, d) for d in output_files) + cd = f"cd {shlex.quote(working_directory)} && " if working_directory else "" shlexed_argv = " ".join(shlex.quote(arg) for arg in process.argv) + new_argv = (bash.path, "-c", f"{cd}{shlexed_argv}") return dataclasses.replace( process,
codereview_new_python_data_12094
def test_output_path() -> None: ) pants_run.assert_success() dist_output_path = os.path.join(dist_dir, output_path) - dist_entires = os.listdir(os.path.join(dist_dir, output_path)) - assert len(dist_entires) == 2 - for entry in dist_entires: assert os.path.isfile(os.path.join(dist_output_path, entry)) Typo, should be dist_entries? def test_output_path() -> None: ) pants_run.assert_success() dist_output_path = os.path.join(dist_dir, output_path) + dist_entries = os.listdir(os.path.join(dist_dir, output_path)) + assert len(dist_entries) == 2 + for entry in dist_entries: assert os.path.isfile(os.path.join(dist_output_path, entry))
codereview_new_python_data_12095
async def make_cgo_compile_wrapper_script( path="wrapper", content=textwrap.dedent( """\ - export sandbox_root="$(/bin/pwd)" - ln -s "${sandbox_root}" __pants_sandbox_root__ - declare -a args - args=("$@") - args=("${args[@]//__PANTS_SANDBOX_ROOT__/${sandbox_root}}") exec "${args[@]}" """ ).encode(), What is this symlink for? I don't see where it's used? async def make_cgo_compile_wrapper_script( path="wrapper", content=textwrap.dedent( """\ + sandbox_root="$(/bin/pwd)" + args=("${@//__PANTS_SANDBOX_ROOT__/$sandbox_root}") exec "${args[@]}" """ ).encode(),
codereview_new_python_data_12096
async def _prepare_process_request_from_target(shell_command: Target) -> ShellCo ) -def _parse_outputs_from_command(shell_command, description): outputs = shell_command.get(ShellCommandOutputsField).value or () output_files = shell_command.get(ShellCommandOutputFilesField).value or () output_directories = shell_command.get(ShellCommandOutputDirectoriesField).value or () No type hints? async def _prepare_process_request_from_target(shell_command: Target) -> ShellCo ) +def _parse_outputs_from_command( + shell_command: Target, description: str +) -> tuple[tuple[str, ...], tuple[str, ...]]: outputs = shell_command.get(ShellCommandOutputsField).value or () output_files = shell_command.get(ShellCommandOutputFilesField).value or () output_directories = shell_command.get(ShellCommandOutputDirectoriesField).value or ()
codereview_new_python_data_12097
def test_sources_and_files(rule_runner: RuleRunner) -> None:
                "tee",
            ],
            output_files=["message.txt"],
-            output_directories=["res/"],
            command="./script.sh",
        )
Do we need the trailing slash? If not, let's omit it.
def test_sources_and_files(rule_runner: RuleRunner) -> None:
                "tee",
            ],
            output_files=["message.txt"],
+            output_directories=["res"],
            command="./script.sh",
        )
codereview_new_python_data_12098
def compilation_failure(exit_code: int, stdout: str | None, stderr: str | None) ) test_pkg_build_request = maybe_test_pkg_build_request.request - # Determine the direct dependencies of the generated main package. The test package itself it always a # dependency. Add the xtests package as well if any xtests exist. main_direct_deps = [test_pkg_build_request] if testmain.has_xtests: ```suggestion # Determine the direct dependencies of the generated main package. The test package itself is always a ``` def compilation_failure(exit_code: int, stdout: str | None, stderr: str | None) ) test_pkg_build_request = maybe_test_pkg_build_request.request + # Determine the direct dependencies of the generated main package. The test package itself is always a # dependency. Add the xtests package as well if any xtests exist. main_direct_deps = [test_pkg_build_request] if testmain.has_xtests:
codereview_new_python_data_12334
def foo(input, weight, bias): foo(*inps) # Autotuning checks correctness of each version - self.assertEqual(counters["inductor"]["choice_caller_benchmarked"], 13) @patches def test_mm(self): ```suggestion self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1) ``` def foo(input, weight, bias): foo(*inps) # Autotuning checks correctness of each version + self.assertEqual(counters["inductor"]["select_algorithm_autotune"], 1) @patches def test_mm(self):
codereview_new_python_data_12448
def start_cmd(self, node):
        cmd = "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % EndToEndLatencyService.LOG4J_CONFIG
        if node.version.consumer_supports_bootstrap_server():
            cmd += "KAFKA_OPTS=%(kafka_opts)s %(kafka_run_class)s %(java_class_name)s " % args
-            cmd += "-b %(bootstrap_servers)s -t %(topic)s -n %(num_records)d -a %(acks)d -s %(message_bytes)d -f %(config_file)s" % args
        else:
            # Set fetch max wait to 0 to match behavior in later versions
            cmd += "KAFKA_OPTS=%(kafka_opts)s %(kafka_run_class)s kafka.tools.TestEndToEndLatency " % args
This change and https://github.com/apache/kafka/pull/13095/files#diff-1a3735187400a54aac5a802fb2d2ff6d4fe9cdfbc5fd953a45e2890ad43f58cfR157-R207 are needed because in argparse4j, if we are using positional arguments, then from what I understood, they can't be made optional. The properties file is an optional argument as per the Scala code, but it worked in that case because the main arguments were used as-is.
def start_cmd(self, node):
        cmd = "export KAFKA_LOG4J_OPTS=\"-Dlog4j.configuration=file:%s\"; " % EndToEndLatencyService.LOG4J_CONFIG
        if node.version.consumer_supports_bootstrap_server():
            cmd += "KAFKA_OPTS=%(kafka_opts)s %(kafka_run_class)s %(java_class_name)s " % args
+            cmd += "%(bootstrap_servers)s %(topic)s %(num_records)d %(acks)d %(message_bytes)d %(config_file)s" % args
        else:
            # Set fetch max wait to 0 to match behavior in later versions
            cmd += "KAFKA_OPTS=%(kafka_opts)s %(kafka_run_class)s kafka.tools.TestEndToEndLatency " % args
codereview_new_python_data_12574
def __init__( self.msg = msg self.type_map = type_map self.options = options - self.builtins: set[str] = set() builtins_mod = names.get("__builtins__", None) if builtins_mod: assert isinstance(builtins_mod.node, MypyFile) - self.builtins = set(builtins_mod.node.names.keys()) self.loops: list[Loop] = [] self.try_depth = 0 self.tracker = DefinedVariableTracker() As a minor optimization, we could store `builtins_mod.node.names` in `self.builtins` (i.e. a `SymbolTable` instance). This would avoid having to create a set with all the items in builtins. def __init__( self.msg = msg self.type_map = type_map self.options = options + self.builtins = SymbolTable() builtins_mod = names.get("__builtins__", None) if builtins_mod: assert isinstance(builtins_mod.node, MypyFile) + self.builtins = builtins_mod.node.names self.loops: list[Loop] = [] self.try_depth = 0 self.tracker = DefinedVariableTracker()
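The suggestion works because a dict-like symbol table already answers membership queries in average O(1) time, so copying its keys into a set buys nothing; a generic sketch with a plain dict standing in for mypy's SymbolTable:

```python
# `in` on a dict tests keys with the same average O(1) cost as on a set,
# so `name in table` works without materializing set(table.keys()).
table = {"print": object(), "len": object()}
assert "print" in table
assert "missing" not in table
```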
codereview_new_python_data_12575
def prepare_method_signature(self, func: FuncDef, info: TypeInfo, has_self_type: func.is_class = True if not func.arguments: self.fail( - "Method must have at least one argument. Did you forget the self argument?", func, ) elif isinstance(functype, CallableType): ```suggestion 'Method must have at least one argument. Did you forget the "self" argument?', ``` I believe our convention is to put code samples in quotes in error messages. def prepare_method_signature(self, func: FuncDef, info: TypeInfo, has_self_type: func.is_class = True if not func.arguments: self.fail( + 'Method must have at least one argument. Did you forget the "self" argument?', func, ) elif isinstance(functype, CallableType):
codereview_new_python_data_12576
def run_benchmark(compiled_dir: str, check_dir: str) -> float: cmd += glob.glob(os.path.join(abschk, "mypy/*.py")) cmd += glob.glob(os.path.join(abschk, "mypy/*/*.py")) t0 = time.time() subprocess.run(cmd, cwd=compiled_dir, env=env) return time.time() - t0 Is it worth using `time.perf_counter()` or `time.perf_counter_ns()` instead of `time.time()`? def run_benchmark(compiled_dir: str, check_dir: str) -> float: cmd += glob.glob(os.path.join(abschk, "mypy/*.py")) cmd += glob.glob(os.path.join(abschk, "mypy/*/*.py")) t0 = time.time() + # Ignore errors, since some commits being measured may generate additional errors. subprocess.run(cmd, cwd=compiled_dir, env=env) return time.time() - t0
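For reference, the clock the reviewer suggests is the monotonic high-resolution counter from the standard library; a minimal timing sketch (the subprocess here is a placeholder for the real mypy invocation):

```python
import subprocess
import time

t0 = time.perf_counter()  # monotonic; unaffected by system clock adjustments
subprocess.run(["true"])
elapsed = time.perf_counter() - t0
print(f"took {elapsed:.3f}s")
```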
codereview_new_python_data_12577
def _get_imported_symbol_names(runtime: types.ModuleType) -> frozenset[str] | No return None if not source.strip(): - return None try: module_symtable = symtable.symtable(source, runtime.__name__, "exec") When does inspect.getsource return an empty str? Should this be `return frozenset()`? def _get_imported_symbol_names(runtime: types.ModuleType) -> frozenset[str] | No return None if not source.strip(): + # The source code for the module was an empty file, + # no point in parsing it with symtable + return frozenset() try: module_symtable = symtable.symtable(source, runtime.__name__, "exec")
codereview_new_python_data_12578
def typeddict_key_must_be_string_literal(self, typ: TypedDictType, context: Cont

    def typeddict_key_not_found(
        self, typ: TypedDictType, item_name: str, context: Context, setitem: bool = False
    ) -> None:
-        """Handles error messages for TypedDicts that have unknown keys.

        Note that we differentiate between reading a value and setting a value.
```suggestion
        """Handle error messages for TypedDicts that have unknown keys.
```
def typeddict_key_must_be_string_literal(self, typ: TypedDictType, context: Cont

    def typeddict_key_not_found(
        self, typ: TypedDictType, item_name: str, context: Context, setitem: bool = False
    ) -> None:
+        """Handle error messages for TypedDicts that have unknown keys.

        Note that we differentiate between reading a value and setting a value.
codereview_new_python_data_12579
def verify_typealias( "__annotations__", "__path__", # mypy adds __path__ to packages, but C packages don't have it "__getattr__", # resulting behaviour might be typed explicitly # TODO: remove the following from this list "__author__", "__version__", "__copyright__", - # Created by `warnings.warn`, does not make much sense to have in stubs: - "__warningregistry__", } ) I think this entry should go above the TODO comment on line 1246; it's here to stay :) def verify_typealias( "__annotations__", "__path__", # mypy adds __path__ to packages, but C packages don't have it "__getattr__", # resulting behaviour might be typed explicitly + # Created by `warnings.warn`, does not make much sense to have in stubs: + "__warningregistry__", # TODO: remove the following from this list "__author__", "__version__", "__copyright__", } )