Columns: id (string, lengths 30-32), content (string, lengths 139-2.8k)
codereview_new_python_data_6028
def test_get_parameters_by_path_and_filter_by_labels(self, ssm_client, create_pa
        assert found_param["Type"] == "String"
        assert found_param["Value"] == "value"

    def test_get_inexistent_maintenance_window(self, ssm_client):
        invalid_name = "mw-00000000000000000"
        with pytest.raises(ssm_client.exceptions.DoesNotExistException) as exc:

If this was tested against AWS please mark it with `@pytest.mark.aws_validated`. The other tests in the class have the marker as far as I can see. Feel free to merge after addressing this.

def test_get_parameters_by_path_and_filter_by_labels(self, ssm_client, create_pa
        assert found_param["Type"] == "String"
        assert found_param["Value"] == "value"

+   @pytest.mark.aws_validated
    def test_get_inexistent_maintenance_window(self, ssm_client):
        invalid_name = "mw-00000000000000000"
        with pytest.raises(ssm_client.exceptions.DoesNotExistException) as exc:
codereview_new_python_data_6029
class LocalKmsProvider(KmsApi, ServiceLifecycleHook):
-   def get_forward_url(self):
-       """Return the URL of the backend local-kms server to forward requests to"""
        account_id = get_aws_account_id()
        start_kms_local(account_id=account_id)
        return f"http://{LOCALSTACK_HOSTNAME}:{get_server(account_id).port}"

nit: we should maybe also mention that we're actually starting a server when we're first getting the forward URL. and some feedback: overall i think the approach is clever, since the implementation is so simple. but starting servers in property getters is generally something i would avoid. i don't really have a better solution suggestion right now though :-)

class LocalKmsProvider(KmsApi, ServiceLifecycleHook):
+   def start_and_get_backend(self):
+       """
+       Start the local-kms backend and return the URL of the server.
+       """
        account_id = get_aws_account_id()
        start_kms_local(account_id=account_id)
        return f"http://{LOCALSTACK_HOSTNAME}:{get_server(account_id).port}"
codereview_new_python_data_6030
class ResourcePolicy:


@dataclasses.dataclass
class FunctionResourcePolicy:
-   # TODO: do we have a typed IAM policy somewhere already?
-   # Something like this? localstack_ext.services.iam.policy_engine.models.PolicyDocument
    policy: ResourcePolicy

```suggestion
```
I'd just remove this for now. There's not too much that we can do with it anyway.

class ResourcePolicy:


@dataclasses.dataclass
class FunctionResourcePolicy:
    policy: ResourcePolicy
codereview_new_python_data_6031
def in_docker():

# TODO change when asf becomes default: os.environ.get("PROVIDER_OVERRIDE_S3", "") == 'legacy'
LEGACY_S3_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_S3", "") not in ("asf", "asf_pro")

-# Whether to use the legacy implementation for AWS StepFunctions.
-LEGACY_SFN_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_SFN", "") not in "v2"
-
# Whether to report internal failures as 500 or 501 errors.
FAIL_FAST = is_env_true("FAIL_FAST")

```suggestion
```
probably not needed in this case

def in_docker():

# TODO change when asf becomes default: os.environ.get("PROVIDER_OVERRIDE_S3", "") == 'legacy'
LEGACY_S3_PROVIDER = os.environ.get("PROVIDER_OVERRIDE_S3", "") not in ("asf", "asf_pro")

# Whether to report internal failures as 500 or 501 errors.
FAIL_FAST = is_env_true("FAIL_FAST")
codereview_new_python_data_6032
class ExecutionWorkerComm(abc.ABC):

    @abc.abstractmethod
    def terminated(self) -> None:
        ...

nit: Would be great if we could add a few docstrings to explain the purpose/responsibility of the core engine classes. For the most part, the name is already self-explanatory, but it cannot harm to add a line or two to make it explicit.

👍

class ExecutionWorkerComm(abc.ABC):
+   """
+   Defines abstract callbacks for Execution's workers to report their progress, such as termination.
+   Execution instances define custom callbacks routines to update their state according to the latest
+   relevant state machine evaluation steps.
+   """
+
    @abc.abstractmethod
    def terminated(self) -> None:
        ...
codereview_new_python_data_6033
def test_create_rest_api_with_custom_id(
        assert response.ok
        assert response._content == b'{"echo": "foobar", "response": "mocked"}'

-   @pytest.mark.skip
    def test_api_gateway_kinesis_integration(self):
        # create target Kinesis stream
        stream = resource_util.create_kinesis_stream(self.TEST_STREAM_KINESIS_API_GW)

Is this test currently flaking? If so, can we add a TODO comment and keep a note to fix it in one of the upcoming iterations

👍

def test_create_rest_api_with_custom_id(
        assert response.ok
        assert response._content == b'{"echo": "foobar", "response": "mocked"}'

    def test_api_gateway_kinesis_integration(self):
        # create target Kinesis stream
        stream = resource_util.create_kinesis_stream(self.TEST_STREAM_KINESIS_API_GW)
codereview_new_python_data_6034
-import os
-
-# parent directory of this file
-PARENT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-OPENAPI_SPEC_PULUMI_JSON = os.path.join(PARENT_DIR, "files", "openapi.spec.pulumi.json")

nit: we usually keep the `__init__.py` files clean of custom logic. Not critical, as this is just a test module, but something to consider for future iterations.

👍
codereview_new_python_data_6035
def validate_kms_key_id(kms_key: str, bucket: FakeBucket):
        arn = parse_arn(kms_key)
        region_name = arn["region"]
        if region_name != bucket.region_name:
-           raise  # multi account unsupported yet
    except InvalidArnException:
        pass

Shouldn't we raise `KMS:NotFoundException` here?
- Created a kms key in `us-east-1`
- Created s3 bucket in `us-west-2`
- Tried to put object in bucket with sse key using the command below
```
aws s3api put-object --bucket bucket-xxxx --key main.go --ssekms-key-id "arn:aws:kms:us-east-1:00000000000:key/xxxxx-xxxx-xxxxxx-xxxxx" --server-side-encryption aws:kms
```
Output:
```
An error occurred (KMS.NotFoundException) when calling the PutObject operation: Invalid arn us-east-1
```

def validate_kms_key_id(kms_key: str, bucket: FakeBucket):
        arn = parse_arn(kms_key)
        region_name = arn["region"]
        if region_name != bucket.region_name:
+           raise CommonServiceException(
+               code="KMS.NotFoundException", message=f"Invalid arn {region_name}"
+           )  # multi account unsupported yet
    except InvalidArnException:
        pass
codereview_new_python_data_6036
def test_lambda_permission_url_invocation(
        create_lambda_function(
            func_name=function_name,
            zip_file=testutil.create_zip_file(TEST_LAMBDA_URL, get_content=True),
-           runtime=Runtime.nodejs14_x,
            handler="lambda_url.handler",
        )
        url_config = lambda_client.create_function_url_config(

nit: could use a newer Runtime here

def test_lambda_permission_url_invocation(
        create_lambda_function(
            func_name=function_name,
            zip_file=testutil.create_zip_file(TEST_LAMBDA_URL, get_content=True),
+           runtime=Runtime.nodejs18_x,
            handler="lambda_url.handler",
        )
        url_config = lambda_client.create_function_url_config(
codereview_new_python_data_6037
from moto.logs.models import LogsBackend as MotoLogsBackend
from moto.logs.models import logs_backends as moto_logs_backend

-from localstack.constants import DEFAULT_AWS_ACCOUNT_ID
from localstack.services.stores import AccountRegionBundle, BaseStore, CrossRegionAttribute
-from localstack.utils.aws import aws_stack


-def get_moto_logs_backend(account_id: str = None, region_name: str = None) -> MotoLogsBackend:
-   account_id = account_id or DEFAULT_AWS_ACCOUNT_ID
-   region_name = region_name or aws_stack.get_region()
-   return moto_logs_backend[account_id][region_name]

nit: I think using the `DEFAULT_AWS_ACCOUNT_ID` might not be best practice anymore. But I think this function is never called without an explicit region name or account ID, right?

from moto.logs.models import LogsBackend as MotoLogsBackend
from moto.logs.models import logs_backends as moto_logs_backend

from localstack.services.stores import AccountRegionBundle, BaseStore, CrossRegionAttribute


+def get_moto_logs_backend(account_id: str, region_name: str) -> MotoLogsBackend:
    return moto_logs_backend[account_id][region_name]
codereview_new_python_data_6038
def start(self, env_vars: dict[str, str]) -> None:
            container_name_or_id=self.id, container_network=network
        )
        if config.LAMBDA_ASF_DEV_PORT_EXPOSE:
-           self.executor_endpoint.container_address = "localhost"
-       else:
-           self.executor_endpoint.container_address = self.ip

    def stop(self) -> None:
        CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)

```suggestion
        if config.LAMBDA_ASF_DEV_PORT_EXPOSE:
            self.ip = "127.0.0.1"
        self.executor_endpoint.container_address = self.ip
```
This would set both the ip and address correctly

def start(self, env_vars: dict[str, str]) -> None:
            container_name_or_id=self.id, container_network=network
        )
        if config.LAMBDA_ASF_DEV_PORT_EXPOSE:
+           self.ip = "127.0.0.1"
+       self.executor_endpoint.container_address = self.ip

    def stop(self) -> None:
        CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)
codereview_new_python_data_6039
def test_put_object(self, s3_client, s3_bucket, snapshot):
        snapshot.match("get_object", response)

    @pytest.mark.aws_validated
    def test_post_object_with_files(self, s3_client, s3_bucket):
        object_key = "test-presigned-post-key"

Like we discussed, those presigned POST tests are very flaky, they could pass with arm64 then fail on amd64 and finally pass on integration_pro. Or fail on two of them but not the same tests failing. It seems sometimes there are not body in the request, and the signature/ETag returned is different, but no exception is raised before.

def test_put_object(self, s3_client, s3_bucket, snapshot):
        snapshot.match("get_object", response)

    @pytest.mark.aws_validated
+   @pytest.mark.xfail(
+       reason="failing sporadically with new HTTP gateway (only in CI)",
+   )
    def test_post_object_with_files(self, s3_client, s3_bucket):
        object_key = "test-presigned-post-key"
codereview_new_python_data_6040
def _create_version_model(
            id=new_id,
        )
        function.versions[next_version] = new_version
        if "$LATEST" in function.permissions:
            function.permissions[
                "$LATEST"

nit: Would be great if you could add a 1-2 line comment here explaining the context of this change.

def _create_version_model(
            id=new_id,
        )
        function.versions[next_version] = new_version
+       # Any Lambda permission for $LATEST (if existing) receives a new revision id upon publishing a new version.
+       # TODO: test revision id behavior for versions, permissions, etc because it seems they share the same revid
        if "$LATEST" in function.permissions:
            function.permissions[
                "$LATEST"
codereview_new_python_data_6041
# Default bucket name of the s3 bucket used for local lambda development
# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
-DEFAULT_BUCKET_MARKER_LOCAL = "hot-reloading-bucket"
-OLD_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"

# user that starts the opensearch process if the current user is root
OS_USER_OPENSEARCH = "localstack"

we've started using `LEGACY_` as prefix for older variables. should we maybe do that here as well?

# Default bucket name of the s3 bucket used for local lambda development
# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
+DEFAULT_BUCKET_MARKER_LOCAL = "hot-reload"
+LEGACY_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"

# user that starts the opensearch process if the current user is root
OS_USER_OPENSEARCH = "localstack"
codereview_new_python_data_6042
# Default bucket name of the s3 bucket used for local lambda development
# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
-DEFAULT_BUCKET_MARKER_LOCAL = "hot-reloading-bucket"
-OLD_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"

# user that starts the opensearch process if the current user is root
OS_USER_OPENSEARCH = "localstack"

looks like the team agreed on `hot-reload`!

# Default bucket name of the s3 bucket used for local lambda development
# This name should be accepted by all IaC tools, so should respect s3 bucket naming conventions
+DEFAULT_BUCKET_MARKER_LOCAL = "hot-reload"
+LEGACY_DEFAULT_BUCKET_MARKER_LOCAL = "__local__"

# user that starts the opensearch process if the current user is root
OS_USER_OPENSEARCH = "localstack"
codereview_new_python_data_6043
def _run_transcription_job(self, args: Tuple[TranscribeStore, str]):
            job["FailureReason"] = failure_reason or str(exc)
            job["TranscriptionJobStatus"] = TranscriptionJobStatus.FAILED

-           LOG.warning("Transcription job %s failed: %s", job_name, job["FailureReason"])
-
-           raise exc

Imho, a comment or a slight change would be nice here. The exception here is only raised such that it's stacktrace is shown in the logs (because this function is executed asynchronously in another thread)? Or is it caught anywhere? If it's just about the logging of the stacktrace, and you need it everytime you end up here, you could just use `LOG.exception` in the line above: https://docs.python.org/3/library/logging.html#logging.Logger.exception

def _run_transcription_job(self, args: Tuple[TranscribeStore, str]):
            job["FailureReason"] = failure_reason or str(exc)
            job["TranscriptionJobStatus"] = TranscriptionJobStatus.FAILED

+           LOG.exception("Transcription job %s failed: %s", job_name, job["FailureReason"])
codereview_new_python_data_6044
def start(self, env_vars: dict[str, str]) -> None:

    def stop(self) -> None:
        CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)
-       # CONTAINER_CLIENT.remove_container(container_name=self.id)
        try:
            self.executor_endpoint.shutdown()
        except Exception as e:

nit: Is this change intended? (as it seems unrelated to the PR)

def start(self, env_vars: dict[str, str]) -> None:

    def stop(self) -> None:
        CONTAINER_CLIENT.stop_container(container_name=self.id, timeout=5)
+       CONTAINER_CLIENT.remove_container(container_name=self.id)
        try:
            self.executor_endpoint.shutdown()
        except Exception as e:
codereview_new_python_data_6045
def _assert_batch(self, batch: List) -> None:
            raise EmptyBatchRequest
        visited = set()
        for entry in batch:
-           if not re.search(r"^[\w-]+$", (entry_id := entry["Id"])) or len(entry_id) > 80:
                raise InvalidBatchEntryId(
                    "A batch entry id can only contain alphanumeric characters, hyphens and underscores. "
                    "It can be at most 80 letters long."

nit: took me a while to see where `entry_id` is coming from. i think it's better not to use the implicit assignment. i think the extra line makes the code more readable.
```suggestion
            entry_id = entry["Id"]
            if not re.search(r"^[\w-]+$", entry_id) or len(entry_id) > 80:
```

def _assert_batch(self, batch: List) -> None:
            raise EmptyBatchRequest
        visited = set()
        for entry in batch:
+           entry_id = entry["Id"]
+           if not re.search(r"^[\w-]+$", entry_id) or len(entry_id) > 80:
                raise InvalidBatchEntryId(
                    "A batch entry id can only contain alphanumeric characters, hyphens and underscores. "
                    "It can be at most 80 letters long."
codereview_new_python_data_6046
def handler(event, context):
    fragment["Resources"]["Parameter"]["Properties"]["Value"] = json.dumps(
        {
            "Event": event,
-           # TODO find a way to print context class
-           # "Context": vars(context)
        }
    )

Is there anything specific in the context that we would be interested in here?

def handler(event, context):
    fragment["Resources"]["Parameter"]["Properties"]["Value"] = json.dumps(
        {
            "Event": event,
        }
    )
codereview_new_python_data_6047
def test_to_validate_template_limit_for_macro(
        snapshot,
    ):
        """
-       The test validates the template max size that can be pass into the create_template
        """
        macro_function_path = os.path.join(
            os.path.dirname(__file__), "../templates/macros/format_template.py"

```suggestion
        The test validates the max size of a template that can be passed into the macro function
```

def test_to_validate_template_limit_for_macro(
        snapshot,
    ):
        """
+       The test validates the max size of a template that can be passed into the macro function
        """
        macro_function_path = os.path.join(
            os.path.dirname(__file__), "../templates/macros/format_template.py"
codereview_new_python_data_6048
def test_to_validate_template_limit_for_macro(
        snapshot,
    ):
        """
-       The test validates the template max size that can be pass into the create_template
        """
        macro_function_path = os.path.join(
            os.path.dirname(__file__), "../templates/macros/format_template.py"

```suggestion
        The test validates the max size of a template that can be passed into the macro function
```

def test_to_validate_template_limit_for_macro(
        snapshot,
    ):
        """
+       The test validates the max size of a template that can be passed into the macro function
        """
        macro_function_path = os.path.join(
            os.path.dirname(__file__), "../templates/macros/format_template.py"
codereview_new_python_data_6049
def fix_types(o, **kwargs):

def log_not_available_message(resource_type: str, message: str):
    LOG.warning(
        f"{message}. To find out if {resource_type} is supported in LocalStack Pro, "
-       "please check out our docs at https://docs.localstack.cloud/user-guide/aws/cloudformation/"
    )

```suggestion
        "please check out our docs at https://docs.localstack.cloud/user-guide/aws/cloudformation/#resources-pro--enterprise-edition"
```

def fix_types(o, **kwargs):

def log_not_available_message(resource_type: str, message: str):
    LOG.warning(
        f"{message}. To find out if {resource_type} is supported in LocalStack Pro, "
+       "please check out our docs at https://docs.localstack.cloud/user-guide/aws/cloudformation/#resources-pro--enterprise-edition"
    )
codereview_new_python_data_6050
def test_cfn_with_apigateway_resources(deploy_cfn_template, apigateway_client):
            "$..version",
            "$..methodIntegration.cacheNamespace",
            "$..methodIntegration.connectionType",
-           "$..methodIntegration.integrationResponses",
            "$..methodIntegration.passthroughBehavior",
            "$..methodIntegration.requestTemplates",
            "$..methodIntegration.timeoutInMillis",

Forgive me if I am missing something, but the test is specifically testing that the `integrationResponses` can be created right? So why are you skipping verification of this field?

def test_cfn_with_apigateway_resources(deploy_cfn_template, apigateway_client):
            "$..version",
            "$..methodIntegration.cacheNamespace",
            "$..methodIntegration.connectionType",
            "$..methodIntegration.passthroughBehavior",
            "$..methodIntegration.requestTemplates",
            "$..methodIntegration.timeoutInMillis",
codereview_new_python_data_6051
def publish(
        elif phone_number:
            self._publisher.publish_to_phone_number(ctx=publish_ctx, phone_number=phone_number)
        else:
-           # TODO: beware if FIFO, order is guaranteed yet. Semaphore? might block workers
-           # 2 quick call in succession might be unordered in the executor? need to try it with many threads
            self._publisher.publish_to_topic(publish_ctx, topic_arn or target_arn)

        return PublishResponse(MessageId=message_ctx.message_id)

This TODO is not 100% clear to me. Please clarify it a bit :slightly_smiling_face:

def publish(
        elif phone_number:
            self._publisher.publish_to_phone_number(ctx=publish_ctx, phone_number=phone_number)
        else:
+           # TODO: beware if the subscription is FIFO, the order might not be guaranteed.
+           # 2 quick call to this method in succession might not be executed in order in the executor?
            self._publisher.publish_to_topic(publish_ctx, topic_arn or target_arn)

        return PublishResponse(MessageId=message_ctx.message_id)
codereview_new_python_data_6052
def test_publish_sms(self, sns_client):

    @pytest.mark.aws_validated
    def test_publish_non_existent_target(self, sns_client, sns_create_topic, snapshot):
-       # todo: fix test, the client id in the ARN is wrong so can't test against AWS
        topic_arn = sns_create_topic()["TopicArn"]
        account_id = parse_arn(topic_arn)["account"]
        with pytest.raises(ClientError) as ex:

Is this todo addressed now? was the "client id" the account id?

def test_publish_sms(self, sns_client):

    @pytest.mark.aws_validated
    def test_publish_non_existent_target(self, sns_client, sns_create_topic, snapshot):
        topic_arn = sns_create_topic()["TopicArn"]
        account_id = parse_arn(topic_arn)["account"]
        with pytest.raises(ClientError) as ex:
codereview_new_python_data_6053
def store_s3_bucket_archive(
    )


-def store_image_code(image_uri: str) -> ImageCode:
    """
    Creates an image code by inspecting the provided image

```suggestion
def create_image_code(image_uri: str) -> ImageCode:
```
nit: doesn't seem to store anything :thinking:

def store_s3_bucket_archive(
    )


+def create_image_code(image_uri: str) -> ImageCode:
    """
    Creates an image code by inspecting the provided image
codereview_new_python_data_6054
def on_before_stop(self):
        pass

    def on_after_inject(self):
-       """Hook triggered after a new state in injected."""
        pass

    def on_exception(self):

tiny typo nit:
```suggestion
        """Hook triggered after new state has been injected into the provider's store."""
```

def on_before_stop(self):
        pass

    def on_after_inject(self):
+       """Hook triggered after new state has been injected into the provider's store."""
        pass

    def on_exception(self):
codereview_new_python_data_6055
def get_docker_image_to_start():
    image_name = constants.DOCKER_IMAGE_NAME
    if os.environ.get("USE_LIGHT_IMAGE") in constants.FALSE_STRINGS:
        image_name = constants.DOCKER_IMAGE_NAME_FULL
-   if os.environ.get("LOCALSTACK_API_KEY"):
        image_name = constants.DOCKER_IMAGE_NAME_PRO
    return image_name

Should the pro image be used when setting the api key, or is it some different switch?

def get_docker_image_to_start():
    image_name = constants.DOCKER_IMAGE_NAME
    if os.environ.get("USE_LIGHT_IMAGE") in constants.FALSE_STRINGS:
        image_name = constants.DOCKER_IMAGE_NAME_FULL
+   if os.environ.get("LOCALSTACK_API_KEY") and os.environ.get("LOCALSTACK_API_KEY").strip():
        image_name = constants.DOCKER_IMAGE_NAME_PRO
    return image_name
codereview_new_python_data_6056
def test_head_object_fields(self, s3_client, s3_bucket, snapshot):
        snapshot.match("head-object", response)

    @pytest.mark.aws_validated
    def test_get_object_after_deleted_in_versioned_bucket(
        self, s3_client, s3_bucket, s3_resource, snapshot
    ):

```suggestion
    @pytest.mark.aws_validated
    @pytest.mark.skip_snapshot_verify(
        condition=is_old_provider, paths=["$..ContentLanguage", "$..Error.RequestID"]
    )
```

def test_head_object_fields(self, s3_client, s3_bucket, snapshot):
        snapshot.match("head-object", response)

    @pytest.mark.aws_validated
+   @pytest.mark.skip_snapshot_verify(
+       condition=is_old_provider, paths=["$..ContentLanguage", "$..Error.RequestID"]
+   )
    def test_get_object_after_deleted_in_versioned_bucket(
        self, s3_client, s3_bucket, s3_resource, snapshot
    ):
codereview_new_python_data_6057
"python3.7": "python:3.7@sha256:be668898a538d5258e006e1920f86f31cab8000dfa68b3be78d5ef67ad15a417", "python3.8": "python:3.8@sha256:b3402a5f5e9535ba4787a1fd6b0ee39738dee18bdff861a0589571ba74122d35", "python3.9": "python:3.9@sha256:5b3585b121e6fb9707abb52c1f99cbab51939fee0769752ab6c641f20f479cf6", - "nodejs12.x": "nodejs:12@sha256:16431b8d5eb26e80b37a80297ad67f70478c045339395bbd32f45091275ebb50", "nodejs14.x": "nodejs:14@sha256:aa3286c61b6c3f97219da3e9fa39d97f9586672c64c958cb5b980268afdfb554", "nodejs16.x": "nodejs:16@sha256:c7714124a782801cb7080fd6abddf9354a2ee89642571cb9222bb7541d3df558", "nodejs18.x": "nodejs:18@sha256:f3c7ebb522417c8212d765b504e8078f99be78d41f82f3a08b7261e183ce4ed6", Any reason for not updating nodejs12.x? (newest sha would be `1389b8cc6bbd321f22b0218233da11bd49382bbee7689dba41c7377c12df65a5`) "python3.7": "python:3.7@sha256:be668898a538d5258e006e1920f86f31cab8000dfa68b3be78d5ef67ad15a417", "python3.8": "python:3.8@sha256:b3402a5f5e9535ba4787a1fd6b0ee39738dee18bdff861a0589571ba74122d35", "python3.9": "python:3.9@sha256:5b3585b121e6fb9707abb52c1f99cbab51939fee0769752ab6c641f20f479cf6", + "nodejs12.x": "nodejs:12@sha256:1389b8cc6bbd321f22b0218233da11bd49382bbee7689dba41c7377c12df65a5", "nodejs14.x": "nodejs:14@sha256:aa3286c61b6c3f97219da3e9fa39d97f9586672c64c958cb5b980268afdfb554", "nodejs16.x": "nodejs:16@sha256:c7714124a782801cb7080fd6abddf9354a2ee89642571cb9222bb7541d3df558", "nodejs18.x": "nodejs:18@sha256:f3c7ebb522417c8212d765b504e8078f99be78d41f82f3a08b7261e183ce4ed6",
codereview_new_python_data_6058
def test_sam_template(lambda_client, deploy_cfn_template):
        parameters={"FunctionName": func_name},
    )

-   lambda_client.get_waiter("function_active_v2").wait(
-       FunctionName=func_name
-   )  # TODO: fix cfn model instead
-
    # run Lambda test invocation
    result = lambda_client.invoke(FunctionName=func_name)
    result = json.loads(to_str(result["Payload"].read()))

TODO: Can be removed now

def test_sam_template(lambda_client, deploy_cfn_template):
        parameters={"FunctionName": func_name},
    )

    # run Lambda test invocation
    result = lambda_client.invoke(FunctionName=func_name)
    result = json.loads(to_str(result["Payload"].read()))
codereview_new_python_data_6059
def call_lambda(function_arn: str, event: bytes, asynchronous: bool) -> str:
    lambda_client = aws_stack.connect_to_service(
        "lambda", region_name=extract_region_from_arn(function_arn)
    )
-   lambda_client.get_waiter("function_active_v2").wait(FunctionName=function_arn)
    inv_result = lambda_client.invoke(
        FunctionName=function_arn,
        Payload=event,

TODO: should be removed

def call_lambda(function_arn: str, event: bytes, asynchronous: bool) -> str:
    lambda_client = aws_stack.connect_to_service(
        "lambda", region_name=extract_region_from_arn(function_arn)
    )
    inv_result = lambda_client.invoke(
        FunctionName=function_arn,
        Payload=event,
codereview_new_python_data_6060
def test_lifecycle_nested_stack(cfn_client, deploy_cfn_template, s3_client, s3_c
        assert s3_client.head_bucket(Bucket=altered_nested_bucket_name)

-       cfn_client.delete_stack(StackName=stack.stack_name)

        def _assert_bucket_is_deleted():
            try:

FYI: there's a convenience function that you *can* use if you want
```python
stack = deploy_cfn_template(
    ...
)
stack.destroy()
```

def test_lifecycle_nested_stack(cfn_client, deploy_cfn_template, s3_client, s3_c
        assert s3_client.head_bucket(Bucket=altered_nested_bucket_name)

+       stack.destroy()

        def _assert_bucket_is_deleted():
            try:
codereview_new_python_data_6061
def set_aws_account_id(account_id: str) -> None: def get_account_id_from_access_key_id(access_key_id: str) -> str: """Return the Account ID associated the Access Key ID.""" - # If AWS_ACCES_KEY_ID has a 12-digit integer value, use it as the account ID if re.match(r"\d{12}", access_key_id): return access_key_id ```suggestion # If AWS_ACCESS_KEY_ID has a 12-digit integer value, use it as the account ID ``` def set_aws_account_id(account_id: str) -> None: def get_account_id_from_access_key_id(access_key_id: str) -> str: """Return the Account ID associated the Access Key ID.""" + # If AWS_ACCESS_KEY_ID has a 12-digit integer value, use it as the account ID if re.match(r"\d{12}", access_key_id): return access_key_id
codereview_new_python_data_6062
def put_bucket_cors( expected_bucket_owner: AccountId = None, ) -> None: response = call_moto(context) - # max 100 rules - # validate CORS? see moto self.get_store().bucket_cors[bucket] = cors_configuration self._cors_handler.invalidate_cache() return response Is this a leftover? If not, could you describe what you mean with that comment? def put_bucket_cors( expected_bucket_owner: AccountId = None, ) -> None: response = call_moto(context) self.get_store().bucket_cors[bucket] = cors_configuration self._cors_handler.invalidate_cache() return response
codereview_new_python_data_6063
def publish_version(self, function_version: FunctionVersion): """ Synchronously create a function version (manager) Should only be called on publishing new versions, which basically clone an existing one. - The published version should already be contained in the lambda state. After successful completion of this method, the lambda version stored will be modified to be active, with a new revision id. It will then be active for execution, and should be retrieved again from the store before returning the data over the API. ```suggestion The new version needs to be added to the lambda store before invoking this. ``` def publish_version(self, function_version: FunctionVersion): """ Synchronously create a function version (manager) Should only be called on publishing new versions, which basically clone an existing one. + The new version needs to be added to the lambda store before invoking this. After successful completion of this method, the lambda version stored will be modified to be active, with a new revision id. It will then be active for execution, and should be retrieved again from the store before returning the data over the API.
codereview_new_python_data_6064
def do_execute(q): start_time = now(millis=True) process.start() try: - process_result: LocalExecutorResult = process_queue.get(timeout=20) except queue.Empty: process_result = LocalExecutorResult( "", ```suggestion process_result: LocalExecutorResult = process_queue.get(timeout=lambda_function.timeout or 20) ``` def do_execute(q): start_time = now(millis=True) process.start() try: + process_result: LocalExecutorResult = process_queue.get(timeout=lambda_function.timeout or 20) except queue.Empty: process_result = LocalExecutorResult( "",
codereview_new_python_data_6065
def _create(resource_id, resources, resource_type, func, stack_name): resource = resources[resource_id] props = resource["Properties"] - tag_list = props.get("Tags", []) - tag_map = {tag["Key"]: tag["Value"] for tag in tag_list} # TODO: add missing attributes result = client.create_rest_api( name=props["Name"], description=props.get("Description", ""), - tags=tag_map, ) body = props.get("Body") if body: small cosmetic nit: I'd inline `tag_list` on the dictionary comprehension and reword `tag_map` to `tags`, something around these lines: ```suggestion tags = {tag["Key"]: tag["Value"] for tag in props.get("Tags", [])} ``` def _create(resource_id, resources, resource_type, func, stack_name): resource = resources[resource_id] props = resource["Properties"] + tags = {tag["Key"]: tag["Value"] for tag in props.get("Tags", [])} # TODO: add missing attributes result = client.create_rest_api( name=props["Name"], description=props.get("Description", ""), + tags=tags, ) body = props.get("Body") if body:
codereview_new_python_data_6066
def post_object( try: response: PostResponse = call_moto(context=context) except ServiceException as e: - if e.code == "303": - response = PostResponse(StatusCode=303) else: raise e nit: should probably be ```suggestion if e.status_code == 303: ``` def post_object( try: response: PostResponse = call_moto(context=context) except ServiceException as e: + if e.status_code == 303: + # the parser did not succeed in parsing the moto respond, we start constructing the response ourselves + response = PostResponse(StatusCode=e.status_code) else: raise e
codereview_new_python_data_6067
def _extract_service_indicators(request: Request) -> _ServiceIndicators: "/2021-01-01": "opensearch", }, "sagemaker": { - "/endpoints/": "sagemaker-runtime", "/human-loops": "sagemaker-a2i-runtime", }, } Does it make a difference here if we add `"/endpoints/"` instead of `"/endpoints"` as discussed in the internal discussions? def _extract_service_indicators(request: Request) -> _ServiceIndicators: "/2021-01-01": "opensearch", }, "sagemaker": { + "/endpoints": "sagemaker-runtime", "/human-loops": "sagemaker-a2i-runtime", }, }
codereview_new_python_data_6068
def cmd_start(docker: bool, host: bool, no_banner: bool, detached: bool): try: bootstrap.start_infra_locally() except ImportError: raise click.ClickException( "It appears you have a light install of localstack which only supports running in docker\n" "If you would like to use --host, please reinstall localstack using `pip install localstack[runtime]`" ```suggestion except ImportError: if config.DEBUG: console.print_exception() raise click.ClickException( ``` def cmd_start(docker: bool, host: bool, no_banner: bool, detached: bool): try: bootstrap.start_infra_locally() except ImportError: + if config.DEBUG: + console.print_exception() raise click.ClickException( "It appears you have a light install of localstack which only supports running in docker\n" "If you would like to use --host, please reinstall localstack using `pip install localstack[runtime]`"
codereview_new_python_data_6069
def get_cfn_attribute(self, attribute_name): if attribute_name == "WebsiteURL": bucket_name = self.props.get("BucketName") - return f"http://{bucket_name}.{S3_STATIC_WEBSITE_HOSTNAME}" return super(S3Bucket, self).get_cfn_attribute(attribute_name) nit: technically the s3 bucket could also have an https URL. def get_cfn_attribute(self, attribute_name): if attribute_name == "WebsiteURL": bucket_name = self.props.get("BucketName") + return f"https://{bucket_name}.{S3_STATIC_WEBSITE_HOSTNAME}" return super(S3Bucket, self).get_cfn_attribute(attribute_name)
codereview_new_python_data_6070
class NoSuchVersionException(PackageException): pass -class UnsupportedOSException(PackageException): - """Exception indicating that the requested package does not exist for the used operating system""" - - pass - - -class UnsupportedArchException(PackageException): - """Exception indicating that the requested package does not exist for the used architecture""" - - pass - - class InstallTarget(Enum): """ Different installation targets. I think these aren't really necessary. `SystemNotSupportedException` in `localstack.packages.core` seems to for that purpose (since they are only used for the lambda go runtime and these specific exceptions aren't caught anywhere). class NoSuchVersionException(PackageException): pass class InstallTarget(Enum): """ Different installation targets.
codereview_new_python_data_6071
def _install(self, target: InstallTarget) -> None: ) -class PermissionDownloadInstaller(DownloadInstaller): - def _get_download_url(self) -> str: - raise NotImplementedError() - def _install(self, target: InstallTarget) -> None: super()._install(target) chmod_r(self.get_executable_path(), 0o777) This function should be removed. Instead the `PermissionDownloadInstaller` should (additionally) extend `ABC`. def _install(self, target: InstallTarget) -> None: ) +class PermissionDownloadInstaller(DownloadInstaller, ABC): def _install(self, target: InstallTarget) -> None: super()._install(target) chmod_r(self.get_executable_path(), 0o777)
codereview_new_python_data_6072
def _install(self, target: InstallTarget) -> None: terraform_package = TerraformPackage() -TERRAFORM_BIN = terraform_package.get_installer().get_executable_path() This line needs to be removed. The binary location needs to be evaluated at runtime when the binary is needed (or at least after it is ensured that it has been explicitly installed before), because with our new fallback mechanism the installation location is determined at the time of the installation. Now, when terraform is not already installed at the time of the evaluation (i.e. when the file is imported the first time), this constant will have the value `None` and will stay `None`. def _install(self, target: InstallTarget) -> None: terraform_package = TerraformPackage()
codereview_new_python_data_6073
def is_none_or_empty(obj: Union[Optional[str], Optional[list]]) -> bool: def select_from_typed_dict(typed_dict: Type[TypedDict], obj: Dict, filter: bool = False) -> Dict: - """Select a subset of attributes from a dictionary based on the keys of a given `TypedDict`""" selection = select_attributes( obj, [*typed_dict.__required_keys__, *typed_dict.__optional_keys__] ) nit: maybe we could add proper pydoc here to also disambiguate the `filter` term def is_none_or_empty(obj: Union[Optional[str], Optional[list]]) -> bool: def select_from_typed_dict(typed_dict: Type[TypedDict], obj: Dict, filter: bool = False) -> Dict: + """ + Select a subset of attributes from a dictionary based on the keys of a given `TypedDict`. + :param typed_dict: the `TypedDict` blueprint + :param obj: the object to filter + :param filter: if True, remove all keys with an empty (e.g., empty string or dictionary) or `None` value + :return: the resulting dictionary (it returns a copy) + """ selection = select_attributes( obj, [*typed_dict.__required_keys__, *typed_dict.__optional_keys__] )
codereview_new_python_data_6074
class FunctionUrlConfig: class VersionFunctionConfiguration: # fields # name: str - function_arn: str # TODO:? description: str role: str timeout: int Out of curiosity, what's the required change (TODO) here? class FunctionUrlConfig: class VersionFunctionConfiguration: # fields # name: str description: str role: str timeout: int
codereview_new_python_data_6075
def store_s3_bucket_archive( account_id: str, ) -> S3Code: """ - Takes the lambda archive stored in the given bucket and stores it in an internal s4 bucket :param archive_bucket: Bucket the archive is stored in :param archive_key: Key the archive is stored under ```suggestion Takes the lambda archive stored in the given bucket and stores it in an internal S3 bucket ``` def store_s3_bucket_archive( account_id: str, ) -> S3Code: """ + Takes the lambda archive stored in the given bucket and stores it in an internal s3 bucket :param archive_bucket: Bucket the archive is stored in :param archive_key: Key the archive is stored under
codereview_new_python_data_6076
def qualifier_exists(self, qualifier): @property def envvars(self): return self._envvars @envvars.setter It would be nice if you can add a docstring here (we use reStructuredText usually) to document this condition. def qualifier_exists(self, qualifier): @property def envvars(self): + """Get the environment variables for the function. + + When setting the environment variables, perform the following + validations: + + - environment variables must be less than 4KiB in size + """ return self._envvars @envvars.setter
codereview_new_python_data_6077
def _assert_transcript(): content = to_str(data["Body"].read()) assert "hello my name is" in content - retry(_assert_transcript, retries=10, sleep=3) def test_transcribe_unsupported_media_format_failure( self, transcribe_client, transcribe_create_job I fear this will most likely end up being flaky, similar to `test_transcribe_happy_path()`. In the happy_path test we upped the timeout to 100sec, while here we only have a 30sec total. Will the VOSK model be reused across tests? This would save significant time def _assert_transcript(): content = to_str(data["Body"].read()) assert "hello my name is" in content + retry(_assert_transcript, retries=30, sleep=2) def test_transcribe_unsupported_media_format_failure( self, transcribe_client, transcribe_create_job
codereview_new_python_data_6078
) STEPFUNCTIONS_ZIP_URL = "https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip" KMS_URL_PATTERN = "https://s3-eu-west-2.amazonaws.com/local-kms/3/local-kms_<arch>.bin" -FFMPEG_STATIC_BIN_URL = ( - "https://www.johnvansickle.com/ffmpeg/old-releases/ffmpeg-{version}-{arch}-static.tar.xz" -) # API endpoint for analytics events API_ENDPOINT = os.environ.get("API_ENDPOINT") or "https://api.localstack.cloud/v1" nit: Since the installation is done via an installer, I don't think it's necessary to add this URL to the constants, but I would rather add it directly to the `packages.py` instead. ) STEPFUNCTIONS_ZIP_URL = "https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip" KMS_URL_PATTERN = "https://s3-eu-west-2.amazonaws.com/local-kms/3/local-kms_<arch>.bin" # API endpoint for analytics events API_ENDPOINT = os.environ.get("API_ENDPOINT") or "https://api.localstack.cloud/v1"
codereview_new_python_data_6079
def test_lambda_cache_local( second_invoke_result = lambda_client.invoke(FunctionName=func_name) snapshot.match("second_invoke_result", second_invoke_result) - @pytest.mark.skip_snapshot_verify( - condition=is_old_provider, - paths=["$..FunctionError", "$..LogResult", "$..Payload", "$..Layers"], - ) @pytest.mark.skipif(is_old_provider(), reason="old provider") @pytest.mark.aws_validated def test_lambda_invoke_with_timeout( NIT: one of the markers can probably be removed? `skipif(is_old_provider()...)` and `skip_snapshot_verify(condtion=is_old_provider,...)` def test_lambda_cache_local( second_invoke_result = lambda_client.invoke(FunctionName=func_name) snapshot.match("second_invoke_result", second_invoke_result) @pytest.mark.skipif(is_old_provider(), reason="old provider") @pytest.mark.aws_validated def test_lambda_invoke_with_timeout(
codereview_new_python_data_6080
def run_app_sync(*args, loop=None, shutdown_event=None): class ProxyThread(FuncThread): def __init__(self): - FuncThread.__init__(self, self.run_proxy, None, name="proxy-thread") # TODO self.shutdown_event = None self.loop = None Seems like a TODO slipped through here? def run_app_sync(*args, loop=None, shutdown_event=None): class ProxyThread(FuncThread): def __init__(self): + FuncThread.__init__(self, self.run_proxy, None, name="proxy-thread") self.shutdown_event = None self.loop = None
codereview_new_python_data_6081
def stop(self, quiet: bool = False) -> None: def start_thread(method, *args, **kwargs) -> FuncThread: # TODO: find all usages and add names... """Start the given method in a background thread, and add the thread to the TMP_THREADS shutdown hook""" _shutdown_hook = kwargs.pop("_shutdown_hook", True) - # if not kwargs.get("name"): - # print("oh no") kwargs.setdefault("name", method.__name__) thread = FuncThread(method, *args, **kwargs) thread.start() Oh no :P def stop(self, quiet: bool = False) -> None: def start_thread(method, *args, **kwargs) -> FuncThread: # TODO: find all usages and add names... """Start the given method in a background thread, and add the thread to the TMP_THREADS shutdown hook""" _shutdown_hook = kwargs.pop("_shutdown_hook", True) + if not kwargs.get("name"): + LOG.debug("start_thread called without providing a custom name") # technically we should add a new level here for *internal* warnings kwargs.setdefault("name", method.__name__) thread = FuncThread(method, *args, **kwargs) thread.start()
codereview_new_python_data_6082
def run_app_sync(*args, loop=None, shutdown_event=None): class ProxyThread(FuncThread): def __init__(self): - FuncThread.__init__(self, self.run_proxy, None, name="proxy-thread") # TODO self.shutdown_event = None self.loop = None What's TODO here? def run_app_sync(*args, loop=None, shutdown_event=None): class ProxyThread(FuncThread): def __init__(self): + FuncThread.__init__(self, self.run_proxy, None, name="proxy-thread") self.shutdown_event = None self.loop = None
codereview_new_python_data_6083
def _create_apigateway_function(*args, **kwargs): @pytest.fixture -def import_apigateway_function(apigateway_client): rest_api_ids = [] def _import_apigateway_function(*args, **kwargs): nit: I guess the same could apply to shortening the name of this fixture.. 😅 (don't bother for this PR, though - could be refactored in a future iteration) def _create_apigateway_function(*args, **kwargs): @pytest.fixture +def import_apigw(apigateway_client): rest_api_ids = [] def _import_apigateway_function(*args, **kwargs):
codereview_new_python_data_6084
def inline_policy_unapply_policy(fn, self, backend): except Exception: # Actually role can be deleted before policy being deleted in cloudformation pass - - @patch(IAMBackend.create_policy) - def clean_policy_document_from_no_values( - fn, self, description, path, policy_document, policy_name, tags - ): - # Sometime CDK adds this resources to the policy doc that should be ignored - doc = json.loads(policy_document) - - def _remove_no_values(statement): - if isinstance(statement["Resource"], list): - statement["Resource"] = [ - statement_resource - for statement_resource in statement["Resource"] - if statement_resource != "__aws_no_value__" - ] - return statement - - if isinstance(doc["Statement"], list): - doc["Statement"] = [_remove_no_values(statement) for statement in doc["Statement"]] - return fn(self, description, path, json.dumps(doc), policy_name, tags) In my opinion, this should be part of the cloudformation model, not of IAM itself, since this is a CDK/cloudformation specific value, which has no notion in IAM itself, right? I would prefer not to mix those things here (unless there is indication IAM itself does this?) def inline_policy_unapply_policy(fn, self, backend): except Exception: # Actually role can be deleted before policy being deleted in cloudformation pass
codereview_new_python_data_6085
import pytest -# TODO fix service so it returns the stream mode @pytest.mark.aws_validated @pytest.mark.skip_snapshot_verify(paths=["$..StreamDescription.StreamModeDetails"]) def test_stream_creation(kinesis_client, deploy_cfn_template, snapshot): ```suggestion ``` The paths in `skips_snapshot_verify` already cover this, so the extra TODO is redundant :+1: import pytest @pytest.mark.aws_validated @pytest.mark.skip_snapshot_verify(paths=["$..StreamDescription.StreamModeDetails"]) def test_stream_creation(kinesis_client, deploy_cfn_template, snapshot):
codereview_new_python_data_6086
def factory(**kwargs): @pytest.fixture def transcribe_create_job(transcribe_client, s3_client, s3_bucket): def _create_job(**kwargs): if "TranscriptionJobName" not in kwargs: kwargs["TranscriptionJobName"] = f"test-transcribe-{short_uid()}" if "LanguageCode" not in kwargs: kwargs["LanguageCode"] = "en-GB" if "Media" not in kwargs: - test_key = "test-clip.wav" kwargs["Media"] = {"MediaFileUri": f"s3://{s3_bucket}/{test_key}"} # upload test wav to a s3 bucket `test_key` must be defined outside of the `if` block to avoid potential `NameError` on line 693 def factory(**kwargs): @pytest.fixture def transcribe_create_job(transcribe_client, s3_client, s3_bucket): def _create_job(**kwargs): + test_key = "test-clip.wav" + if "TranscriptionJobName" not in kwargs: kwargs["TranscriptionJobName"] = f"test-transcribe-{short_uid()}" if "LanguageCode" not in kwargs: kwargs["LanguageCode"] = "en-GB" if "Media" not in kwargs: kwargs["Media"] = {"MediaFileUri": f"s3://{s3_bucket}/{test_key}"} # upload test wav to a s3 bucket
codereview_new_python_data_6087
def factory(**kwargs): @pytest.fixture def transcribe_create_job(transcribe_client, s3_client, s3_bucket): def _create_job(**kwargs): if "TranscriptionJobName" not in kwargs: kwargs["TranscriptionJobName"] = f"test-transcribe-{short_uid()}" if "LanguageCode" not in kwargs: kwargs["LanguageCode"] = "en-GB" if "Media" not in kwargs: - test_key = "test-clip.wav" kwargs["Media"] = {"MediaFileUri": f"s3://{s3_bucket}/{test_key}"} # upload test wav to a s3 bucket It would be clearer if the function args are explicit with defaults def factory(**kwargs): @pytest.fixture def transcribe_create_job(transcribe_client, s3_client, s3_bucket): def _create_job(**kwargs): + test_key = "test-clip.wav" + if "TranscriptionJobName" not in kwargs: kwargs["TranscriptionJobName"] = f"test-transcribe-{short_uid()}" if "LanguageCode" not in kwargs: kwargs["LanguageCode"] = "en-GB" if "Media" not in kwargs: kwargs["Media"] = {"MediaFileUri": f"s3://{s3_bucket}/{test_key}"} # upload test wav to a s3 bucket
codereview_new_python_data_6088
def delete_change_set( change_set.stack.change_sets = [ cs for cs in change_set.stack.change_sets - if (cs.change_set_name != change_set_name and cs.change_set_id != change_set_name) ] return DeleteChangeSetOutput() tiny nit: could be simplified a bit ```suggestion if change_set_name not in (cs.change_set_name, cs.change_set_id) ``` def delete_change_set( change_set.stack.change_sets = [ cs for cs in change_set.stack.change_sets + if change_set_name not in (cs.change_set_name, cs.change_set_id) ] return DeleteChangeSetOutput()
codereview_new_python_data_6089
def test_default_logging_configuration( assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 result = stepfunctions_client.describe_state_machine(stateMachineArn=result["stateMachineArn"]) assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 - assert result["loggingConfiguration"] assert result["loggingConfiguration"] == {"level": "OFF", "includeExecutionData": False} # clean up unsafe cleanup This obv. isn't super critical when running against LocalStack but a test against AWS might leak resources here. Either fixtures or other safer cleanup strategies should be used instead. A finally block would be ok too but would again need to wrap the individual cleanup statements in a safe way so they don't interfere with each other. def test_default_logging_configuration( assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 result = stepfunctions_client.describe_state_machine(stateMachineArn=result["stateMachineArn"]) assert result["ResponseMetadata"]["HTTPStatusCode"] == 200 assert result["loggingConfiguration"] == {"level": "OFF", "includeExecutionData": False} # clean up
codereview_new_python_data_6090
def test_report_batch_item_failures_invalid_result_json_batch_fails( assert "Messages" in first_invocation snapshot.match("first_invocation", first_invocation) - # now wait for the second invocation result which second_invocation = sqs_client.receive_message( QueueUrl=destination_url, WaitTimeSeconds=15, MaxNumberOfMessages=1 ) ```suggestion # now wait for the second invocation result which (?) ``` def test_report_batch_item_failures_invalid_result_json_batch_fails( assert "Messages" in first_invocation snapshot.match("first_invocation", first_invocation) + # now wait for the second invocation result, which should be a retry of the first second_invocation = sqs_client.receive_message( QueueUrl=destination_url, WaitTimeSeconds=15, MaxNumberOfMessages=1 )
codereview_new_python_data_6091
def match(self, key: str, obj: dict) -> None: self.called_keys.add(key) # order the obj to guarantee reference replacement works as expected - self.observed_state[key] = ( - self._order_dict(obj) if isinstance(obj, dict) else obj - ) # type is not enforced, we already have some tests using list-objects # TODO: track them separately since the transformation is now done *just* before asserting if not self.update and (not self.recorded_state or not self.recorded_state.get(key)): nit: I think you don't need the first `isinstance` anymore as it checks it in the function now def match(self, key: str, obj: dict) -> None: self.called_keys.add(key) # order the obj to guarantee reference replacement works as expected + self.observed_state[key] = self._order_dict(obj) # TODO: track them separately since the transformation is now done *just* before asserting if not self.update and (not self.recorded_state or not self.recorded_state.get(key)):
codereview_new_python_data_6092
def send_notification_for_subscriber( try: events_client.put_events(Entries=[entry]) except Exception as e: - LOGGER.warning( - f'Unable to send notification for S3 bucket "{bucket_name}" to EventBridge', e ) if not filter(lambda x: notification.get(x), NOTIFICATION_DESTINATION_TYPES): maybe we could log this using the exception level ```suggestion LOGGER.exception( 'Unable to send notification for S3 bucket "%s" to EventBridge', bucket_name ) ``` def send_notification_for_subscriber( try: events_client.put_events(Entries=[entry]) except Exception as e: + LOGGER.exception( + 'Unable to send notification for S3 bucket "%s" to EventBridge', bucket_name ) if not filter(lambda x: notification.get(x), NOTIFICATION_DESTINATION_TYPES):
codereview_new_python_data_6093
from jsonpath_ng.ext import parse SNAPSHOT_LOGGER = logging.getLogger(__name__) -SNAPSHOT_LOGGER.setLevel(logging.DEBUG if os.environ.get("SNAPSHOT_DEBUG") else logging.WARNING) # Types i think `SNAPSHOT_DEBUG` is better than `DEBUG_SNAPSHOT` (since the first specifies the namepsace of the property), however we already have `DEBUG_ANALYTICS`, `DEBUG_PLUGINS`, so for the sake of consistency it would maybe be good to rename the variable. from jsonpath_ng.ext import parse SNAPSHOT_LOGGER = logging.getLogger(__name__) +SNAPSHOT_LOGGER.setLevel(logging.DEBUG if os.environ.get("DEBUG_SNAPSHOT") else logging.WARNING) # Types
codereview_new_python_data_6094
def invoke(self, api_context: ApiInvocationContext): EVENTBRIDGE_PUTEVENTS = "EventBridge-PutEvents", False SQS_SENDMESSAGE = "SQS-SendMessage", False - SQS_RECEIVESMESSAGE = "SQS-ReceiveMessage", False SQS_DELETEMESSAGE = "SQS-DeleteMessage", False SQS_PURGEQUEUE = "SQS-PurgeQueue", False APPCONFIG_GETCONFIGURATION = "AppConfig-GetConfiguration", False ```suggestion SQS_RECEIVEMESSAGE = "SQS-ReceiveMessage", False ``` def invoke(self, api_context: ApiInvocationContext): EVENTBRIDGE_PUTEVENTS = "EventBridge-PutEvents", False SQS_SENDMESSAGE = "SQS-SendMessage", False + SQS_RECEIVEMESSAGE = "SQS-ReceiveMessage", False SQS_DELETEMESSAGE = "SQS-DeleteMessage", False SQS_PURGEQUEUE = "SQS-PurgeQueue", False APPCONFIG_GETCONFIGURATION = "AppConfig-GetConfiguration", False
codereview_new_python_data_6095
import pytest -from localstack.utils.common import retry -from localstack.utils.common import safe_requests as requests -from localstack.utils.common import short_uid THIS_FOLDER = os.path.dirname(os.path.realpath(__file__)) TEST_LAMBDA_PYTHON_TRIGGERED_S3 = os.path.join( Please do not import directly from .common, but from the respective modules in localstack.utils. For retry, this would be localstack.utils.sync for example. We should change those imports whenever we run across them. import pytest +from localstack.utils.http import safe_requests as requests +from localstack.utils.strings import short_uid +from localstack.utils.sync import retry THIS_FOLDER = os.path.dirname(os.path.realpath(__file__)) TEST_LAMBDA_PYTHON_TRIGGERED_S3 = os.path.join(
codereview_new_python_data_6096
def cmd_logs(follow: bool): if not DOCKER_CLIENT.is_container_running(container_name): console.print("localstack container not running") - with open(logfile) as fd: - for line in fd: - console.print(line.rstrip("\n\r")) sys.exit(1) if follow: We should guard against cases where the log file does not exist def cmd_logs(follow: bool): if not DOCKER_CLIENT.is_container_running(container_name): console.print("localstack container not running") + if os.path.exists(logfile): + console.print("printing logs from previous run") + with open(logfile) as fd: + for line in fd: + click.echo(line, nl=False) sys.exit(1) if follow:
codereview_new_python_data_6111
def test_contains_product(self): ranges = models.Range.objects.contains_product(self.prod) self.assertEqual(ranges.count(), 2, "Both ranges should contain the product") - def test_not_contains_non_public_product(self): - product = create_product() - self.range.add_product(product) - product.is_public = False - product.save() - self.assertFalse(product in self.range.all_products()) - self.range.remove_product(product) - def test_excluded_product(self): ranges = models.Range.objects.contains_product(self.excludedprod) self.assertEqual( you don't need to remove the product here, it's just test data def test_contains_product(self): ranges = models.Range.objects.contains_product(self.prod) self.assertEqual(ranges.count(), 2, "Both ranges should contain the product") def test_excluded_product(self): ranges = models.Range.objects.contains_product(self.excludedprod) self.assertEqual(
codereview_new_python_data_6112
def products(self): return Product.objects.none() queryset = self.condition.range.all_products() - return queryset.filter(is_discountable=True).exclude( - structure=Product.CHILD).browsable() @cached_property def combined_offers(self): En then you can remove the .exclude since this is also done in browsable :) def products(self): return Product.objects.none() queryset = self.condition.range.all_products() + return queryset.filter(is_discountable=True).browsable() @cached_property def combined_offers(self):
codereview_new_python_data_6113
DEFAULT_HASHING_ALGORITHM = 'sha1' TEST_RUNNER = 'django.test.runner.DiscoverRunner' FIXTURE_DIRS = [location('unit/fixtures')] - -# Try and import local settings which can be used to override any of the above. -try: - from tests.settings_local import * -except ImportError: - pass I think you committed this by accident? DEFAULT_HASHING_ALGORITHM = 'sha1' TEST_RUNNER = 'django.test.runner.DiscoverRunner' FIXTURE_DIRS = [location('unit/fixtures')]
codereview_new_python_data_6116
class _LGBMRegressorBase: # type: ignore _LGBMBaseCrossValidator = None _LGBMLabelEncoder = None - _LGBMBaseCrossValidator = None LGBMNotFittedError = ValueError _LGBMStratifiedKFold = None _LGBMGroupKFold = None This is already included above ```suggestion ``` class _LGBMRegressorBase: # type: ignore _LGBMBaseCrossValidator = None _LGBMLabelEncoder = None LGBMNotFittedError = ValueError _LGBMStratifiedKFold = None _LGBMGroupKFold = None
codereview_new_python_data_6117
This script checks that LightGBM library is linked to the appropriate symbol versions. Linking to newer symbol versions at compile time is problematic because it could result -in built artifacts being unusable on older platforms that LightGBM users are on. Version history for these symbols can be found at the following: ```suggestion in built artifacts being unusable on older platforms. ``` This script checks that LightGBM library is linked to the appropriate symbol versions. Linking to newer symbol versions at compile time is problematic because it could result +in built artifacts being unusable on older platforms. Version history for these symbols can be found at the following:
codereview_new_python_data_6118
def fit( feature_name='auto', categorical_feature='auto', callbacks=None, - init_model: Optional[Union[str, Path, Booster, LGBMModel]] = None ): """Docstring is set after definition, using a template.""" params = self._process_params(stage="fit") ```suggestion init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None ``` def fit( feature_name='auto', categorical_feature='auto', callbacks=None, + init_model: Optional[Union[str, Path, Booster, "LGBMModel"]] = None ): """Docstring is set after definition, using a template.""" params = self._process_params(stage="fit")
codereview_new_python_data_6119
def dummy_metric(_, __): "INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!" ] cuda_exp_lines = [ - "INFO | [LightGBM] [Warning] Using sparse features with CUDA is currently not supported.", "INFO | [LightGBM] [Warning] Objective binary is not implemented in cuda_exp version. Fall back to boosting on CPU.", "INFO | [LightGBM] [Warning] Metric auc is not implemented in cuda_exp version. Fall back to evaluation on CPU.", "INFO | [LightGBM] [Warning] Metric binary_error is not implemented in cuda_exp version. Fall back to evaluation on CPU.", Already in `gpu_lines`. ```suggestion ``` def dummy_metric(_, __): "INFO | [LightGBM] [Info] LightGBM using CUDA trainer with DP float!!" ] cuda_exp_lines = [ "INFO | [LightGBM] [Warning] Objective binary is not implemented in cuda_exp version. Fall back to boosting on CPU.", "INFO | [LightGBM] [Warning] Metric auc is not implemented in cuda_exp version. Fall back to evaluation on CPU.", "INFO | [LightGBM] [Warning] Metric binary_error is not implemented in cuda_exp version. Fall back to evaluation on CPU.",
codereview_new_python_data_6120
def test_chunked_dataset_linear(): def test_save_dataset_subset_and_load_from_file(tmp_path): data = np.random.rand(100, 2) - ds = lgb.Dataset(data) ds.subset([1, 2, 3, 5, 8]).save_binary(tmp_path / 'subset.bin') - lgb.Dataset(tmp_path / 'subset.bin').construct() def test_subset_group(): Could you please try overriding the defaults (e.g. setting `max_bin: 7` or something) and check that those non-default values survive the round trip to disk? I think that would increase our confidence that this is working as expected. Otherwise, I think a bug of the form "all parameter values are lost when writing to binary Dataset file" could make it through this test. def test_chunked_dataset_linear(): def test_save_dataset_subset_and_load_from_file(tmp_path): data = np.random.rand(100, 2) + params = {'max_bin': 50, 'min_data_in_bin': 10} + ds = lgb.Dataset(data, params=params) ds.subset([1, 2, 3, 5, 8]).save_binary(tmp_path / 'subset.bin') + lgb.Dataset(tmp_path / 'subset.bin', params=params).construct() def test_subset_group():
codereview_new_python_data_6129
def _make_n_folds( def _agg_cv_result( - raw_results: List[Tuple[str, str, float, bool]] ) -> List[Tuple[str, str, float, bool, float]]: """Aggregate cross-validation results.""" cvmap = collections.OrderedDict() You can see two `for` loops iterating over two lists here: https://github.com/microsoft/LightGBM/blob/706540483a142efdb5be4d2670c1ebd75704ce7b/python-package/lightgbm/engine.py#L372-L373 https://github.com/microsoft/LightGBM/blob/706540483a142efdb5be4d2670c1ebd75704ce7b/python-package/lightgbm/engine.py#L594 I think inner list is from `eval_valid()` https://github.com/microsoft/LightGBM/blob/706540483a142efdb5be4d2670c1ebd75704ce7b/python-package/lightgbm/basic.py#L3261 https://github.com/microsoft/LightGBM/blob/706540483a142efdb5be4d2670c1ebd75704ce7b/python-package/lightgbm/basic.py#L3285-L3288 and the outer one is from `__getattr__()`: https://github.com/microsoft/LightGBM/blob/706540483a142efdb5be4d2670c1ebd75704ce7b/python-package/lightgbm/engine.py#L297 https://github.com/microsoft/LightGBM/blob/706540483a142efdb5be4d2670c1ebd75704ce7b/python-package/lightgbm/engine.py#L301-L304 ```suggestion raw_results: List[List[Tuple[str, str, float, bool]]] ``` def _make_n_folds( def _agg_cv_result( + raw_results: List[List[Tuple[str, str, float, bool]]] ) -> List[Tuple[str, str, float, bool, float]]: """Aggregate cross-validation results.""" cvmap = collections.OrderedDict()
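To make the nesting concrete, here is a toy value with the `List[List[Tuple[str, str, float, bool]]]` shape described above, plus a minimal mean-per-metric aggregation in the spirit of `_agg_cv_result` — the numbers are made up, and the real helper also reports standard deviations:

```python
from collections import OrderedDict
from statistics import mean
from typing import List, Tuple

# Outer list: one entry per CV fold (from __getattr__ over the boosters).
# Inner list: one tuple per (validation set, metric) from eval_valid():
# (dataset name, metric name, value, is_higher_better).
raw_results: List[List[Tuple[str, str, float, bool]]] = [
    [("valid", "auc", 0.91, True), ("valid", "binary_logloss", 0.31, False)],
    [("valid", "auc", 0.89, True), ("valid", "binary_logloss", 0.35, False)],
]

cvmap = OrderedDict()
for one_fold in raw_results:
    for dataset_name, metric_name, value, is_higher_better in one_fold:
        cvmap.setdefault((dataset_name, metric_name, is_higher_better), []).append(value)

for (dataset_name, metric_name, is_higher_better), values in cvmap.items():
    print(dataset_name, metric_name, round(mean(values), 4), is_higher_better)
# valid auc 0.9 True
# valid binary_logloss 0.33 False
```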
codereview_new_python_data_6132
def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_va # avoid side effects on passed-in parameters params = deepcopy(params) - aliases = set(_ConfigAliases.get(main_param_name)) - {main_param_name} # if main_param_name was provided, keep that value and remove all aliases if main_param_name in params.keys(): This is already `set` https://github.com/microsoft/LightGBM/blob/2f5baa3d39efb518cd13a7932fe4d8602c36762f/python-package/lightgbm/basic.py#L369 ```suggestion aliases = _ConfigAliases.get(main_param_name) - {main_param_name} ``` def _choose_param_value(main_param_name: str, params: Dict[str, Any], default_va # avoid side effects on passed-in parameters params = deepcopy(params) + aliases = _ConfigAliases.get(main_param_name) - {main_param_name} # if main_param_name was provided, keep that value and remove all aliases if main_param_name in params.keys():
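A tiny sketch of the point above — the alias lookup already yields a `set`, so set subtraction works without the extra `set(...)` wrapper; the alias table here is made up for illustration:

```python
# Stand-in for _ConfigAliases; the real mapping lives inside lightgbm.basic.
ALIASES = {
    "learning_rate": {"learning_rate", "shrinkage_rate", "eta"},
}

def get_aliases(main_param_name: str) -> set:
    return ALIASES[main_param_name]

main_param_name = "learning_rate"
# Already a set, so no set(...) call is needed before subtracting:
aliases = get_aliases(main_param_name) - {main_param_name}
print(sorted(aliases))  # ['eta', 'shrinkage_rate']
```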
codereview_new_python_data_6133
def test_no_copy_when_single_float_dtype_dataframe(dtype, feature_name): assert np.shares_memory(X, built_data) -@pytest.mark.parametrize('feature_name', [[42], 'auto']) def test_categorical_code_conversion_doesnt_modify_original_data(feature_name): pd = pytest.importorskip('pandas') X = np.random.choice(['a', 'b'], 100).reshape(-1, 1) - column_name = 42 # use int as name - assert [column_name] == feature_name or feature_name == 'auto' df = pd.DataFrame(X.copy(), columns=[column_name], dtype='category') data = lgb.basic._data_from_pandas(df, feature_name, None, None)[0] # check that the original data wasn't modified ```suggestion @pytest.mark.parametrize('feature_name', [['x1'], [42], 'auto']) ``` Instead of replacing a string with a number here, would you consider *adding* a case here? That way, this test would still cover the case "`feature_name` provided, and names are strings". def test_no_copy_when_single_float_dtype_dataframe(dtype, feature_name): assert np.shares_memory(X, built_data) +@pytest.mark.parametrize('feature_name', [['x1'], [42], 'auto']) def test_categorical_code_conversion_doesnt_modify_original_data(feature_name): pd = pytest.importorskip('pandas') X = np.random.choice(['a', 'b'], 100).reshape(-1, 1) + column_name = 'a' if feature_name == 'auto' else feature_name[0] df = pd.DataFrame(X.copy(), columns=[column_name], dtype='category') data = lgb.basic._data_from_pandas(df, feature_name, None, None)[0] # check that the original data wasn't modified
codereview_new_python_data_6234
def get_bonus(self, stability: float) -> float: return self.value if stability >= self.required_stability else 0.0 -def adjust_direction(threshold: float, current: float, target) -> float: """ Returns -1, 0 or 1, depending on whether compared to current, target drops below, is on the same side or goes above the threshold. ```suggestion def adjust_direction(threshold: float, current: float, target) -> int: ``` def get_bonus(self, stability: float) -> float: return self.value if stability >= self.required_stability else 0.0 +def adjust_direction(threshold: float, current: float, target) -> int: """ Returns -1, 0 or 1, depending on whether compared to current, target drops below, is on the same side or goes above the threshold.
codereview_new_python_data_6235
def set_have_nest(): def have_computronium() -> bool: - """Do we have a planet with a computronium moon, which is set to research focus?""" return _get_planet_catalog().have_computronium def computronium_candidates() -> List[PlanetId]: - """Returns list of own planets that have a computronium moon and a species capable of research.""" return _get_planet_catalog().pids_computronium def have_honeycomb() -> bool: - """Do we have a planet with the honeycomb special, which is set to industry focus?""" return _get_planet_catalog().have_honeycomb def honeycomb_candidates() -> List[PlanetId]: - """Returns list of own planets that have the honeycomb special and a species capable of production.""" return _get_planet_catalog().pids_honeycomb ```suggestion """Return True if we have a planet with a computronium moon, which is set to research focus.""" ``` def set_have_nest(): def have_computronium() -> bool: + """Return True if we have a planet with a computronium moon, which is set to research focus.""" return _get_planet_catalog().have_computronium def computronium_candidates() -> List[PlanetId]: + """Return list of own planets that have a computronium moon and a species capable of research.""" return _get_planet_catalog().pids_computronium def have_honeycomb() -> bool: + """Return True if we have a planet with a honeycomb special, which is set to production focus.""" return _get_planet_catalog().have_honeycomb def honeycomb_candidates() -> List[PlanetId]: + """Return list of own planets that have the honeycomb special and a species capable of production.""" return _get_planet_catalog().pids_honeycomb
codereview_new_python_data_6236
def set_have_nest(): def have_computronium() -> bool: - """Do we have a planet with a computronium moon, which is set to research focus?""" return _get_planet_catalog().have_computronium def computronium_candidates() -> List[PlanetId]: - """Returns list of own planets that have a computronium moon and a species capable of research.""" return _get_planet_catalog().pids_computronium def have_honeycomb() -> bool: - """Do we have a planet with the honeycomb special, which is set to industry focus?""" return _get_planet_catalog().have_honeycomb def honeycomb_candidates() -> List[PlanetId]: - """Returns list of own planets that have the honeycomb special and a species capable of production.""" return _get_planet_catalog().pids_honeycomb ```suggestion """Return list of own planets that have the honeycomb special and a species capable of production.""" ``` def set_have_nest(): def have_computronium() -> bool: + """Return True if we have a planet with a computronium moon, which is set to research focus.""" return _get_planet_catalog().have_computronium def computronium_candidates() -> List[PlanetId]: + """Return list of own planets that have a computronium moon and a species capable of research.""" return _get_planet_catalog().pids_computronium def have_honeycomb() -> bool: + """Return True if we have a planet with a honeycomb special, which is set to production focus.""" return _get_planet_catalog().have_honeycomb def honeycomb_candidates() -> List[PlanetId]: + """Return list of own planets that have the honeycomb special and a species capable of production.""" return _get_planet_catalog().pids_honeycomb
codereview_new_python_data_6237
def set_planet_industry_research_influence_foci(focus_manager, priority_ratio): # xy = y output when focus x, p for production(INDUSTRY), c for current pp, pr, pi = pinfo.possible_output[INDUSTRY] rp, rr, ri = pinfo.possible_output[RESEARCH] # calculate factor F at which pp + F * pr == rp + F * rr =====> F = ( pp-rp ) / (rr-pr) factor = (pp - rp) / max(0.01, rr - pr) # Races much better at producing shouldn't switch too early, better produce the history analyzer quickly I am in the process of re-writing ResourceAI, this line will be replaced anyway. def set_planet_industry_research_influence_foci(focus_manager, priority_ratio): # xy = y output when focus x, p for production(INDUSTRY), c for current pp, pr, pi = pinfo.possible_output[INDUSTRY] rp, rr, ri = pinfo.possible_output[RESEARCH] + ip, ir, ii = pinfo.possible_output[INFLUENCE] # calculate factor F at which pp + F * pr == rp + F * rr =====> F = ( pp-rp ) / (rr-pr) factor = (pp - rp) / max(0.01, rr - pr) # Races much better at producing shouldn't switch too early, better produce the history analyzer quickly
codereview_new_python_data_6242
class Bonus(NamedTuple): def get_bonus(self, stability: float) -> float: if not self.available: return 0.0 - return self.value if stability > self.min_stability else 0.0 ```suggestion if not self.available or stability < self.min_stability: return 0.0 return self.value ``` class Bonus(NamedTuple): def get_bonus(self, stability: float) -> float: if not self.available: return 0.0 + return self.value if stability >= self.min_stability else 0.0
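A standalone sketch of the suggested single guard clause, using the `>=` boundary the final code settles on; the field values below are invented:

```python
from typing import NamedTuple

class Bonus(NamedTuple):
    available: bool
    min_stability: float
    value: float

    def get_bonus(self, stability: float) -> float:
        # One guard covers both "not available" and "below the threshold".
        if not self.available or stability < self.min_stability:
            return 0.0
        return self.value

print(Bonus(True, 5.0, 2.0).get_bonus(5.0))    # 2.0 -- stability at the threshold counts
print(Bonus(True, 5.0, 2.0).get_bonus(4.9))    # 0.0
print(Bonus(False, 5.0, 2.0).get_bonus(10.0))  # 0.0
```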
codereview_new_python_data_6243
class Tags: # </editor-fold> # <editor-fold desc="Industry boosting specials"> -# modified = affected by species multiplier industry_boost_specials_modified = { "TIDAL_LOCK_SPECIAL", } industry_boost_specials_unmodified = { "CRYSTALS_SPECIAL", "ELERIUM_SPECIAL", Maybe use `fixed`/`flat` and `multiplier` terms? class Tags: # </editor-fold> # <editor-fold desc="Industry boosting specials"> +# Each adds INDUSTRY_PER_POP before production is multiplied by species skill modifier industry_boost_specials_modified = { "TIDAL_LOCK_SPECIAL", } +# Each adds INDUSTRY_PER_POP after all multipliers have been applied industry_boost_specials_unmodified = { "CRYSTALS_SPECIAL", "ELERIUM_SPECIAL",
codereview_new_python_data_6261
def test_s2nd_falls_back_to_full_connection(managed_process, tmp_path, cipher, c @pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name) @pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name) @pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name) -@pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p != Protocols.TLS13], ids=get_parameter_name) @pytest.mark.parametrize("provider", [OpenSSL, S2N], ids=get_parameter_name) @pytest.mark.parametrize("other_provider", [S2N], ids=get_parameter_name) def test_session_resumption_s2n_client_tls13_server_not_tls13(managed_process, cipher, curve, protocol, provider, other_provider, certificate): Nit: A theoretical TLS1.4 would build off of TLS1.3, not an earlier version. ```suggestion @pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name) @pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p < Protocols.TLS13], ids=get_parameter_name) ``` def test_s2nd_falls_back_to_full_connection(managed_process, tmp_path, cipher, c @pytest.mark.parametrize("cipher", ALL_TEST_CIPHERS, ids=get_parameter_name) @pytest.mark.parametrize("curve", ALL_TEST_CURVES, ids=get_parameter_name) @pytest.mark.parametrize("certificate", ALL_TEST_CERTS, ids=get_parameter_name) +@pytest.mark.parametrize("protocol", [p for p in PROTOCOLS if p < Protocols.TLS13], ids=get_parameter_name) @pytest.mark.parametrize("provider", [OpenSSL, S2N], ids=get_parameter_name) @pytest.mark.parametrize("other_provider", [S2N], ids=get_parameter_name) def test_session_resumption_s2n_client_tls13_server_not_tls13(managed_process, cipher, curve, protocol, provider, other_provider, certificate):
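A toy illustration of the nit: filtering with `!=` would silently include a future TLS1.4, while `<` keeps exactly the pre-TLS1.3 protocols the test targets. The version numbers below are placeholders standing in for the comparable protocol objects:

```python
# Placeholder version numbers; the real objects come from common.Protocols
# and support the same ordering comparisons.
TLS10, TLS11, TLS12, TLS13, TLS14 = 10, 11, 12, 13, 14
PROTOCOLS = [TLS10, TLS11, TLS12, TLS13, TLS14]

print([p for p in PROTOCOLS if p != TLS13])  # [10, 11, 12, 14] -- hypothetical TLS1.4 slips in
print([p for p in PROTOCOLS if p < TLS13])   # [10, 11, 12]
```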
codereview_new_python_data_6315
class Ciphers(object): "PQ-SIKE-TEST-TLS-1-0-2020-02", Protocols.TLS10, False, False, s2n=True, pq=True) PQ_TLS_1_0_2020_12 = Cipher( "PQ-TLS-1-0-2020-12", Protocols.TLS10, False, False, s2n=True, pq=True) - DEFAULT_FIPS = Cipher( - "default_fips", Protocols.TLS12, False, True, s2n=True, pq=False) @staticmethod def from_iana(iana_name): lrstewart@ mentioned that it's generally a good idea to avoid fips security policies when possible. Is there another security policy/signature algorithm combination that works instead? class Ciphers(object): "PQ-SIKE-TEST-TLS-1-0-2020-02", Protocols.TLS10, False, False, s2n=True, pq=True) PQ_TLS_1_0_2020_12 = Cipher( "PQ-TLS-1-0-2020-12", Protocols.TLS10, False, False, s2n=True, pq=True) + SECURITY_POLICY_20210816 = Cipher( + "20210816", Protocols.TLS12, False, False, s2n=True, pq=True) + @staticmethod def from_iana(iana_name):
codereview_new_python_data_6316
class Ciphers(object): "PQ-SIKE-TEST-TLS-1-0-2020-02", Protocols.TLS10, False, False, s2n=True, pq=True) PQ_TLS_1_0_2020_12 = Cipher( "PQ-TLS-1-0-2020-12", Protocols.TLS10, False, False, s2n=True, pq=True) SECURITY_POLICY_20210816 = Cipher( - "20210816", Protocols.TLS12, False, False, s2n=True, pq=True) @staticmethod I don't think the pq flag is used for anything, so it probably doesn't matter, but since this isn't a pq cipher pq should probably be False. Also, it might make sense to put a newline in front of this to differentiate it from the rest of the pq ciphers. class Ciphers(object): "PQ-SIKE-TEST-TLS-1-0-2020-02", Protocols.TLS10, False, False, s2n=True, pq=True) PQ_TLS_1_0_2020_12 = Cipher( "PQ-TLS-1-0-2020-12", Protocols.TLS10, False, False, s2n=True, pq=True) + SECURITY_POLICY_20210816 = Cipher( + "20210816", Protocols.TLS12, False, False, s2n=True, pq=False) @staticmethod
codereview_new_python_data_6317
def expected_signature(protocol, signature): signature = signature return signature -# ECDSA by default hashes with SHA-1. -# -# This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc4492#section-5.10 def expected_signature_alg_tls12(signature): if signature == Signatures.RSA_SHA224: signature = Signatures.RSA_SHA1 else: ## Redundant assignment This assignment assigns a variable to itself. [Show more details](https://github.com/aws/s2n-tls/security/code-scanning/559) def expected_signature(protocol, signature): signature = signature return signature + def expected_signature_alg_tls12(signature): + # ECDSA by default hashes with SHA-1. + # + # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc4492#section-5.10 if signature == Signatures.RSA_SHA224: signature = Signatures.RSA_SHA1 else:
codereview_new_python_data_6318
class Signatures(object): RSA_SHA256 = Signature('RSA+SHA256', max_protocol=Protocols.TLS12) RSA_SHA384 = Signature('RSA+SHA384', max_protocol=Protocols.TLS12) RSA_SHA512 = Signature('RSA+SHA512', max_protocol=Protocols.TLS12) - MD5_SHA1 = Signature('RSA+MD5_SHA1', max_protocol=Protocols.TLS11) ECDSA_SHA224 = Signature('ECDSA+SHA224', max_protocol=Protocols.TLS12) ECDSA_SHA1 = Signature('ECDSA+SHA1', max_protocol=Protocols.TLS12) ```suggestion RSA_MD5_SHA1 = Signature('RSA+MD5_SHA1', max_protocol=Protocols.TLS11) ``` class Signatures(object): RSA_SHA256 = Signature('RSA+SHA256', max_protocol=Protocols.TLS12) RSA_SHA384 = Signature('RSA+SHA384', max_protocol=Protocols.TLS12) RSA_SHA512 = Signature('RSA+SHA512', max_protocol=Protocols.TLS12) + RSA_MD5_SHA1 = Signature('RSA+MD5_SHA1', max_protocol=Protocols.TLS11) ECDSA_SHA224 = Signature('ECDSA+SHA224', max_protocol=Protocols.TLS12) ECDSA_SHA1 = Signature('ECDSA+SHA1', max_protocol=Protocols.TLS12)
codereview_new_python_data_6319
-from common import Protocols, Signatures from providers import S2N from global_flags import get_flag, S2N_FIPS_MODE ## Unused import Import of 'Signatures' is not used. [Show more details](https://github.com/aws/s2n-tls/security/code-scanning/561) +from common import Protocols from providers import S2N from global_flags import get_flag, S2N_FIPS_MODE
codereview_new_python_data_6320
def test_s2n_server_tls12_signature_algorithm_fallback(managed_process, cipher, # extension. # # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.4.1 - expected_sig_alg_tls12: None if signature == Signatures.RSA_SHA224: expected_sig_alg_tls12 = Signatures.RSA_SHA1 else: I think this syntax is setting the type of `expected_sig_alg_tls12` to None. I think this makes more sense: ```suggestion expected_sig_alg_tls12 = None ``` def test_s2n_server_tls12_signature_algorithm_fallback(managed_process, cipher, # extension. # # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.4.1 if signature == Signatures.RSA_SHA224: expected_sig_alg_tls12 = Signatures.RSA_SHA1 else:
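For reference, a small demonstration of the difference being flagged: a bare `name: annotation` statement records an annotation but binds no value, whereas `name = None` actually assigns one (shown at module level to keep the check simple):

```python
expected_sig_alg_tls12: None  # annotation only; no value is bound

print("expected_sig_alg_tls12" in globals())  # False
print(__annotations__)                        # {'expected_sig_alg_tls12': None}

expected_sig_alg_tls12 = None                 # the assignment the test actually needs
print("expected_sig_alg_tls12" in globals())  # True
```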
codereview_new_python_data_6321
def test_s2n_server_tls12_signature_algorithm_fallback(managed_process, cipher, # extension. # # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.4.1 - expected_sig_alg_tls12: None if signature == Signatures.RSA_SHA224: expected_sig_alg_tls12 = Signatures.RSA_SHA1 else: If I understand correctly this code is selecting an expected signature algorithm based on the `signature` parameter: `@pytest.mark.parametrize("signature", [Signatures.RSA_SHA224, Signatures.ECDSA_SHA224], ids=get_parameter_name)` Is it possible we add a signature to the above list in the future? If we add a signature to this list in the future will we need to modify this if statement? One alternate implementation might be to package the signatures and expected sig_algs together (e.g.:) `@pytest.mark.parametrize("signature_expected_sig_alg", [(Signatures.RSA_SHA224, Signatures.RSA_SHA1), (Signatures.ECDSA_SHA224, Signatures.ECDSA_SHA1)], ids=get_parameter_name)` and then unpack that: `signature, expected_sig_alg_tls12 = signature_expected_sig_alg`. But I may be misunderstanding the nature of the problem here. def test_s2n_server_tls12_signature_algorithm_fallback(managed_process, cipher, # extension. # # This is inferred from the rfc- https://www.rfc-editor.org/rfc/rfc5246#section-7.4.1.4.1 if signature == Signatures.RSA_SHA224: expected_sig_alg_tls12 = Signatures.RSA_SHA1 else:
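A minimal pytest sketch of the "package input and expectation together" idea from this comment; the string names stand in for the real `Signatures` constants:

```python
import pytest

# Each case carries the offered signature and the TLS1.2 fallback we expect,
# so adding a new signature later means adding a tuple, not editing branches.
SIGNATURE_CASES = [
    ("RSA+SHA224", "RSA+SHA1"),
    ("ECDSA+SHA224", "ECDSA+SHA1"),
]

@pytest.mark.parametrize("signature,expected_sig_alg_tls12", SIGNATURE_CASES)
def test_tls12_signature_fallback_pairing(signature, expected_sig_alg_tls12):
    # Placeholder assertion: both halves of the pair use the same key type.
    assert signature.split("+")[0] == expected_sig_alg_tls12.split("+")[0]
```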
codereview_new_python_data_6322
import pytest from configuration import available_ports, TLS13_CIPHERS, ALL_TEST_CURVES, MINIMAL_TEST_CERTS -from common import Ciphers, ProviderOptions, Protocols, data_bytes from fixtures import managed_process # lgtm [py/unused-import] from providers import Provider, S2N, OpenSSL from utils import invalid_test_parameters, get_parameter_name, get_expected_s2n_version, to_bytes ## Unused import Import of 'Ciphers' is not used. [Show more details](https://github.com/aws/s2n-tls/security/code-scanning/552) import pytest from configuration import available_ports, TLS13_CIPHERS, ALL_TEST_CURVES, MINIMAL_TEST_CERTS +from common import ProviderOptions, Protocols, data_bytes from fixtures import managed_process # lgtm [py/unused-import] from providers import Provider, S2N, OpenSSL from utils import invalid_test_parameters, get_parameter_name, get_expected_s2n_version, to_bytes
codereview_new_python_data_6323
import copy -import math import pytest from configuration import available_ports, TLS13_CIPHERS, ALL_TEST_CURVES, MINIMAL_TEST_CERTS ## Unused import Import of 'math' is not used. [Show more details](https://github.com/aws/s2n-tls/security/code-scanning/553) import copy import pytest from configuration import available_ports, TLS13_CIPHERS, ALL_TEST_CURVES, MINIMAL_TEST_CERTS
codereview_new_python_data_6325
import copy import pytest -import math import re from configuration import available_ports, TLS13_CIPHERS, ALL_TEST_CURVES, MINIMAL_TEST_CERTS ## Unused import Import of 'math' is not used. [Show more details](https://github.com/aws/s2n-tls/security/code-scanning/554) import copy import pytest import re from configuration import available_ports, TLS13_CIPHERS, ALL_TEST_CURVES, MINIMAL_TEST_CERTS
codereview_new_python_data_6338
SEARCH_USER_LIMIT = 10 -@api_bp.route('/playlist/search/users/', methods=['GET', 'OPTIONS']) @crossdomain @ratelimit() def search_user(): search_term = request.args.get("search_term") user = validate_auth_header() user_id = user.id if search_term: - users = db_user.search(search_term, SEARCH_USER_LIMIT, user_id) else: users = [] return jsonify( We want this to be more generic. Searching for users is not an action that should be specifically tied to playlists. So instead, this endpoint should live somewhere else, but I am not sure what the best place or path are. @amCap1712 where do you think a user search (API) endpoint should live? and would it be `http://…/1/search/user` ? SEARCH_USER_LIMIT = 10 +@api_bp.route('/search/users/', methods=['GET', 'OPTIONS']) @crossdomain @ratelimit() def search_user(): search_term = request.args.get("search_term") user = validate_auth_header() user_id = user.id if search_term: + users = db_user.search_user_name(search_term, SEARCH_USER_LIMIT,user_id) else: users = [] return jsonify(
codereview_new_python_data_6339
@ratelimit() def search_user(): search_term = request.args.get("search_term") - validate_auth_header() if search_term: users = db_user.search_user_name(search_term, SEARCH_USER_LIMIT) else: users = [] return jsonify( { - 'status': 'ok', 'users': users } ) No need for status: ok. @ratelimit() def search_user(): search_term = request.args.get("search_term") if search_term: users = db_user.search_user_name(search_term, SEARCH_USER_LIMIT) else: users = [] return jsonify( { 'users': users } )
codereview_new_python_data_6340
@ratelimit() def search_user(): search_term = request.args.get("search_term") - validate_auth_header() if search_term: users = db_user.search_user_name(search_term, SEARCH_USER_LIMIT) else: users = [] return jsonify( { - 'status': 'ok', 'users': users } ) Normal search endpoint is public so no need for auth here either. @ratelimit() def search_user(): search_term = request.args.get("search_term") if search_term: users = db_user.search_user_name(search_term, SEARCH_USER_LIMIT) else: users = [] return jsonify( { 'users': users } )
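Pulling the three review comments together, a rough Flask sketch of what the final endpoint shape could look like — public, no redundant `status` field, generic path. The route prefix and the in-memory stand-in for `db_user.search_user_name` are assumptions for illustration only:

```python
from flask import Flask, jsonify, request

app = Flask(__name__)
SEARCH_USER_LIMIT = 10

USERS = ["alice", "alicia", "bob"]  # toy data instead of the real database

def search_user_name(search_term: str, limit: int) -> list:
    return [name for name in USERS if search_term in name][:limit]

@app.route("/1/search/users/", methods=["GET"])
def search_user():
    # Public endpoint: no auth header required, and no extra status field.
    search_term = request.args.get("search_term")
    users = search_user_name(search_term, SEARCH_USER_LIMIT) if search_term else []
    return jsonify({"users": users})

if __name__ == "__main__":
    app.run(port=5000)
```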