repo (string, 283 values) | pull_number (int64, 5-44.1k) | instance_id (string, length 13-45) | issue_numbers (sequence, length 1-4) | base_commit (string, length 40) | patch (string, length 179-224k) | test_patch (string, length 94-7.54M) | problem_statement (string, length 4-256k) | hints_text (string, length 0-294k) | created_at (timestamp[s]) | version (string, 1 value)
---|---|---|---|---|---|---|---|---|---|---|
bridgecrewio/checkov | 5,155 | bridgecrewio__checkov-5155 | [
"5154"
] | d93a39cf71235f337b611b08fd63e6762bd16cae | diff --git a/checkov/common/goget/registry/get_registry.py b/checkov/common/goget/registry/get_registry.py
--- a/checkov/common/goget/registry/get_registry.py
+++ b/checkov/common/goget/registry/get_registry.py
@@ -4,18 +4,20 @@
from checkov.common.goget.base_getter import BaseGetter
from checkov.common.util.file_utils import extract_tar_archive
+from checkov.common.util.file_utils import extract_zip_archive
from checkov.common.util.http_utils import DEFAULT_TIMEOUT
class RegistryGetter(BaseGetter):
- def __init__(self, url: str, create_clone_and_result_dirs: bool = False) -> None:
+ def __init__(self, url: str, extension: str, create_clone_and_result_dirs: bool = False) -> None:
self.logger = logging.getLogger(__name__)
+ self.extension = extension
self.create_clone_and_res_dirs = create_clone_and_result_dirs
super().__init__(url)
def do_get(self) -> str:
# get dest dir
- download_path = os.path.join(self.temp_dir, 'module_source.tar.gz')
+ download_path = os.path.join(self.temp_dir, f'module_source.{self.extension}')
# download zip
dest_path = os.path.dirname(download_path)
with requests.get(self.url, stream=True, timeout=DEFAULT_TIMEOUT) as r:
@@ -25,7 +27,10 @@ def do_get(self) -> str:
for chunk in r.iter_content(chunk_size=8192):
f.write(chunk)
# extract
- extract_tar_archive(source_path=download_path, dest_path=dest_path)
+ if self.extension == 'zip':
+ extract_zip_archive(source_path=download_path, dest_path=dest_path)
+ else:
+ extract_tar_archive(source_path=download_path, dest_path=dest_path)
os.remove(download_path)
return dest_path
diff --git a/checkov/common/util/file_utils.py b/checkov/common/util/file_utils.py
--- a/checkov/common/util/file_utils.py
+++ b/checkov/common/util/file_utils.py
@@ -5,6 +5,8 @@
import io
import logging
+from zipfile import ZipFile
+
def convert_to_unix_path(path: str) -> str:
return path.replace('\\', '/')
@@ -15,6 +17,11 @@ def extract_tar_archive(source_path: str, dest_path: str) -> None:
tar.extractall(path=dest_path) # nosec # only trusted source
+def extract_zip_archive(source_path: str, dest_path: str) -> None:
+ with ZipFile(source_path) as zip:
+ zip.extractall(path=dest_path) # nosec # only trusted source
+
+
def compress_file_gzip_base64(input_path: str) -> str:
try:
with open(input_path, 'rb') as json_results_file:
diff --git a/checkov/terraform/module_loading/loaders/registry_loader.py b/checkov/terraform/module_loading/loaders/registry_loader.py
--- a/checkov/terraform/module_loading/loaders/registry_loader.py
+++ b/checkov/terraform/module_loading/loaders/registry_loader.py
@@ -7,6 +7,7 @@
import requests
from requests.exceptions import HTTPError
+from urllib.parse import urljoin
from urllib.parse import urlparse
from checkov.common.models.consts import TFC_HOST_NAME
@@ -42,7 +43,7 @@ def discover(self, module_params: ModuleParams) -> None:
def _is_matching_loader(self, module_params: ModuleParams) -> bool:
# https://developer.hashicorp.com/terraform/language/modules/sources#github
- if module_params.module_source.startswith(("github.com", "bitbucket.org", "git::", "git@github.com")):
+ if module_params.module_source.startswith(("/", "github.com", "bitbucket.org", "git::", "git@github.com")):
return False
self._process_inner_registry_module(module_params)
# determine tf api endpoints
@@ -78,7 +79,7 @@ def _load_module(self, module_params: ModuleParams) -> ModuleContent:
elif not module_params.tf_modules_endpoint:
return ModuleContent(dir=None)
- request_download_url = "/".join((module_params.tf_modules_endpoint, module_params.module_source, best_version, "download"))
+ request_download_url = urljoin(module_params.tf_modules_endpoint, "/".join((module_params.module_source, best_version, "download")))
logging.debug(f"Best version for {module_params.module_source} is {best_version} based on the version constraint {module_params.version}.")
logging.debug(f"Module download url: {request_download_url}")
try:
@@ -97,9 +98,10 @@ def _load_module(self, module_params: ModuleParams) -> ModuleContent:
self.logger.debug(f"X-Terraform-Get: {module_download_url}")
module_download_url = self._normalize_module_download_url(module_params, module_download_url)
self.logger.debug(f"Cloning module from normalized url {module_download_url}")
- if self._is_download_url_archive(module_download_url):
+ archive_extension = self._get_archive_extension(module_download_url)
+ if archive_extension:
try:
- registry_getter = RegistryGetter(module_download_url)
+ registry_getter = RegistryGetter(module_download_url, archive_extension)
registry_getter.temp_dir = module_params.dest_dir
registry_getter.do_get()
return_dir = module_params.dest_dir
@@ -193,12 +195,14 @@ def _determine_tf_api_endpoints(self, module_params: ModuleParams) -> None:
return None
self.logger.debug(f"Service discovery response: {response.json()}")
- module_params.tf_modules_endpoint = f"https://{module_params.tf_host_name}{response.json().get('modules.v1')}"
+ module_params.tf_modules_endpoint = self._normalize_module_download_url(module_params, response.json().get('modules.v1'))
else:
# use terraform cloud host name and url for the public registry
module_params.tf_host_name = TFC_HOST_NAME
- module_params.tf_modules_endpoint = "https://registry.terraform.io/v1/modules"
- module_params.tf_modules_versions_endpoint = "/".join((module_params.tf_modules_endpoint, module_params.module_source, "versions"))
+ module_params.tf_modules_endpoint = "https://registry.terraform.io/v1/modules/"
+
+ # assume module_params.tf_modules_endpoint ends with a slash as per https://developer.hashicorp.com/terraform/internals/module-registry-protocol#service-discovery
+ module_params.tf_modules_versions_endpoint = urljoin(module_params.tf_modules_endpoint, "/".join((module_params.module_source, "versions")))
def _normalize_module_download_url(self, module_params: ModuleParams, module_download_url: str) -> str:
if not urlparse(module_download_url).netloc:
@@ -206,17 +210,18 @@ def _normalize_module_download_url(self, module_params: ModuleParams, module_dow
return module_download_url
@staticmethod
- def _is_download_url_archive(module_download_url: str) -> bool:
+ def _get_archive_extension(module_download_url: str) -> str | None:
+ module_download_path = urlparse(module_download_url).path
for extension in MODULE_ARCHIVE_EXTENSIONS:
- if module_download_url.endswith(extension):
- return True
+ if module_download_path.endswith(extension):
+ return extension
query_params_str = urlparse(module_download_url).query
if query_params_str:
query_params = query_params_str.split("&")
for query_param in query_params:
if query_param.startswith("archive="):
- return True
- return False
+ return query_params_str.split("=")[1]
+ return None
loader = RegistryLoader()
| diff --git a/tests/terraform/module_loading/loaders/test_registry_loader.py b/tests/terraform/module_loading/loaders/test_registry_loader.py
--- a/tests/terraform/module_loading/loaders/test_registry_loader.py
+++ b/tests/terraform/module_loading/loaders/test_registry_loader.py
@@ -40,11 +40,34 @@ def test_determine_tf_api_endpoints_tfc():
# then
assert module_params.tf_host_name == "app.terraform.io"
- assert module_params.tf_modules_endpoint == "https://registry.terraform.io/v1/modules"
+ assert module_params.tf_modules_endpoint == "https://registry.terraform.io/v1/modules/"
assert module_params.tf_modules_versions_endpoint == "https://registry.terraform.io/v1/modules/terraform-aws-modules/example/versions"
+@pytest.mark.parametrize(
+ "discovery_response",
+ [
+ ({
+ "modules.v1": "/api/registry/v1/modules/",
+ "providers.v1": "/api/registry/v1/providers/",
+ "state.v2": "/api/v2/",
+ "tfe.v2": "/api/v2/",
+ "tfe.v2.1": "/api/v2/",
+ "tfe.v2.2": "/api/v2/",
+ "versions.v1": "https://checkpoint-api.hashicorp.com/v1/versions/"
+ }),
+ ({
+ "modules.v1": "https://example.registry.com/api/registry/v1/modules/",
+ "providers.v1": "https://example.registry.com/api/registry/v1/providers/",
+ "state.v2": "https://example.registry.com/api/v2/",
+ "tfe.v2": "https://example.registry.com/api/v2/",
+ "tfe.v2.1": "https://example.registry.com/api/v2/",
+ "tfe.v2.2": "https://example.registry.com/api/v2/",
+ "versions.v1": "https://checkpoint-api.hashicorp.com/v1/versions/"
+ }),
+ ]
+)
@responses.activate
-def test_determine_tf_api_endpoints_tfe():
+def test_determine_tf_api_endpoints_tfe(discovery_response):
# given
loader = RegistryLoader()
module_params = ModuleParams("", "", "example.registry.com/terraform-aws-modules/example", "", "", "")
@@ -53,15 +76,7 @@ def test_determine_tf_api_endpoints_tfe():
responses.add(
method=responses.GET,
url=f"https://{module_params.tf_host_name}/.well-known/terraform.json",
- json={
- "modules.v1": "/api/registry/v1/modules/",
- "providers.v1": "/api/registry/v1/providers/",
- "state.v2": "/api/v2/",
- "tfe.v2": "/api/v2/",
- "tfe.v2.1": "/api/v2/",
- "tfe.v2.2": "/api/v2/",
- "versions.v1": "https://checkpoint-api.hashicorp.com/v1/versions/"
- },
+ json=discovery_response,
status=200,
)
@@ -72,20 +87,42 @@ def test_determine_tf_api_endpoints_tfe():
responses.assert_call_count(f"https://{module_params.tf_host_name}/.well-known/terraform.json", 1)
assert module_params.tf_host_name == "example.registry.com"
assert module_params.tf_modules_endpoint == "https://example.registry.com/api/registry/v1/modules/"
- assert module_params.tf_modules_versions_endpoint == "https://example.registry.com/api/registry/v1/modules//terraform-aws-modules/example/versions"
+ assert module_params.tf_modules_versions_endpoint == "https://example.registry.com/api/registry/v1/modules/terraform-aws-modules/example/versions"
+
+@responses.activate
+def test_load_module():
+ # given
+ loader = RegistryLoader()
+ module_params = ModuleParams("", "", "terraform-aws-modules/example", "", "", "")
+ module_params.tf_modules_endpoint = "https://example.registry.com/api/registry/v1/modules/"
+ module_params.best_version = "1.0.0"
+ with mock.patch.dict("os.environ", {"TF_HOST_NAME": "example.registry.com"}):
+ loader.discover(module_params)
+ responses.add(
+ method=responses.GET,
+ url="https://example.registry.com/api/registry/v1/modules/terraform-aws-modules/example/1.0.0/download",
+ status=200,
+ )
+
+ # when
+ loader._load_module(module_params)
+
+ # then
+ responses.assert_call_count("https://example.registry.com/api/registry/v1/modules/terraform-aws-modules/example/1.0.0/download", 1)
@pytest.mark.parametrize(
"download_url, expected_result",
[
- ("https://example.com/download?archive=tgz", True),
- ("https://example.com/download?archive=zip", True),
- ("https://example.com/download/module.zip", True),
- ("https://example.com/download/module/archive", False),
+ ("https://example.com/download?archive=tgz", "tgz"),
+ ("https://example.com/download?archive=zip", "zip"),
+ ("https://example.com/download/module.zip", "zip"),
+ ("https://example.com/download/module.zip?sig=foo", "zip"),
+ ("https://example.com/download/module/archive", None),
]
)
-def test_is_download_url_archive(download_url, expected_result):
- is_archive = RegistryLoader._is_download_url_archive(download_url)
- assert is_archive == expected_result
+def test_get_archive_extension(download_url, expected_result):
+ archive_extension = RegistryLoader._get_archive_extension(download_url)
+ assert archive_extension == expected_result
@pytest.mark.parametrize(
"tf_host_name, module_download_url, expected_result",
| Unable to download Terraform modules from JFrog Artifactory
**Describe the issue**
The `--download-external-modules` option fails to download Terraform modules from JFrog Artifactory due to a malformed URL.
**Example Value**
This Terraform configuration:
```
module "lambda" {
source = "artifactory.jfrog.example.com/example-terraform-module__example/lambda/aws"
version = "1.8.133"
}
```
produces these warnings:
```
2023-05-31 05:22:07,799 [MainThread ] [WARNI] Module artifactory.jfrog.example.com/example-terraform-module__example/lambda/aws:1.8.133 failed to load via <class 'checkov.terraform.module_loading.loaders.registry_loader.RegistryLoader'>
2023-05-31 05:22:07,799 [MainThread ] [WARNI] Unable to load module (artifactory.jfrog.example.com/example-terraform-module__example/lambda/aws:1.8.133): HTTPSConnectionPool(host='artifactory.jfrog.example.comhttps', port=443): Max retries exceeded with url: //artifactory.jfrog.example.com/artifactory/api/terraform/v1/modules//example-terraform-module__example/lambda/aws/versions (Caused by NewConnectionError('<urllib3.connection.HTTPSConnection object at 0x7f3ee1315570>: Failed to establish a new connection: [Errno -2] Name or service not known'))
```
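The second warning shows where the URL goes wrong: the host name is prefixed onto an already-absolute modules endpoint, producing a host like `artifactory.jfrog.example.comhttps`. A minimal sketch of the behaviour the fix above relies on, using only Python's standard `urllib.parse`; the endpoint values are illustrative, not taken from a real registry:
```python
from urllib.parse import urljoin

# Service discovery may return either a relative path or an absolute URL for "modules.v1".
host = "artifactory.jfrog.example.com"
modules_v1 = "https://artifactory.jfrog.example.com/artifactory/api/terraform/v1/modules/"

# Naive prefixing breaks when the endpoint is already absolute:
broken = f"https://{host}{modules_v1}"
# -> "https://artifactory.jfrog.example.comhttps://artifactory.jfrog.example.com/..."

# urljoin keeps the base's scheme and host and resolves the relative part,
# provided the base ends with a trailing slash:
versions_url = urljoin(modules_v1, "example-terraform-module__example/lambda/aws/versions")
# -> ".../api/terraform/v1/modules/example-terraform-module__example/lambda/aws/versions"

# The same call also resolves a relative discovery response against the host:
relative = urljoin(f"https://{host}", "/artifactory/api/terraform/v1/modules/")
print(broken, versions_url, relative, sep="\n")
```
This is why the patch both switches to `urljoin` and normalizes the discovered `modules.v1` endpoint to end with a slash.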
| 2023-05-31T05:42:27 | -1.0 |
|
bridgecrewio/checkov | 5,170 | bridgecrewio__checkov-5170 | [
"5161"
] | 1a308323d4d12ee03eab342d863b060b21f3f041 | diff --git a/checkov/dockerfile/checks/AliasIsUnique.py b/checkov/dockerfile/checks/AliasIsUnique.py
--- a/checkov/dockerfile/checks/AliasIsUnique.py
+++ b/checkov/dockerfile/checks/AliasIsUnique.py
@@ -24,13 +24,12 @@ def scan_resource_conf(self, conf: list[_Instruction]) -> tuple[CheckResult, lis
alias = []
for instruction in conf:
if " as " in instruction["value"]:
- temp = instruction["value"].split()
- alias += [temp[2]]
+ alias.append(instruction["value"].rsplit(maxsplit=1)[-1])
if len(alias) == len(set(alias)):
return CheckResult.PASSED, None
- else:
- return CheckResult.FAILED, [conf[0]]
+
+ return CheckResult.FAILED, [conf[0]]
check = AliasIsUnique()
| diff --git a/tests/dockerfile/checks/example_AliasIsUnique/success_platform/Dockerfile b/tests/dockerfile/checks/example_AliasIsUnique/success_platform/Dockerfile
new file mode 100644
--- /dev/null
+++ b/tests/dockerfile/checks/example_AliasIsUnique/success_platform/Dockerfile
@@ -0,0 +1,10 @@
+FROM --platform=linux/amd64 node:16 as FOO
+RUN npm install
+
+FROM --platform=linux/amd64 node:16 as BAR
+RUN npm run
+
+USER nobody
+HEALTHCHECK CMD curl --fail http://localhost:3000 || exit 1
+
+CMD mycommand.sh
diff --git a/tests/dockerfile/checks/test_AliasIsUnique.py b/tests/dockerfile/checks/test_AliasIsUnique.py
--- a/tests/dockerfile/checks/test_AliasIsUnique.py
+++ b/tests/dockerfile/checks/test_AliasIsUnique.py
@@ -15,14 +15,17 @@ def test(self):
report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
- passing_resources = {"/success/Dockerfile."}
+ passing_resources = {
+ "/success/Dockerfile.",
+ "/success_platform/Dockerfile.",
+ }
failing_resources = {"/failure/Dockerfile.FROM"}
- passed_check_resources = set([c.resource for c in report.passed_checks])
- failed_check_resources = set([c.resource for c in report.failed_checks])
+ passed_check_resources = {c.resource for c in report.passed_checks}
+ failed_check_resources = {c.resource for c in report.failed_checks}
- self.assertEqual(summary["passed"], 1)
- self.assertEqual(summary["failed"], 1)
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
| CKV_DOCKER_11 false positive when `--platform` is used
**Describe the issue**
CKV_DOCKER_11 false positive when `--platform` is used (possibly other arguments as well)
For reference: _"CKV_DOCKER_11: Ensure From Alias are unique for multistage builds."_ In other words, make sure you add `as myAlias` at the end of your `FROM` line.
**Examples**
This will PASS as expected:
`FROM node:16 as build`
Now, add `--platform` and it will FAIL:
`FROM --platform=linux/amd64 node:16 as build`
**Version (please complete the following information):**
```
> checkov -v
2.3.240
```
**Additional context**
Add any other context about the problem here.
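A minimal sketch of why the alias extraction trips on `--platform`; the instruction value string below is an assumption about how the Dockerfile parser hands the `FROM` arguments to the check:
```python
# Value of a FROM instruction as seen by the check (everything after "FROM").
plain = "node:16 as FOO"
with_platform = "--platform=linux/amd64 node:16 as FOO"

# Old logic: take the third whitespace-separated token.
print(plain.split()[2])          # "FOO"  -> correct
print(with_platform.split()[2])  # "as"   -> wrong, the extra flag shifts the tokens

# Fixed logic: the alias is always the last token.
print(with_platform.rsplit(maxsplit=1)[-1])  # "FOO"
```
With two such stages every extracted "alias" collapses to `as`, the set-based uniqueness test sees a duplicate, and the check fails even though `FOO` and `BAR` are unique.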
| hey @LinguineCode thanks for reaching out.
I tried to reproduce it, but the mentioned check is passing for me. Can you please provide your `checkov` version and if possible a more complete example?
> hey @LinguineCode thanks for reaching out.
>
> I tried to reproduce it, but the mentioned check is passing for me. Can you please provide your `checkov` version and if possible a more complete example?
My pleasure, here you go:
> ```
> > checkov -v
> 2.3.240
> ```
> ```
> > checkov --quiet -f Dockerfile
> dockerfile scan results:
>
> Passed checks: 6, Failed checks: 1, Skipped checks: 0
>
> Check: CKV_DOCKER_11: "Ensure From Alias are unique for multistage builds."
> FAILED for resource: Dockerfile.FROM
> File: Dockerfile:1-1
> Guide: https://docs.bridgecrew.io/docs/ensure-docker-from-alias-is-unique-for-multistage-builds
>
> 1 | FROM --platform=linux/amd64 node:16 as FOO
>
> >
> ```
Here's the `Dockerfile`:
```Dockerfile
FROM --platform=linux/amd64 node:16 as FOO
RUN npm install
FROM --platform=linux/amd64 node:16 as BAR
RUN npm run
USER nobody
HEALTHCHECK CMD curl --fail http://localhost:3000 || exit 1
CMD mycommand.sh
```
thanks, that did the trick 😄 | 2023-06-03T16:12:33 | -1.0 |
bridgecrewio/checkov | 5,171 | bridgecrewio__checkov-5171 | [
"5148"
] | 1a308323d4d12ee03eab342d863b060b21f3f041 | diff --git a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
--- a/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
+++ b/checkov/terraform/checks/utils/iam_terraform_document_to_policy_converter.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
from typing import Dict, List, Any
from checkov.common.util.data_structures_utils import pickle_deepcopy
@@ -23,4 +25,13 @@ def convert_terraform_conf_to_iam_policy(conf: Dict[str, List[Dict[str, Any]]])
statement["Effect"] = statement.pop("effect")[0]
if "effect" not in statement and "Effect" not in statement:
statement["Effect"] = "Allow"
+ if "condition" in statement:
+ conditions = statement.pop("condition")
+ if conditions and isinstance(conditions, list):
+ statement["Condition"] = {}
+ for condition in conditions:
+ cond_operator = condition["test"][0]
+ cond_key = condition["variable"][0]
+ cond_value = condition["values"][0]
+ statement["Condition"].setdefault(cond_operator, {})[cond_key] = cond_value
return result
| diff --git a/tests/terraform/checks/data/aws/example_ResourcePolicyDocument/main.tf b/tests/terraform/checks/data/aws/example_ResourcePolicyDocument/main.tf
--- a/tests/terraform/checks/data/aws/example_ResourcePolicyDocument/main.tf
+++ b/tests/terraform/checks/data/aws/example_ResourcePolicyDocument/main.tf
@@ -86,3 +86,23 @@ data "aws_iam_policy_document" "pass_unrestrictable" {
]
}
}
+
+data "aws_iam_policy_document" "pass_condition" {
+ statement {
+ actions = [
+ "kms:GenerateDataKey",
+ "kms:Decrypt"
+ ]
+ resources = [
+ "*"
+ ]
+
+ condition {
+ test = "ArnEquals"
+ variable = "aws:SourceArn"
+ values = [
+ "arn"
+ ]
+ }
+ }
+}
diff --git a/tests/terraform/checks/data/aws/test_ResourcePolicyDocument.py b/tests/terraform/checks/data/aws/test_ResourcePolicyDocument.py
--- a/tests/terraform/checks/data/aws/test_ResourcePolicyDocument.py
+++ b/tests/terraform/checks/data/aws/test_ResourcePolicyDocument.py
@@ -17,6 +17,7 @@ def test(self):
"aws_iam_policy_document.pass",
"aws_iam_policy_document.pass2",
"aws_iam_policy_document.pass_unrestrictable",
+ "aws_iam_policy_document.pass_condition",
}
failing_resources = {
"aws_iam_policy_document.fail",
diff --git a/tests/terraform/util/test_iam_converter.py b/tests/terraform/util/test_iam_converter.py
--- a/tests/terraform/util/test_iam_converter.py
+++ b/tests/terraform/util/test_iam_converter.py
@@ -1,19 +1,79 @@
import unittest
-from checkov.terraform.checks.utils.iam_terraform_document_to_policy_converter import \
- convert_terraform_conf_to_iam_policy
+from checkov.terraform.checks.utils.iam_terraform_document_to_policy_converter import (
+ convert_terraform_conf_to_iam_policy,
+)
class TestIAMConverter(unittest.TestCase):
-
def test_iam_converter(self):
conf = {'version': ['2012-10-17'], 'statement': [{'actions': [['*']], 'resources': [['*']]}]}
expected_result = {'version': ['2012-10-17'], 'Statement': [{'Action': ['*'], 'Resource': ['*'], 'Effect': 'Allow'}]}
result = convert_terraform_conf_to_iam_policy(conf)
- self.assertDictEqual(result,expected_result)
- self.assertNotEqual(result,conf)
+ self.assertDictEqual(result, expected_result)
+ self.assertNotEqual(result, conf)
+
+ def test_convert_condition(self):
+ # given
+ conf = {
+ "__end_line__": 77,
+ "__start_line__": 42,
+ "statement": [
+ {
+ "actions": [["kms:Decrypt", "kms:GenerateDataKey"]],
+ "condition": [
+ {
+ "test": ["ForAnyValue:StringEquals"],
+ "values": [["pi"]],
+ "variable": ["kms:EncryptionContext:service"],
+ },
+ {
+ "test": ["ForAnyValue:StringEquals"],
+ "values": [["rds"]],
+ "variable": ["kms:EncryptionContext:aws:pi:service"],
+ },
+ {
+ "test": ["ForAnyValue:StringEquals"],
+ "values": [["db-AAAAABBBBBCCCCCDDDDDEEEEE", "db-EEEEEDDDDDCCCCCBBBBBAAAAA"]],
+ "variable": ["kms:EncryptionContext:aws:rds:db-id"],
+ },
+ {"test": ["ArnEquals"], "values": [["arn"]], "variable": ["aws:SourceArn"]},
+ ],
+ "resources": [["*"]],
+ }
+ ],
+ "__address__": "aws_iam_policy_document.example_multiple_condition_keys_and_values",
+ }
+
+ result = convert_terraform_conf_to_iam_policy(conf)
+ self.assertDictEqual(
+ result,
+ {
+ "__end_line__": 77,
+ "__start_line__": 42,
+ "__address__": "aws_iam_policy_document.example_multiple_condition_keys_and_values",
+ "Statement": [
+ {
+ "Action": ["kms:Decrypt", "kms:GenerateDataKey"],
+ "Resource": ["*"],
+ "Effect": "Allow",
+ "Condition": {
+ "ForAnyValue:StringEquals": {
+ "kms:EncryptionContext:service": ["pi"],
+ "kms:EncryptionContext:aws:pi:service": ["rds"],
+ "kms:EncryptionContext:aws:rds:db-id": [
+ "db-AAAAABBBBBCCCCCDDDDDEEEEE",
+ "db-EEEEEDDDDDCCCCCBBBBBAAAAA",
+ ],
+ },
+ "ArnEquals": {"aws:SourceArn": ["arn"]},
+ },
+ }
+ ],
+ },
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
unittest.main()
| Checkov v2.3.261 fails with CKV_AWS_356 for KMS actions which must specify 'all resources'
**Describe the issue**
Checkov v2.3.261's CKV_AWS_356 check highlights IAM policies that are overly permissive, but it incorrectly flags KMS actions that must target all resources, potentially scoped with conditional access, per https://docs.aws.amazon.com/kms/latest/developerguide/key-policy-services.html
Similar issue for https://github.com/bridgecrewio/checkov/issues/5134 where certain actions like 'list' require all resources.
**Examples**
```
data "aws_iam_policy_document" "myKmsKey" {
actions = [
"kms:GenerateDataKey",
"kms:Decrypt"
]
resources = [
"*"
]
condition {
test = "ArnEquals"
variable = "aws:SourceArn"
values = [
<SOME OTHER RESOURCE>.arn
]
}
}
}
```
**Version (please complete the following information):**
- Checkov Version 2.3.261
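A minimal sketch of the conversion the patch above adds, showing how a parsed Terraform `condition` block ends up as an IAM-style `Condition` mapping. It assumes checkov is installed, and the list-wrapped input dict mimics checkov's HCL parser output:
```python
from checkov.terraform.checks.utils.iam_terraform_document_to_policy_converter import (
    convert_terraform_conf_to_iam_policy,
)

conf = {
    "statement": [
        {
            "actions": [["kms:GenerateDataKey", "kms:Decrypt"]],
            "resources": [["*"]],
            "condition": [
                {"test": ["ArnEquals"], "variable": ["aws:SourceArn"], "values": [["arn"]]},
            ],
        }
    ]
}

policy = convert_terraform_conf_to_iam_policy(conf)
print(policy["Statement"][0]["Condition"])
# {'ArnEquals': {'aws:SourceArn': ['arn']}}
```
Before the fix the `condition` block was dropped, so the converted statement looked like an unconditional grant and tripped the "write access without constraints" policy.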
| hey @aidenvaines-bjss thanks for reaching out.
I think my fix, which was released today with version `2.3.263` should also handle your case, feel free to give it a try.
Sadly, still seeing the issue with 2.3.263 and 2.3.264 I am still seeing the issue
Assuming that fix is https://github.com/bridgecrewio/checkov/pull/5135 it looks like KMS isn't covered
thanks for verifying, it looks like we don't consider the `condition` block correctly. Actually we don't parse it correctly, so it becomes empty. | 2023-06-03T22:30:02 | -1.0 |
bridgecrewio/checkov | 5,189 | bridgecrewio__checkov-5189 | [
"5168"
] | 1bf7a156c8c14cdc7b0b0992feebcbca0883195f | diff --git a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py
--- a/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py
+++ b/checkov/terraform/checks/resource/azure/AKSApiServerAuthorizedIpRanges.py
@@ -16,7 +16,7 @@ def __init__(self) -> None:
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
- return "api_server_authorized_ip_ranges/[0]"
+ return "api_server_access_profile/[0]/authorized_ip_ranges/[0]"
def get_expected_value(self) -> Any:
return ANY_VALUE
@@ -26,6 +26,12 @@ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
private_cluster_enabled = conf.get("private_cluster_enabled", [False])[0]
if private_cluster_enabled:
return CheckResult.PASSED
+
+ # provider version <=3.38.0
+ api_server = conf.get("api_server_authorized_ip_ranges")
+ if api_server and isinstance(api_server, list) and api_server[0]:
+ return CheckResult.PASSED
+
return super().scan_resource_conf(conf)
| diff --git a/tests/terraform/checks/resource/azure/example_AKSApiServerAuthorizedIpRanges/main.tf b/tests/terraform/checks/resource/azure/example_AKSApiServerAuthorizedIpRanges/main.tf
--- a/tests/terraform/checks/resource/azure/example_AKSApiServerAuthorizedIpRanges/main.tf
+++ b/tests/terraform/checks/resource/azure/example_AKSApiServerAuthorizedIpRanges/main.tf
@@ -38,6 +38,17 @@ resource "azurerm_kubernetes_cluster" "private" {
private_cluster_enabled = true
}
+resource "azurerm_kubernetes_cluster" "version_3_39" {
+ name = "example"
+ location = "azurerm_resource_group.example.location"
+ resource_group_name = "azurerm_resource_group.example.name"
+ dns_prefix = "example"
+
+ api_server_access_profile {
+ authorized_ip_ranges = ["192.168.0.0/16"]
+ }
+}
+
# fail
resource "azurerm_kubernetes_cluster" "default" {
diff --git a/tests/terraform/checks/resource/azure/test_AKSApiServerAuthorizedIpRanges.py b/tests/terraform/checks/resource/azure/test_AKSApiServerAuthorizedIpRanges.py
--- a/tests/terraform/checks/resource/azure/test_AKSApiServerAuthorizedIpRanges.py
+++ b/tests/terraform/checks/resource/azure/test_AKSApiServerAuthorizedIpRanges.py
@@ -19,7 +19,8 @@ def test(self):
passing_resources = {
"azurerm_kubernetes_cluster.enabled",
- "azurerm_kubernetes_cluster.private"
+ "azurerm_kubernetes_cluster.private",
+ "azurerm_kubernetes_cluster.version_3_39",
}
failing_resources = {
@@ -30,8 +31,8 @@ def test(self):
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}
- self.assertEqual(summary["passed"], 2)
- self.assertEqual(summary["failed"], 2)
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
| [CKV_AZURE_6] AKS API Server Whitelist Tests Failing with Correct Code
**Describe the issue**
[CKV_AZURE_6](https://github.com/bridgecrewio/checkov/tree/master/checkov/arm/checks/resource/AKSApiServerAuthorizedIpRanges.py)
This check should trigger when an API Server whitelist IP isn't found in the TF code.
**Examples**
Please share an example code sample (in the IaC of your choice) + the expected outcomes.
Sample (Pre 3.39.0 Provider Version):
```
resource "azurerm_kubernetes_cluster" "aks_k2" {
name = var.cluster_name
location = azurerm_resource_group.rg_aks.location
resource_group_name = azurerm_resource_group.rg_aks.name
sku_tier = var.sku_tier
dns_prefix = var.dns_name
api_server_authorized_ip_ranges = [my_ip_list]
}
```
Sample (Post 3.39.0):
```
resource "azurerm_kubernetes_cluster" "aks_k2" {
name = var.cluster_name
location = azurerm_resource_group.rg_aks.location
resource_group_name = azurerm_resource_group.rg_aks.name
sku_tier = var.sku_tier
dns_prefix = var.dns_name
api_server_access_profile {
authorized_ip_ranges = [my_ip_list]
}
}
```
Both are expected to pass this check, as we list four IPs for whitelisting.
However, the checks are failing:

**Version (please complete the following information):**
- Checkov Version: checkov-2.3.272
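A minimal sketch of the two parsed shapes the fix above has to handle; the list-wrapping mirrors how checkov's HCL parser represents attributes and blocks, and should be treated as an assumption:
```python
# provider <= 3.38.0: top-level attribute
pre_3_39 = {"api_server_authorized_ip_ranges": [["1.2.3.4/32"]]}

# provider >= 3.39.0: nested api_server_access_profile block
post_3_39 = {"api_server_access_profile": [{"authorized_ip_ranges": [["1.2.3.4/32"]]}]}


def has_authorized_ranges(conf: dict) -> bool:
    legacy = conf.get("api_server_authorized_ip_ranges")
    if legacy and legacy[0]:
        return True
    profile = conf.get("api_server_access_profile")
    if profile and profile[0].get("authorized_ip_ranges", [None])[0]:
        return True
    return False


assert has_authorized_ranges(pre_3_39) and has_authorized_ranges(post_3_39)
```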
| 2023-06-07T21:09:35 | -1.0 |
|
bridgecrewio/checkov | 5,193 | bridgecrewio__checkov-5193 | [
"4394"
] | 4f52296102079ca5ae3f777b42a156f91ffcbda4 | diff --git a/checkov/cloudformation/checks/resource/aws/KMSKeyWildCardPrincipal.py b/checkov/cloudformation/checks/resource/aws/KMSKeyWildCardPrincipal.py
--- a/checkov/cloudformation/checks/resource/aws/KMSKeyWildCardPrincipal.py
+++ b/checkov/cloudformation/checks/resource/aws/KMSKeyWildCardPrincipal.py
@@ -1,58 +1,43 @@
-from checkov.common.models.enums import CheckResult, CheckCategories
-from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
-
-
-def get_recursively(search_dict, field):
- """
- Takes a dict with nested lists and dicts,
- and searches all dicts for a key of the field
- provided.
- """
- fields_found = []
-
- for key, value in search_dict.items():
+from __future__ import annotations
- if key == field:
- fields_found.append(value)
+from typing import Any
- elif isinstance(value, dict):
- results = get_recursively(value, field)
- for result in results:
- fields_found.append(result)
-
- elif isinstance(value, list):
- for item in value:
- if isinstance(item, dict):
- more_results = get_recursively(item, field)
- for another_result in more_results:
- fields_found.append(another_result)
-
- return fields_found
+from checkov.common.models.enums import CheckResult, CheckCategories
+from checkov.cloudformation.checks.resource.base_resource_value_check import BaseResourceValueCheck
class KMSKeyWildCardPrincipal(BaseResourceValueCheck):
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure KMS key policy does not contain wildcard (*) principal"
id = "CKV_AWS_33"
- supported_resources = ['AWS::KMS::Key']
- categories = [CheckCategories.ENCRYPTION]
+ supported_resources = ("AWS::KMS::Key",)
+ categories = (CheckCategories.ENCRYPTION,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return 'Properties/KeyPolicy/Statement/Principal'
-
- def scan_resource_conf(self, conf):
- if conf.get('Properties'):
- if conf['Properties'].get('KeyPolicy'):
- policy_block = conf['Properties']['KeyPolicy']
- principals_list = get_recursively(policy_block, 'Principal')
- for principal in principals_list:
- if isinstance(principal, dict):
- for principal_value in principal.values():
- if principal_value == '*' or (isinstance(principal_value, list) and '*' in principal_value):
- return CheckResult.FAILED
- else:
- if principal == '*':
+ def get_inspected_key(self) -> str:
+ return "Properties/KeyPolicy/Statement/Principal"
+
+ def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:
+ properties = conf.get("Properties")
+ if properties and isinstance(properties, dict):
+ policy_block = properties.get("KeyPolicy")
+ if policy_block and isinstance(policy_block, dict):
+ statements = policy_block.get("Statement")
+ if statements and isinstance(statements, list):
+ for statement in statements:
+ principal = statement.get("Principal")
+ if not principal:
+ continue
+ if statement.get("Effect") == "Deny":
+ continue
+
+ if isinstance(principal, dict) and "AWS" in principal:
+ # the actual principals can be under the `AWS`
+ principal = principal["AWS"]
+
+ if isinstance(principal, str) and principal == "*":
+ return CheckResult.FAILED
+ if isinstance(principal, list) and "*" in principal:
return CheckResult.FAILED
return CheckResult.PASSED
diff --git a/checkov/cloudformation/checks/resource/base_resource_value_check.py b/checkov/cloudformation/checks/resource/base_resource_value_check.py
--- a/checkov/cloudformation/checks/resource/base_resource_value_check.py
+++ b/checkov/cloudformation/checks/resource/base_resource_value_check.py
@@ -1,11 +1,12 @@
+from __future__ import annotations
+
import re
from abc import abstractmethod
from collections.abc import Iterable
-from typing import List, Any, Dict
+from typing import List, Any
from checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck
from checkov.cloudformation.context_parser import ContextParser
-from checkov.common.parsers.node import StrNode, DictNode
from checkov.common.models.consts import ANY_VALUE
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.common.util.type_forcers import force_list
@@ -50,7 +51,7 @@ def _is_nesting_key(inspected_attributes: List[str], key: str) -> bool:
"""
return any(x in key for x in inspected_attributes)
- def scan_resource_conf(self, conf: Dict[StrNode, DictNode]) -> CheckResult:
+ def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:
inspected_key = self.get_inspected_key()
expected_values = self.get_expected_values()
path_elements = inspected_key.split("/")
| diff --git a/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-AWS-Wildcard.yaml b/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-AWS-Wildcard.yaml
--- a/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-AWS-Wildcard.yaml
+++ b/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-AWS-Wildcard.yaml
@@ -1,7 +1,7 @@
AWSTemplateFormatVersion: 2010-09-09
Description: KMS key example template
Resources:
- myKey:
+ AwsWildcard:
Type: AWS::KMS::Key
Properties:
KeyPolicy:
@@ -15,7 +15,7 @@ Resources:
Action: kms:*
Resource: '*'
EnableKeyRotation: true
- SomeKmsKey:
+ AwsWildcardList:
Type: 'AWS::KMS::Key'
Properties:
KeyPolicy:
diff --git a/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-Wildcard.yaml b/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-Wildcard.yaml
--- a/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-Wildcard.yaml
+++ b/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-FAILED-Wildcard.yaml
@@ -1,7 +1,7 @@
AWSTemplateFormatVersion: 2010-09-09
Description: KMS key example template
Resources:
- myKey:
+ Wildcard:
Type: AWS::KMS::Key
Properties:
KeyPolicy:
diff --git a/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-PASSED.yaml b/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-PASSED.yaml
--- a/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-PASSED.yaml
+++ b/tests/cloudformation/checks/resource/aws/example_KMSKeyWildCardPrincipal/KMSKeyWildCardPrincipal-PASSED.yaml
@@ -1,7 +1,7 @@
AWSTemplateFormatVersion: 2010-09-09
Description: KMS key example template
Resources:
- myKey:
+ Root:
Type: AWS::KMS::Key
Properties:
KeyPolicy:
@@ -15,3 +15,17 @@ Resources:
Action: 'kms:*'
Resource: '*'
EnableKeyRotation: true
+ Deny:
+ Type: AWS::KMS::Key
+ Properties:
+ KeyPolicy:
+ Version: '2012-10-17'
+ Id: key-default-1
+ Statement:
+ - Sid: Enable Permissions for All AWS Principals
+ Effect: Deny
+ Principal:
+ AWS: '*'
+ Action: kms:*
+ Resource: '*'
+ EnableKeyRotation: true
diff --git a/tests/cloudformation/checks/resource/aws/test_KMSKeyWildCardPrincipal.py b/tests/cloudformation/checks/resource/aws/test_KMSKeyWildCardPrincipal.py
--- a/tests/cloudformation/checks/resource/aws/test_KMSKeyWildCardPrincipal.py
+++ b/tests/cloudformation/checks/resource/aws/test_KMSKeyWildCardPrincipal.py
@@ -7,7 +7,6 @@
class TestKMSKeyWildCardPrincipal(unittest.TestCase):
-
def test_summary(self):
runner = Runner()
current_dir = os.path.dirname(os.path.realpath(__file__))
@@ -16,11 +15,28 @@ def test_summary(self):
report = runner.run(root_folder=test_files_dir,runner_filter=RunnerFilter(checks=[check.id]))
summary = report.get_summary()
- self.assertEqual(summary['passed'], 1)
- self.assertEqual(summary['failed'], 3)
+
+ passing_resources = {
+ "AWS::KMS::Key.Root",
+ "AWS::KMS::Key.Deny",
+ }
+ failing_resources = {
+ "AWS::KMS::Key.AwsWildcard",
+ "AWS::KMS::Key.AwsWildcardList",
+ "AWS::KMS::Key.Wildcard",
+ }
+
+ passed_check_resources = {c.resource for c in report.passed_checks}
+ failed_check_resources = {c.resource for c in report.failed_checks}
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
self.assertEqual(summary['skipped'], 0)
self.assertEqual(summary['parsing_errors'], 0)
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
if __name__ == '__main__':
unittest.main()
| CKV_AWS_33 false positive for deny statements
**Describe the issue**
CKV_AWS_33 produces false positives for KMS KeyPolicy statements that have a wildcard principal with `Effect: "Deny"`
**Examples**
```yaml
TheKey:
Type: AWS::KMS::Key
Properties:
KeyPolicy:
Version: "2012-10-17"
Statement:
- Effect: "Deny"
Principal: "*"
...
```
**Version (please complete the following information):**
- Checkov Version [2.2.327]
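A minimal sketch of the statement-level logic the fix above introduces, with a hand-rolled helper (not the check's actual API) to make the Deny exemption explicit:
```python
def violates_wildcard_principal(statement: dict) -> bool:
    if statement.get("Effect") == "Deny":
        # A Deny with Principal "*" restricts access, so it is not flagged.
        return False
    principal = statement.get("Principal")
    if isinstance(principal, dict) and "AWS" in principal:
        principal = principal["AWS"]
    if isinstance(principal, str):
        return principal == "*"
    if isinstance(principal, list):
        return "*" in principal
    return False


assert violates_wildcard_principal({"Effect": "Allow", "Principal": {"AWS": "*"}})
assert not violates_wildcard_principal({"Effect": "Deny", "Principal": "*"})
```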
| 2023-06-09T15:53:11 | -1.0 |
|
bridgecrewio/checkov | 5,222 | bridgecrewio__checkov-5222 | [
"5099"
] | 440ecaeb8b654b0bdd4faf9b34b4b5ada7e4660d | diff --git a/checkov/terraform/checks/resource/github/SecretsEncrypted.py b/checkov/terraform/checks/resource/github/SecretsEncrypted.py
--- a/checkov/terraform/checks/resource/github/SecretsEncrypted.py
+++ b/checkov/terraform/checks/resource/github/SecretsEncrypted.py
@@ -25,6 +25,10 @@ def scan_resource_conf(self, conf: Dict[str, List[Any]]) -> CheckResult:
if plaintext and self._is_variable_dependant(plaintext[0]):
return CheckResult.UNKNOWN
+ if isinstance(plaintext, list) and not plaintext[0]:
+ # this happens mainly in TF plan files, because the value is just an empty string
+ return CheckResult.PASSED
+
return super().scan_resource_conf(conf)
def get_inspected_key(self) -> str:
| diff --git a/tests/terraform/checks/resource/github/example_SecretsEncrypted/main.tf b/tests/terraform/checks/resource/github/example_SecretsEncrypted/main.tf
--- a/tests/terraform/checks/resource/github/example_SecretsEncrypted/main.tf
+++ b/tests/terraform/checks/resource/github/example_SecretsEncrypted/main.tf
@@ -40,6 +40,13 @@ resource "github_actions_secret" "pass" {
encrypted_value = "WOULDBEENCRYPTED"
}
+resource "github_actions_organization_secret" "pass_empty_value" {
+ environment = "example_environment"
+ secret_name = "example_secret_name"
+ encrypted_value = "WOULDBEENCRYPTED"
+ plaintext_value = ""
+}
+
# value ref
resource "azuread_service_principal_password" "gh_actions" {
diff --git a/tests/terraform/checks/resource/github/test_SecretsEncrypted.py b/tests/terraform/checks/resource/github/test_SecretsEncrypted.py
--- a/tests/terraform/checks/resource/github/test_SecretsEncrypted.py
+++ b/tests/terraform/checks/resource/github/test_SecretsEncrypted.py
@@ -20,6 +20,7 @@ def test(self):
passing_resources = {
"github_actions_environment_secret.pass",
"github_actions_organization_secret.pass",
+ "github_actions_organization_secret.pass_empty_value",
"github_actions_secret.pass",
}
failing_resources = {
@@ -36,7 +37,7 @@ def test(self):
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
# github_actions_secret.value_ref is dependent on azuread_service_principal_password.gh_actions
- self.assertEqual(summary["resource_count"], 8) # 2 extra
+ self.assertEqual(summary["resource_count"], 9) # 2 extra
self.assertEqual(passing_resources, passed_check_resources)
self.assertEqual(failing_resources, failed_check_resources)
| CKV_GIT_4 always fail with terraform_plan
**Describe the issue**
Once a GitHub org/repo secret is created and stored in the terraform state, the check CKV_GIT_4 will always fail when scanning terraform plans even if the value was created using an encrypted value.
It seems like the check considers `"plaintext_value": ""` to be a hard-coded secret, because if I remove that line from the plan or change it to `"plaintext_value": null`, the check passes.
```
"resources": [
{
"address": "github_actions_organization_secret.my_github_secret",
"mode": "managed",
"type": "github_actions_organization_secret",
"name": "my_github_secret",
"provider_name": "registry.terraform.io/integrations/github",
"schema_version": 0,
"values": {
"created_at": "2023-05-17 13:54:59 +0000 UTC",
"encrypted_value": "MIr5c6eSzTJeGW/uyB0u...",
"id": "MY_GITHUB_SECRET",
"plaintext_value": "",
"secret_name": "MY_GITHUB_SECRET",
"selected_repository_ids": [],
"updated_at": "2023-05-17 13:54:59 +0000 UTC",
"visibility": "all"
},
"sensitive_values": {
"selected_repository_ids": []
}
}
```
**Examples**
**Version (please complete the following information):**
- Checkov Version 2.3.223
**Additional context**
Add any other context about the problem here.
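A minimal sketch of what the fix above changes: in a plan file the unset `plaintext_value` is rendered as an empty string rather than being absent, so the parsed config (shape assumed here, and checkov assumed to be installed) has to be treated as "no hard-coded secret":
```python
from checkov.common.models.enums import CheckResult

# Parsed from a terraform_plan file: the secret was set via encrypted_value only,
# but the plan still renders plaintext_value as "".
conf = {
    "secret_name": ["MY_GITHUB_SECRET"],
    "encrypted_value": ["MIr5c6eSzTJeGW/uyB0u..."],
    "plaintext_value": [""],
}

plaintext = conf.get("plaintext_value")
if isinstance(plaintext, list) and not plaintext[0]:
    result = CheckResult.PASSED  # empty string -> nothing hard-coded
print(result)
```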
| 2023-06-18T18:37:09 | -1.0 |
|
bridgecrewio/checkov | 5,247 | bridgecrewio__checkov-5247 | [
"5246"
] | e2aa231efeed33e1a9dff389f70a5dd652f0f5fc | diff --git a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
--- a/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
+++ b/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py
@@ -21,7 +21,7 @@ def get_inspected_key(self) -> str:
return "ca_cert_identifier"
def get_expected_values(self) -> List[Any]:
- return ["rds-ca-2019"]
+ return ["rds-ca-rsa2048-g1", "rds-ca-rsa4096-g1", "rds-ca-ecc384-g1"]
check = RDSCACertIsRecent()
| diff --git a/tests/terraform/checks/resource/aws/example_RDSCACertIsRecent/main.tf b/tests/terraform/checks/resource/aws/example_RDSCACertIsRecent/main.tf
--- a/tests/terraform/checks/resource/aws/example_RDSCACertIsRecent/main.tf
+++ b/tests/terraform/checks/resource/aws/example_RDSCACertIsRecent/main.tf
@@ -5,7 +5,7 @@ resource "aws_db_instance" "fail" {
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t2.micro"
- name = "mydb"
+ db_name = "mydb"
username = "foo"
password = "foobarbaz"
iam_database_authentication_enabled = true
@@ -13,29 +13,25 @@ resource "aws_db_instance" "fail" {
ca_cert_identifier = "rds-ca-2015"
}
-resource "aws_db_instance" "pass" {
- allocated_storage = 20
- storage_type = "gp2"
- engine = "mysql"
- engine_version = "5.7"
- instance_class = "db.t2.micro"
- name = "mydb"
- username = "foo"
- password = "foobarbaz"
- iam_database_authentication_enabled = true
- storage_encrypted = true
- ca_cert_identifier = "rds-ca-2019"
+locals {
+ passing_ca_cert_identifiers = [
+ "rds-ca-rsa2048-g1",
+ "rds-ca-rsa4096-g1",
+ "rds-ca-ecc384-g1",
+ ]
}
-resource "aws_db_instance" "pass2" {
+resource "aws_db_instance" "pass" {
+ for_each = local.passing_ca_cert_identifiers
allocated_storage = 20
storage_type = "gp2"
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t2.micro"
- name = "mydb"
+ db_name = "mydb"
username = "foo"
password = "foobarbaz"
iam_database_authentication_enabled = true
storage_encrypted = true
+ ca_cert_identifier = each.key
}
diff --git a/tests/terraform/checks/resource/aws/test_RDSCACertIsRecent.py b/tests/terraform/checks/resource/aws/test_RDSCACertIsRecent.py
--- a/tests/terraform/checks/resource/aws/test_RDSCACertIsRecent.py
+++ b/tests/terraform/checks/resource/aws/test_RDSCACertIsRecent.py
@@ -14,8 +14,9 @@ def test(self):
summary = report.get_summary()
passing_resources = {
- "aws_db_instance.pass",
- "aws_db_instance.pass2",
+ "aws_db_instance.pass[\"rds-ca-rsa2048-g1\"]",
+ "aws_db_instance.pass[\"rds-ca-rsa4096-g1\"]",
+ "aws_db_instance.pass[\"rds-ca-ecc384-g1\"]",
}
failing_resources = {
"aws_db_instance.fail",
@@ -24,7 +25,7 @@ def test(self):
passed_check_resources = set([c.resource for c in report.passed_checks])
failed_check_resources = set([c.resource for c in report.failed_checks])
- self.assertEqual(summary["passed"], 2)
+ self.assertEqual(summary["passed"], 3)
self.assertEqual(summary["failed"], 1)
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
| Missing AWS RDS CA in CKV_AWS_211
**Describe the issue**
In check CKV_AWS_211, checkov currently only checks for one possible CA on AWS RDS instances, namely `rds-ca-2019` (see [associated code](https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/aws/RDSCACertIsRecent.py#L24)) whereas RDS supports several (see [AWS docs](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.SSL.html#UsingWithRDS.SSL.RegionCertificateAuthorities)). The check should accept those CAs: `rds-ca-rsa2048-g1`, `rds-ca-rsa4096-g1` and `rds-ca-ecc384-g1`.
**Examples**
Terraform code on which the check should pass:
```terraform
resource "aws_db_instance" "pass3" {
allocated_storage = 20
storage_type = "gp2"
engine = "mysql"
engine_version = "5.7"
instance_class = "db.t2.micro"
db_name = "mydb"
username = "foo"
password = "foobarbaz"
iam_database_authentication_enabled = true
storage_encrypted = true
ca_cert_identifier = "rds-ca-rsa2048-g1"
}
```
When I run checkov on this Terraform example, I get an error whereas the test should pass:
```
Check: CKV_AWS_211: "Ensure RDS uses a modern CaCert"
FAILED for resource: aws_db_instance.pass3
File: /main.tf:43-55
Guide: https://docs.paloaltonetworks.com/content/techdocs/en_US/prisma/prisma-cloud/prisma-cloud-code-security-policy-reference/aws-policies/aws-general-policies/ensure-aws-rds-uses-a-modern-cacert.html
43 | resource "aws_db_instance" "pass3" {
44 | allocated_storage = 20
45 | storage_type = "gp2"
46 | engine = "mysql"
47 | engine_version = "5.7"
48 | instance_class = "db.t2.micro"
49 | db_name = "mydb"
50 | username = "foo"
51 | password = "foobarbaz"
52 | iam_database_authentication_enabled = true
53 | storage_encrypted = true
54 | ca_cert_identifier = "rds-ca-rsa2048-g1"
55 | }
```
**Version (please complete the following information):**
- Checkov Version 2.0.930
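A minimal sketch of the value comparison after the change above; the helper is illustrative, since the real check inherits the comparison from checkov's base value-check class:
```python
EXPECTED_CA_CERT_IDENTIFIERS = ["rds-ca-rsa2048-g1", "rds-ca-rsa4096-g1", "rds-ca-ecc384-g1"]

def ca_cert_is_recent(ca_cert_identifier: str) -> bool:
    return ca_cert_identifier in EXPECTED_CA_CERT_IDENTIFIERS

assert ca_cert_is_recent("rds-ca-rsa2048-g1")
assert not ca_cert_is_recent("rds-ca-2019")  # the old default no longer passes
```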
| 2023-06-22T18:10:09 | -1.0 |
|
bridgecrewio/checkov | 5,254 | bridgecrewio__checkov-5254 | [
"5252"
] | e2aa231efeed33e1a9dff389f70a5dd652f0f5fc | diff --git a/checkov/arm/base_resource_check.py b/checkov/arm/base_resource_check.py
--- a/checkov/arm/base_resource_check.py
+++ b/checkov/arm/base_resource_check.py
@@ -45,7 +45,12 @@ def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> CheckResul
self.api_version = conf["api_version"]
conf["config"]["apiVersion"] = conf["api_version"] # set for better reusability of existing ARM checks
- return self.scan_resource_conf(conf["config"], entity_type) # type:ignore[no-any-return] # issue with multi_signature annotation
+ resource_conf = conf["config"]
+ if "loop_type" in resource_conf:
+ # this means the whole resource block is surrounded by a for loop
+ resource_conf = resource_conf["config"]
+
+ return self.scan_resource_conf(resource_conf, entity_type) # type:ignore[no-any-return] # issue with multi_signature annotation
self.api_version = None
diff --git a/checkov/arm/checks/resource/AzureManagedDiscEncryption.py b/checkov/arm/checks/resource/AzureManagedDiscEncryption.py
--- a/checkov/arm/checks/resource/AzureManagedDiscEncryption.py
+++ b/checkov/arm/checks/resource/AzureManagedDiscEncryption.py
@@ -4,6 +4,7 @@
from checkov.common.models.enums import CheckResult, CheckCategories
from checkov.arm.base_resource_check import BaseResourceCheck
+from checkov.common.util.data_structures_utils import find_in_dict
class AzureManagedDiscEncryption(BaseResourceCheck):
@@ -15,15 +16,21 @@ def __init__(self) -> None:
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:
- if "properties" in conf:
- if "encryptionSettingsCollection" in conf["properties"]:
- if "enabled" in conf["properties"]["encryptionSettingsCollection"]:
- if str(conf["properties"]["encryptionSettingsCollection"]["enabled"]).lower() == "true":
- return CheckResult.PASSED
- elif "encryptionSettings" in conf["properties"]:
- if "enabled" in conf["properties"]["encryptionSettings"]:
- if str(conf["properties"]["encryptionSettings"]["enabled"]).lower() == "true":
- return CheckResult.PASSED
+ properties = conf.get("properties")
+ if properties:
+ encryption = properties.get("encryption")
+ if encryption:
+ # if the block exists, then it is enabled
+ return CheckResult.PASSED
+
+ encryption_enabled = find_in_dict(input_dict=properties, key_path="encryptionSettingsCollection/enabled")
+ if str(encryption_enabled).lower() == "true":
+ return CheckResult.PASSED
+
+ encryption_enabled = find_in_dict(input_dict=properties, key_path="encryptionSettings/enabled")
+ if str(encryption_enabled).lower() == "true":
+ return CheckResult.PASSED
+
return CheckResult.FAILED
| diff --git a/tests/arm/checks/resource/example_AzureManagedDiscEncryption/azureManagedDiscEncryption-PASSED_3.json b/tests/arm/checks/resource/example_AzureManagedDiscEncryption/azureManagedDiscEncryption-PASSED_3.json
new file mode 100644
--- /dev/null
+++ b/tests/arm/checks/resource/example_AzureManagedDiscEncryption/azureManagedDiscEncryption-PASSED_3.json
@@ -0,0 +1,76 @@
+{
+ "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#",
+ "contentVersion": "1.0.0.0",
+ "parameters": {
+ "vhdUri": {
+ "type": "string",
+ "metadata": {
+ "description": "Storage VHD Uri"
+ }
+ },
+ "managedDiskName": {
+ "type": "string",
+ "metadata": {
+ "description": "Name of the managed disk to be copied"
+ }
+ },
+ "keyVaultResourceID": {
+ "type": "string",
+ "metadata": {
+ "description": "KeyVault resource id. Ex: /subscriptions/subscriptionid/resourceGroups/contosorg/providers/Microsoft.KeyVault/vaults/contosovault"
+ }
+ },
+ "keyVaultSecretUrl": {
+ "type": "string",
+ "metadata": {
+ "description": "KeyVault secret Url. Ex: https://contosovault.vault.azure.net/secrets/contososecret/e088818e865e48488cf363af16dea596"
+ }
+ },
+ "kekUrl": {
+ "type": "string",
+ "defaultValue": "",
+ "metadata": {
+ "description": "KeyVault key encryption key Url. Ex: https://contosovault.vault.azure.net/keys/contosokek/562a4bb76b524a1493a6afe8e536ee78"
+ }
+ },
+ "kekVaultResourceID": {
+ "type": "string",
+ "defaultValue": "",
+ "metadata": {
+ "description": "KekVault resource id. Ex: /subscriptions/subscriptionid/resourceGroups/contosorg/providers/Microsoft.KeyVault/vaults/contosovault"
+ }
+ },
+ "location": {
+ "type": "string",
+ "defaultValue": "[resourceGroup().location]",
+ "metadata": {
+ "description": "Location for all resources."
+ }
+ }
+ },
+ "variables": {
+ "location": "[parameters('location')]",
+ "storageAccountType": "Standard_LRS",
+ "diskSzie": "128"
+ },
+ "resources": [
+ {
+ "apiVersion": "2021-12-01",
+ "type": "Microsoft.Compute/disks",
+ "name": "encryptionBlock",
+ "location": "[variables('location')]",
+ "properties": {
+ "creationData": {
+ "createOption": "Import",
+ "sourceUri": "[parameters('vhdUri')]"
+ },
+ "accountType": "[variables('storageAccountType')]",
+ "diskSizeGB": "[variables('diskSzie')]",
+ "encryption": {
+ "diskEncryptionSetId": "exampleSetId",
+ "type": "EncryptionAtRestWithCustomerKey"
+ }
+ }
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tests/arm/checks/resource/test_AzureManagedDiscEncryption.py b/tests/arm/checks/resource/test_AzureManagedDiscEncryption.py
--- a/tests/arm/checks/resource/test_AzureManagedDiscEncryption.py
+++ b/tests/arm/checks/resource/test_AzureManagedDiscEncryption.py
@@ -20,6 +20,7 @@ def test_summary(self):
passing_resources = {
"Microsoft.Compute/disks.enabled",
"Microsoft.Compute/disks.collectionEnabled",
+ "Microsoft.Compute/disks.encryptionBlock",
}
failing_resources = {
@@ -29,8 +30,8 @@ def test_summary(self):
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}
- self.assertEqual(summary["passed"], 2)
- self.assertEqual(summary["failed"], 1)
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
diff --git a/tests/bicep/examples/loop.bicep b/tests/bicep/examples/loop.bicep
new file mode 100644
--- /dev/null
+++ b/tests/bicep/examples/loop.bicep
@@ -0,0 +1,21 @@
+resource Disks 'Microsoft.Compute/disks@2022-07-02' = [for (disk, i) in dataDisks: {
+ name: disk.diskName
+ location: location
+ tags: tags
+ sku: {
+ name: disk.storageAccountType
+ }
+ zones: [
+ avZone
+ ]
+ properties: {
+ creationData: {
+ createOption: 'Empty'
+ }
+ diskSizeGB: disk.diskSizeGB
+ encryption: {
+ type: 'EncryptionAtRestWithCustomerKey'
+ diskEncryptionSetId: diskEncryptionSetId
+ }
+ }
+}]
diff --git a/tests/bicep/test_graph_manager.py b/tests/bicep/test_graph_manager.py
--- a/tests/bicep/test_graph_manager.py
+++ b/tests/bicep/test_graph_manager.py
@@ -13,16 +13,17 @@ def test_build_graph_from_source_directory():
existing_file = EXAMPLES_DIR / "existing.bicep"
playground_file = EXAMPLES_DIR / "playground.bicep"
graph_file = EXAMPLES_DIR / "graph.bicep"
+ loop_file = EXAMPLES_DIR / "loop.bicep"
graph_manager = BicepGraphManager(db_connector=NetworkxConnector())
# when
local_graph, definitions = graph_manager.build_graph_from_source_directory(source_dir=str(EXAMPLES_DIR))
# then
- assert set(definitions.keys()) == {existing_file, playground_file, graph_file} # should no include 'malformed.bicep' file
+ assert set(definitions.keys()) == {existing_file, playground_file, graph_file, loop_file} # should not include 'malformed.bicep' file
- assert len(local_graph.vertices) == 46
- assert len(local_graph.edges) == 41
+ assert len(local_graph.vertices) == 48
+ assert len(local_graph.edges) == 42
storage_account_idx = local_graph.vertices_by_name["diagsAccount"] # vertices_by_name exists for BicepGraphManager
storage_account = local_graph.vertices[storage_account_idx]
diff --git a/tests/bicep/test_runner.py b/tests/bicep/test_runner.py
--- a/tests/bicep/test_runner.py
+++ b/tests/bicep/test_runner.py
@@ -5,7 +5,6 @@
from checkov.bicep.runner import Runner
from checkov.arm.runner import Runner as ArmRunner
from checkov.common.bridgecrew.check_type import CheckType
-from checkov.common.bridgecrew.code_categories import CodeCategoryConfiguration
from checkov.common.bridgecrew.severities import Severities, BcSeverities
from checkov.common.graph.db_connectors.igraph.igraph_db_connector import IgraphConnector
from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
@@ -200,3 +199,18 @@ def test_runner_extra_resources(graph_connector):
assert extra_resource.file_abs_path == str(test_file)
assert extra_resource.file_path.endswith("playground.bicep")
+
+def test_runner_loop_resource():
+ # given
+ test_file = EXAMPLES_DIR / "loop.bicep"
+
+ # when
+ report = Runner().run(root_folder="", files=[str(test_file)], runner_filter=RunnerFilter(checks=["CKV_AZURE_2"]))
+
+ # then
+ summary = report.get_summary()
+
+ assert summary["passed"] == 1
+ assert summary["failed"] == 0
+ assert summary["skipped"] == 0
+ assert summary["parsing_errors"] == 0
| Checkov Managed Disk Encryption check in Bicep IaC failing
**Describe the issue**
Checkov Managed Disk Encryption check will fail despite having the required check in Bicep code. It will only be successful if both checks are in the code, but need to be hashed out.
**Examples**
```
resource Disks 'Microsoft.Compute/disks@2022-07-02' = [for (disk, i) in dataDisks: {
name: disk.diskName
location: location
tags: tags
sku: {
name: disk.storageAccountType
}
zones: [
avZone
]
properties: {
creationData: {
createOption: 'Empty'
}
diskSizeGB: disk.diskSizeGB
// encryption: {
// type: 'EncryptionAtRestWithCustomerKey'
// diskEncryptionSetId: diskEncryptionSetId
// }
encryption: {
type: 'EncryptionAtRestWithCustomerKey'
diskEncryptionSetId: diskEncryptionSetId
}
// encryptionSettingsCollection: {
// enabled: true
// encryptionSettings: [
// {
// diskEncryptionKey: {
// secretUrl: keyURL
// sourceVault: {
// id: keyVaultId
// }
// }
// }
// ]
// }
}
}]
```
**Version :**
- Latest
**Additional context**
Even if I remove the commented out sections, the check will fail. If I have the "encryptionSettingsCollection" block, the check will fail. It will only work if it is formatted like the above.
| hey @mmassey1993 thanks for reaching out.
The current check logic doesn't consider the `encryption` property, which probably didn't exist in the past. Will adjust it 🙂 | 2023-06-24T18:01:11 | -1.0 |
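A minimal sketch of the adjusted pass condition hinted at in the reply above, with plain dict lookups standing in for checkov's parsed ARM/Bicep config:
```python
def disk_encryption_passes(properties: dict) -> bool:
    if properties.get("encryption"):
        # Presence of an encryption block (e.g. EncryptionAtRestWithCustomerKey) is enough.
        return True
    for key in ("encryptionSettingsCollection", "encryptionSettings"):
        if str(properties.get(key, {}).get("enabled")).lower() == "true":
            return True
    return False


assert disk_encryption_passes(
    {"encryption": {"type": "EncryptionAtRestWithCustomerKey", "diskEncryptionSetId": "exampleSetId"}}
)
assert not disk_encryption_passes({"creationData": {"createOption": "Empty"}})
```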
bridgecrewio/checkov | 5,260 | bridgecrewio__checkov-5260 | [
"5258"
] | 9c11ff89ead7c1adfce25a1f775209df52e6501c | diff --git a/checkov/common/util/secrets.py b/checkov/common/util/secrets.py
--- a/checkov/common/util/secrets.py
+++ b/checkov/common/util/secrets.py
@@ -4,8 +4,6 @@
import json
import logging
import re
-
-# secret categories for use as constants
from typing import Any, TYPE_CHECKING
from checkov.common.models.enums import CheckCategories, CheckResult
@@ -17,7 +15,7 @@
from checkov.common.typing import _CheckResult, ResourceAttributesToOmit
from pycep.typing import ParameterAttributes, ResourceAttributes
-
+# secret categories for use as constants
AWS = 'aws'
AZURE = 'azure'
GCP = 'gcp'
@@ -163,6 +161,10 @@ def omit_secret_value_from_checks(
if key not in resource_masks:
continue
if isinstance(secret, list) and secret:
+ if not isinstance(secret[0], str):
+ logging.debug(f"Secret value can't be masked, has type {type(secret)}")
+ continue
+
secrets.add(secret[0])
if not secrets:
@@ -207,6 +209,10 @@ def omit_secret_value_from_graph_checks(
for attribute, secret in entity_config.items():
if attribute in resource_masks:
if isinstance(secret, list) and secret:
+ if not isinstance(secret[0], str):
+ logging.debug(f"Secret value can't be masked, has type {type(secret)}")
+ continue
+
secrets.add(secret[0])
if not secrets:
| diff --git a/tests/common/utils/test_secrets_utils.py b/tests/common/utils/test_secrets_utils.py
--- a/tests/common/utils/test_secrets_utils.py
+++ b/tests/common/utils/test_secrets_utils.py
@@ -114,6 +114,70 @@ def test_omit_secret_value_from_graph_checks_by_attribute(
assert result == tfplan_resource_lines_without_secrets
+def test_omit_secret_value_from_graph_checks_by_attribute_skip_non_string():
+ # given
+ check = BaseGraphCheck()
+ check.resource_types = ['aws_ssm_parameter']
+ check_result = {'result': CheckResult.FAILED}
+ entity_code_lines = [
+ (22, 'resource "aws_ssm_parameter" "aws_ssm_parameter_foo" {\n'),
+ (23, ' name = "foo"\n'),
+ (24, ' description = "Parameter foo"\n'),
+ (25, ' type = "String"\n'),
+ (26, ' tier = "Advanced"\n'),
+ (27, " value = jsonencode({\n"),
+ (28, ' "foo" : {\n'),
+ (29, ' "hello" : "world",\n'),
+ (30, ' "answer " : 42\n'),
+ (31, " }\n"),
+ (32, " })\n"),
+ (33, "}\n"),
+ ]
+ entity_config = {
+ "__address__": "aws_ssm_parameter.aws_ssm_parameter_foo",
+ "__end_line__": 33,
+ "__start_line__": 22,
+ "description": ["Parameter foo"],
+ "name": ["foo"],
+ "tier": ["Advanced"],
+ "type": ["String"],
+ "value": [
+ {
+ "foo": {
+ "answer ": 42,
+ "hello": "world",
+ }
+ }
+ ],
+ }
+ resource_attributes_to_omit = {'aws_ssm_parameter': {'value'}}
+
+ # when
+ result = omit_secret_value_from_graph_checks(
+ check=check,
+ check_result=check_result,
+ entity_code_lines=entity_code_lines,
+ entity_config=entity_config,
+ resource_attributes_to_omit=resource_attributes_to_omit
+ )
+
+ # then
+ assert result == [
+ (22, 'resource "aws_ssm_parameter" "aws_ssm_parameter_foo" {\n'),
+ (23, ' name = "foo"\n'),
+ (24, ' description = "Parameter foo"\n'),
+ (25, ' type = "String"\n'),
+ (26, ' tier = "Advanced"\n'),
+ (27, " value = jsonencode({\n"),
+ (28, ' "foo" : {\n'),
+ (29, ' "hello" : "world",\n'),
+ (30, ' "answer " : 42\n'),
+ (31, " }\n"),
+ (32, " })\n"),
+ (33, "}\n"),
+ ]
+
+
def test_omit_secret_value_from_checks_by_attribute_runner_filter_resource_config(
tfplan_resource_lines_with_secrets,
tfplan_resource_config_with_secrets,
| Secrets check crashes when it encounters a dict
**Describe the issue**
A Checkov scan is throwing an error because of a dictionary structure in the target files.
**Examples**
In a Terraform file, add the following resource:
```
resource "aws_ssm_parameter" "aws_ssm_parameter_foo" {
name = "foo"
description = "Parameter foo"
type = "String"
tier = "Advanced"
value = jsonencode({
"foo" : {
"hello" : "world",
"answer " : 42
}
})
}
```
**Exception Trace**
```
File "/Python311/Lib/concurrent/futures/_base.py", line 401, in __get_result
raise self._exception
File "/Python311/Lib/concurrent/futures/thread.py", line 58, in run
result = self.fn(*self.args, **self.kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Python311/site-packages/checkov/common/runners/runner_registry.py", line 125, in _parallel_run
report = runner.run(
^^^^^^^^^^^
File "/Python311/site-packages/checkov/terraform/runner.py", line 169, in run
graph_report = self.get_graph_checks_report(root_folder, runner_filter)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Python311/site-packages/checkov/terraform/runner.py", line 256, in get_graph_checks_report
censored_code_lines = omit_secret_value_from_graph_checks(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/Python311/site-packages/checkov/common/util/secrets.py", line 213, in omit_secret_value_from_graph_checks
secrets.add(secret[0])
TypeError: unhashable type: 'dict'
```
**Desktop (please complete the following information):**
- OS: Windows 10 Pro for Workstation
- Checkov Version 2.3.301
**Additional context**
None
| hey @SKisContent thanks for reaching out.
I think you are using the `--mask` flag in `checkov` right? I will fix the crash, but it won't mask the value, just ignore it.
Thanks for looking into it! AFAIK, I am not using the `--mask` option. My flags are `--quiet` and `--compact`.
Interesting, can't reproduce it by just running `checkov`. Needed to use the `--mask` flag 😅 | 2023-06-26T19:51:10 | -1.0 |
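A self-contained sketch of the guard applied in the patch above: only hashable string values are collected for masking, so a dict produced by `jsonencode(...)` is skipped instead of raising the `TypeError: unhashable type: 'dict'` seen in the trace. The helper name is hypothetical.

```python
# Only string secrets can go into a set; anything else is logged and ignored,
# mirroring the isinstance guard added in checkov/common/util/secrets.py above.
import logging


def collect_maskable_secrets(values: list) -> set:
    secrets: set = set()
    for value in values:
        if isinstance(value, list) and value:
            if not isinstance(value[0], str):
                logging.debug("Secret value can't be masked, has type %s", type(value[0]))
                continue
            secrets.add(value[0])
    return secrets


if __name__ == "__main__":
    # The string is collected for masking, the dict value (e.g. from jsonencode) is ignored.
    print(collect_maskable_secrets([["super-secret"], [{"foo": {"answer": 42}}]]))
```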
bridgecrewio/checkov | 5,275 | bridgecrewio__checkov-5275 | [
"5270"
] | b377e91237fe578ceb521d93051be5975f02fd5b | diff --git a/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py b/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/gcp/GKELegacyInstanceMetadataDisabled.py
+++ /dev/null
@@ -1,36 +0,0 @@
-from checkov.common.models.enums import CheckResult, CheckCategories
-from checkov.common.util.type_forcers import force_float
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-
-
-class GKELegacyInstanceMetadataDisabled(BaseResourceValueCheck):
-
- def __init__(self):
- name = "Ensure legacy Compute Engine instance metadata APIs are Disabled"
- id = "CKV_GCP_67"
- supported_resources = ['google_container_cluster']
- categories = [CheckCategories.KUBERNETES]
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def scan_resource_conf(self, conf):
- """
- looks for min_master_version =1.12 which ensures that legacy metadata endpoints are disabled
- https://www.terraform.io/docs/providers/google/r/compute_ssl_policy.html
- :param conf: google_container_cluster configuration
- :return: <CheckResult>
- """
- if 'min_master_version' in conf:
- min_master_version = force_float(conf.get('min_master_version')[0])
- if min_master_version and min_master_version >= 1.12:
- return CheckResult.PASSED
-
- return CheckResult.FAILED
-
- def get_inspected_key(self):
- return 'min_master_version'
-
- def get_expected_value(self):
- return "1.12"
-
-
-check = GKELegacyInstanceMetadataDisabled()
| diff --git a/tests/terraform/checks/resource/gcp/test_GKELegacyInstanceMetadataDisabled.py b/tests/terraform/checks/resource/gcp/test_GKELegacyInstanceMetadataDisabled.py
deleted file mode 100644
--- a/tests/terraform/checks/resource/gcp/test_GKELegacyInstanceMetadataDisabled.py
+++ /dev/null
@@ -1,41 +0,0 @@
-import unittest
-import os
-
-from checkov.terraform.checks.resource.gcp.GKELegacyInstanceMetadataDisabled import check
-from checkov.runner_filter import RunnerFilter
-from checkov.terraform.runner import Runner
-
-
-class TestGKELegacyInstanceMetadataDisabled(unittest.TestCase):
-
- def test(self):
- runner = Runner()
- current_dir = os.path.dirname(os.path.realpath(__file__))
-
- test_files_dir = current_dir + "/test_GKELegacyInstanceMetadataDisabled"
- report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
- summary = report.get_summary()
-
- passing_resources = {
- 'google_container_cluster.success1',
- 'google_container_cluster.success2',
- }
- failing_resources = {
- 'google_container_cluster.fail1',
- 'google_container_cluster.fail2'
- }
-
- passed_check_resources = set([c.resource for c in report.passed_checks])
- failed_check_resources = set([c.resource for c in report.failed_checks])
-
- self.assertEqual(summary['passed'], 2)
- self.assertEqual(summary['failed'], 2)
- self.assertEqual(summary['skipped'], 0)
- self.assertEqual(summary['parsing_errors'], 0)
-
- self.assertEqual(passing_resources, passed_check_resources)
- self.assertEqual(failing_resources, failed_check_resources)
-
-
-if __name__ == '__main__':
- unittest.main()
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/gcp/test_GKELegacyInstanceMetadataDisabled/main.tf b/tests/terraform/checks/resource/gcp/test_GKELegacyInstanceMetadataDisabled/main.tf
deleted file mode 100644
--- a/tests/terraform/checks/resource/gcp/test_GKELegacyInstanceMetadataDisabled/main.tf
+++ /dev/null
@@ -1,91 +0,0 @@
-
-resource "google_container_cluster" "fail1" {
- name = var.name
- location = var.location
- initial_node_count = 1
- project = data.google_project.project.name
-
- network = var.network
- subnetwork = var.subnetwork
-
- ip_allocation_policy {
- cluster_ipv4_cidr_block = var.ip_allocation_policy["cluster_ipv4_cidr_block"]
- cluster_secondary_range_name = var.ip_allocation_policy["cluster_secondary_range_name"]
- services_ipv4_cidr_block = var.ip_allocation_policy["services_ipv4_cidr_block"]
- services_secondary_range_name = var.ip_allocation_policy["services_secondary_range_name"]
- }
-}
-
-resource "google_container_cluster" "fail2" {
- name = var.name
- location = var.location
- initial_node_count = 1
- project = data.google_project.project.name
-
- network = var.network
- subnetwork = var.subnetwork
- min_master_version = "1.11"
-
- ip_allocation_policy {
- cluster_ipv4_cidr_block = var.ip_allocation_policy["cluster_ipv4_cidr_block"]
- cluster_secondary_range_name = var.ip_allocation_policy["cluster_secondary_range_name"]
- services_ipv4_cidr_block = var.ip_allocation_policy["services_ipv4_cidr_block"]
- services_secondary_range_name = var.ip_allocation_policy["services_secondary_range_name"]
- }
-
- node_config {
- workload_metadata_config {
- node_metadata = "GKE_METADATA_SERVER"
- }
- }
-}
-
-
-resource "google_container_cluster" "success1" {
- name = var.name
- location = var.location
- initial_node_count = 1
- project = data.google_project.project.name
-
- network = var.network
- subnetwork = var.subnetwork
- min_master_version = 1.12
-
- ip_allocation_policy {
- cluster_ipv4_cidr_block = var.ip_allocation_policy["cluster_ipv4_cidr_block"]
- cluster_secondary_range_name = var.ip_allocation_policy["cluster_secondary_range_name"]
- services_ipv4_cidr_block = var.ip_allocation_policy["services_ipv4_cidr_block"]
- services_secondary_range_name = var.ip_allocation_policy["services_secondary_range_name"]
- }
-
- node_config {
- workload_metadata_config {
- node_metadata = "GKE_METADATA_SERVER"
- }
- }
-}
-
-
-resource "google_container_cluster" "success2" {
- name = var.name
- location = var.location
- initial_node_count = 1
- project = data.google_project.project.name
-
- network = var.network
- subnetwork = var.subnetwork
- min_master_version = 1.13
-
- ip_allocation_policy {
- cluster_ipv4_cidr_block = var.ip_allocation_policy["cluster_ipv4_cidr_block"]
- cluster_secondary_range_name = var.ip_allocation_policy["cluster_secondary_range_name"]
- services_ipv4_cidr_block = var.ip_allocation_policy["services_ipv4_cidr_block"]
- services_secondary_range_name = var.ip_allocation_policy["services_secondary_range_name"]
- }
-
- node_config {
- workload_metadata_config {
- node_metadata = "GKE_METADATA_SERVER"
- }
- }
-}
diff --git a/tests/terraform/runner/test_runner.py b/tests/terraform/runner/test_runner.py
--- a/tests/terraform/runner/test_runner.py
+++ b/tests/terraform/runner/test_runner.py
@@ -342,6 +342,9 @@ def test_no_missing_ids(self):
if f'CKV_GCP_{i}' == 'CKV_GCP_5':
# CKV_GCP_5 is no longer a valid platform check
continue
+ if f'CKV_GCP_{i}' == 'CKV_GCP_67':
+ # CKV_GCP_67 is not deployable anymore https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#protect_node_metadata
+ continue
self.assertIn(f'CKV_GCP_{i}', gcp_checks, msg=f'The new GCP violation should have the ID "CKV_GCP_{i}"')
| Deprecate CKV_GCP_67
**Describe the issue**
CKV_GCP_67: https://docs.bridgecrew.io/docs/ensure-legacy-compute-engine-instance-metadata-apis-are-disabled
https://cloud.google.com/kubernetes-engine/docs/how-to/hardening-your-cluster#protect_node_metadata
As per this article: `The v0.1 and v1beta1 Compute Engine metadata server endpoints were deprecated and shutdown on September 30, 2020.`
| hey @fleroux514 thanks for reaching out.
Seems like it is time to retire the check 😄 | 2023-06-30T18:36:35 | -1.0 |
bridgecrewio/checkov | 5,301 | bridgecrewio__checkov-5301 | [
"5297"
] | 88e36ebd6a31487bd7ff8ec05ba866c20cc72f79 | diff --git a/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py b/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py
--- a/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py
+++ b/checkov/terraform/checks/resource/aws/SecretManagerSecret90days.py
@@ -1,23 +1,27 @@
+from __future__ import annotations
+from typing import Any
+
+from checkov.common.util.type_forcers import force_int
from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
from checkov.common.models.enums import CheckCategories, CheckResult
class SecretManagerSecret90days(BaseResourceCheck):
-
- def __init__(self):
+ def __init__(self) -> None:
name = "Ensure Secrets Manager secrets should be rotated within 90 days"
id = "CKV_AWS_304"
- supported_resources = ["aws_secretsmanager_secret_rotation"]
- categories = [CheckCategories.GENERAL_SECURITY]
+ supported_resources = ("aws_secretsmanager_secret_rotation",)
+ categories = (CheckCategories.GENERAL_SECURITY,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def scan_resource_conf(self, conf) -> CheckResult:
- if conf.get("rotation_rules") and isinstance(conf.get("rotation_rules"), list):
- rule = conf.get("rotation_rules")[0]
- if rule.get('automatically_after_days') and isinstance(rule.get('automatically_after_days'), list):
- days = rule.get('automatically_after_days')[0]
- if days < 90:
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ rules = conf.get("rotation_rules")
+ if rules and isinstance(rules, list):
+ days = rules[0].get('automatically_after_days')
+ if days and isinstance(days, list):
+ days = force_int(days[0])
+ if days is not None and days < 90:
return CheckResult.PASSED
return CheckResult.FAILED
| diff --git a/tests/terraform/checks/resource/aws/example_SecretManagerSecret90days/main.tf b/tests/terraform/checks/resource/aws/example_SecretManagerSecret90days/main.tf
--- a/tests/terraform/checks/resource/aws/example_SecretManagerSecret90days/main.tf
+++ b/tests/terraform/checks/resource/aws/example_SecretManagerSecret90days/main.tf
@@ -14,4 +14,13 @@ resource "aws_secretsmanager_secret_rotation" "fail" {
rotation_rules {
automatically_after_days = 90
}
-}
\ No newline at end of file
+}
+
+resource "aws_secretsmanager_secret_rotation" "fail_2" {
+ secret_id = aws_secretsmanager_secret.example.id
+ rotation_lambda_arn = aws_lambda_function.example.arn
+
+ rotation_rules {
+ automatically_after_days = var.days
+ }
+}
diff --git a/tests/terraform/checks/resource/aws/test_SecretManagerSecret90days.py b/tests/terraform/checks/resource/aws/test_SecretManagerSecret90days.py
--- a/tests/terraform/checks/resource/aws/test_SecretManagerSecret90days.py
+++ b/tests/terraform/checks/resource/aws/test_SecretManagerSecret90days.py
@@ -20,10 +20,11 @@ def test(self):
}
failing_resources = {
"aws_secretsmanager_secret_rotation.fail",
+ "aws_secretsmanager_secret_rotation.fail_2",
}
- passed_check_resources = set([c.resource for c in report.passed_checks])
- failed_check_resources = set([c.resource for c in report.failed_checks])
+ passed_check_resources = {c.resource for c in report.passed_checks}
+ failed_check_resources = {c.resource for c in report.failed_checks}
self.assertEqual(summary["passed"], len(passing_resources))
self.assertEqual(summary["failed"], len(failing_resources))
| TypeError in SecretManagerSecret90days
**Describe the issue**
While running a scan on TF code, I'm getting a TypeError
**Examples**
The relevant TF code is:
```
resource "aws_secretsmanager_secret_rotation" "rds_password_rotation" {
secret_id = aws_secretsmanager_secret.credentials.id
rotation_lambda_arn = "arn:..."
rotation_rules {
automatically_after_days = var.db_password_rotation_days
}
}
variable "db_password_rotation_days" {
description = "Number of days in which the RDS password will be rotated"
type = number
}
```
**Exception Trace**
```
Failed to run check CKV_AWS_304 on rds.tf:aws_secretsmanager_secret_rotation.rds_password_rotation
Traceback (most recent call last):
File "\venv\Lib\site-packages\checkov\common\checks\base_check.py", line 73, in run
check_result["result"] = self.scan_entity_conf(entity_configuration, entity_type)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "\venv\Lib\site-packages\checkov\terraform\checks\resource\base_resource_check.py", line 43, in scan_entity_conf
return self.scan_resource_conf(conf)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "\venv\Lib\site-packages\checkov\terraform\checks\resource\aws\SecretManagerSecret90days.py", line 20, in scan_resource_conf
if days < 90:
^^^^^^^^^
TypeError: '<' not supported between instances of 'str' and 'int'
```
**Desktop (please complete the following information):**
- OS: Windows 10 for Workstation
- Checkov Version 2.3.301
**Additional context**
I inspected the value of `days` at the line causing the error and it is the string `var.db_password_rotation_days`.
| 2023-07-06T19:15:57 | -1.0 |
|
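An illustrative sketch of the defensive comparison used in the fix above: coerce the configured value to an int before comparing, so an unresolved variable reference such as the string `var.db_password_rotation_days` no longer raises a TypeError. The helper and function names are assumptions, not checkov's exact code.

```python
# Coerce `automatically_after_days` safely; non-numeric values fall through to FAILED.
from typing import Any, Optional


def to_int(value: Any) -> Optional[int]:
    try:
        return int(value)
    except (TypeError, ValueError):
        return None


def rotation_within_90_days(conf: dict) -> str:
    rules = conf.get("rotation_rules")
    if rules and isinstance(rules, list):
        days = rules[0].get("automatically_after_days")
        if days and isinstance(days, list):
            days_int = to_int(days[0])
            if days_int is not None and days_int < 90:
                return "PASSED"
    return "FAILED"


if __name__ == "__main__":
    print(rotation_within_90_days({"rotation_rules": [{"automatically_after_days": [30]}]}))                               # PASSED
    print(rotation_within_90_days({"rotation_rules": [{"automatically_after_days": ["var.db_password_rotation_days"]}]}))  # FAILED, no crash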
bridgecrewio/checkov | 5,302 | bridgecrewio__checkov-5302 | [
"5300"
] | f9967cad22c794e98216efa38b10f00ea35afb77 | diff --git a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
--- a/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
+++ b/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py
@@ -17,6 +17,16 @@ def __init__(self) -> None:
block_type=BlockType.DOCUMENT)
def scan_entity_conf(self, conf: dict[str, Any], entity_type: str) -> tuple[CheckResult, dict[str, Any]]: # type:ignore[override] # return type is different than the base class
+ schemes = conf.get("schemes")
+ if schemes and isinstance(schemes, list):
+ if "http" not in schemes and "wp" not in schemes:
+ return CheckResult.PASSED, conf
+
+ servers = conf.get("servers")
+ if servers and isinstance(servers, list):
+ if not any(server['url'].startswith('http://') for server in servers):
+ return CheckResult.PASSED, conf
+
components = conf.get("components")
security_def = conf.get("securityDefinitions")
if components and isinstance(components, dict):
| diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail3.json b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail3.json
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail3.json
@@ -0,0 +1,42 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "Simple API overview",
+ "version": "1.0.0"
+ },
+ "schemes": [
+ "https",
+ "http"
+ ],
+ "paths": {
+ "/pets": {
+ "post": {
+ "description": "Creates a new pet in the store",
+ "responses": {
+ "200": {
+ "description": "200 response"
+ }
+ },
+ "operationId": "addPet",
+ "security": [
+ {
+ "apiKey1": [],
+ "apiKey3": []
+ }
+ ]
+ }
+ }
+ },
+ "securityDefinitions": {
+ "apiKey1": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "header"
+ },
+ "apiKey3": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "query"
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail3.yaml b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail3.yaml
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail3.yaml
@@ -0,0 +1,27 @@
+swagger: "2.0"
+info:
+ title: Simple API overview
+ version: 1.0.0
+schemes:
+ - https
+ - http
+paths:
+ /pets:
+ post:
+ description: Creates a new pet in the store
+ responses:
+ "200":
+ description: 200 response
+ operationId: addPet
+ security:
+ - apiKey1: []
+ apiKey3: []
+securityDefinitions:
+ apiKey1:
+ type: apiKey
+ name: X-API-Key
+ in: header
+ apiKey3:
+ type: apiKey
+ name: X-API-Key
+ in: query
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail4.json b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail4.json
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail4.json
@@ -0,0 +1,56 @@
+{
+ "openapi": "3.0.0",
+ "info": {
+ "title": "Simple API overview",
+ "version": "1.0.0"
+ },
+ "servers": [
+ {
+ "url": "https://localhost:8000",
+ "description": "Local server"
+ },
+ {
+ "url": "http://example.com",
+ "description": "Example"
+ }
+ ],
+ "paths": {
+ "/pets": {
+ "post": {
+ "description": "Creates a new pet in the store",
+ "responses": {
+ "200": {
+ "description": "200 response"
+ }
+ },
+ "operationId": "addPet",
+ "security": [
+ {
+ "apiKey1": [],
+ "apiKey2": [],
+ "apiKey3": []
+ }
+ ]
+ }
+ }
+ },
+ "components": {
+ "securitySchemes": {
+ "apiKey1": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "header"
+ },
+ "apiKey2": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "cookie"
+ },
+ "apiKey3": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "query"
+ }
+ }
+ }
+}
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail4.yaml b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail4.yaml
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail4.yaml
@@ -0,0 +1,35 @@
+openapi: 3.0.0
+info:
+ title: Simple API overview
+ version: 1.0.0
+servers:
+ - url: https://localhost:8000
+ description: Local server
+ - url: http://example.com
+ description: example
+paths:
+ /pets:
+ post:
+ description: Creates a new pet in the store
+ responses:
+ '200':
+ description: 200 response
+ operationId: addPet
+ security:
+ - apiKey1: []
+ apiKey2: []
+ apiKey3: []
+components:
+ securitySchemes:
+ apiKey1:
+ type: apiKey
+ name: X-API-Key
+ in: header
+ apiKey2:
+ type: apiKey
+ name: X-API-Key
+ in: cookie
+ apiKey3:
+ type: apiKey
+ name: X-API-Key
+ in: query
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass3.json b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass3.json
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass3.json
@@ -0,0 +1,41 @@
+{
+ "swagger": "2.0",
+ "info": {
+ "title": "Simple API overview",
+ "version": "1.0.0"
+ },
+ "schemes": [
+ "https"
+ ],
+ "paths": {
+ "/pets": {
+ "post": {
+ "description": "Creates a new pet in the store",
+ "responses": {
+ "200": {
+ "description": "200 response"
+ }
+ },
+ "operationId": "addPet",
+ "security": [
+ {
+ "apiKey1": [],
+ "apiKey3": []
+ }
+ ]
+ }
+ }
+ },
+ "securityDefinitions": {
+ "apiKey1": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "header"
+ },
+ "apiKey3": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "query"
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass3.yaml b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass3.yaml
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass3.yaml
@@ -0,0 +1,26 @@
+swagger: "2.0"
+info:
+ title: Simple API overview
+ version: 1.0.0
+schemes:
+ - https
+paths:
+ /pets:
+ post:
+ description: Creates a new pet in the store
+ responses:
+ "200":
+ description: 200 response
+ operationId: addPet
+ security:
+ - apiKey1: []
+ apiKey3: []
+securityDefinitions:
+ apiKey1:
+ type: apiKey
+ name: X-API-Key
+ in: header
+ apiKey3:
+ type: apiKey
+ name: X-API-Key
+ in: query
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass4.json b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass4.json
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass4.json
@@ -0,0 +1,52 @@
+{
+ "openapi": "3.0.0",
+ "info": {
+ "title": "Simple API overview",
+ "version": "1.0.0"
+ },
+ "servers": [
+ {
+ "url": "https://localhost:8000",
+ "description": "Local server"
+ }
+ ],
+ "paths": {
+ "/pets": {
+ "post": {
+ "description": "Creates a new pet in the store",
+ "responses": {
+ "200": {
+ "description": "200 response"
+ }
+ },
+ "operationId": "addPet",
+ "security": [
+ {
+ "apiKey1": [],
+ "apiKey2": [],
+ "apiKey3": []
+ }
+ ]
+ }
+ }
+ },
+ "components": {
+ "securitySchemes": {
+ "apiKey1": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "header"
+ },
+ "apiKey2": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "cookie"
+ },
+ "apiKey3": {
+ "type": "apiKey",
+ "name": "X-API-Key",
+ "in": "query"
+ }
+ }
+ }
+}
diff --git a/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass4.yaml b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass4.yaml
new file mode 100644
--- /dev/null
+++ b/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/pass4.yaml
@@ -0,0 +1,33 @@
+openapi: 3.0.0
+info:
+ title: Simple API overview
+ version: 1.0.0
+servers:
+ - url: https://localhost:8000
+ description: Local server
+paths:
+ /pets:
+ post:
+ description: Creates a new pet in the store
+ responses:
+ '200':
+ description: 200 response
+ operationId: addPet
+ security:
+ - apiKey1: []
+ apiKey2: []
+ apiKey3: []
+components:
+ securitySchemes:
+ apiKey1:
+ type: apiKey
+ name: X-API-Key
+ in: header
+ apiKey2:
+ type: apiKey
+ name: X-API-Key
+ in: cookie
+ apiKey3:
+ type: apiKey
+ name: X-API-Key
+ in: query
diff --git a/tests/openapi/checks/resource/generic/test_ClearTextAPIKey.py b/tests/openapi/checks/resource/generic/test_ClearTextAPIKey.py
--- a/tests/openapi/checks/resource/generic/test_ClearTextAPIKey.py
+++ b/tests/openapi/checks/resource/generic/test_ClearTextAPIKey.py
@@ -23,12 +23,20 @@ def test_summary(self):
"/pass.json",
"/pass2.yaml",
"/pass2.json",
+ "/pass3.yaml",
+ "/pass3.json",
+ "/pass4.yaml",
+ "/pass4.json",
}
failing_resources = {
"/fail.yaml",
"/fail.json",
"/fail2.yaml",
"/fail2.json",
+ "/fail3.yaml",
+ "/fail3.json",
+ "/fail4.yaml",
+ "/fail4.json",
}
passed_check_resources = {c.file_path for c in report.passed_checks}
| CKV_OPENAPI_20 incorrectly flags API keys via HTTPS
**Describe the issue**
#5253 added CKV_OPENAPI_20 with the message "Ensure that API keys are not sent over cleartext", but the [check](https://github.com/bridgecrewio/checkov/blob/main/checkov/openapi/checks/resource/generic/ClearTextAPIKey.py) does not check the API's supported schemes.
If the intent of this check is to prevent cleartext use of API keys, then if the root level [`schemes`](https://swagger.io/docs/specification/2-0/api-host-and-base-path/) key in OpenAPI 2.0 or [`servers`](https://swagger.io/docs/specification/api-host-and-base-path/) key in OpenAPI 3.0 specifies only `https` (2.0) or only `url`s which are HTTPS (3.0), this check should pass.
**Examples**
[fail2.json](https://github.com/bridgecrewio/checkov/blob/main/tests/openapi/checks/resource/generic/example_ClearTextAPIKey/fail2.json) and its YAML counterpart should fail, but if they specified `"schemes": ["https"]`, they should not.
Ditto for the OpenAPI 3.0 version of this example.
**Version (please complete the following information):**
2.3.312
**Additional context**
It may be that the message is wrong and you actually intend to flag all use of API keys, but if that's the case, the message should convey that. I also would argue that header API keys should not be scrutinized the same way as query parameter API keys, since the risk of leaking the API key unintentionally is higher with the latter.
| hey @ziggythehamster thanks for reaching out.
Yeah, the part about sending the token via header shouldn't be flagged, will check it with the creator of the check @tsmithv11 | 2023-07-06T21:34:20 | -1.0 |
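A hedged sketch of the transport check the reporter asks for, simplified from the patch above: treat the document as not sending keys in cleartext when no plain-HTTP scheme or server is declared. The function name and the exact set of cleartext schemes are assumptions for illustration, not checkov's final implementation.

```python
# OpenAPI 2.0 declares transports via `schemes`; OpenAPI 3.x via `servers[].url`.
from typing import Any, Dict


def declares_only_https(conf: Dict[str, Any]) -> bool:
    schemes = conf.get("schemes")  # Swagger / OpenAPI 2.0
    if schemes and isinstance(schemes, list):
        return "http" not in schemes

    servers = conf.get("servers")  # OpenAPI 3.x
    if servers and isinstance(servers, list):
        return not any(str(server.get("url", "")).startswith("http://") for server in servers)

    return False  # nothing declared, fall back to inspecting the API key definitions


if __name__ == "__main__":
    print(declares_only_https({"schemes": ["https"]}))                        # True -> check can pass early
    print(declares_only_https({"servers": [{"url": "http://example.com"}]}))  # False -> keep flagging
```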
bridgecrewio/checkov | 5,336 | bridgecrewio__checkov-5336 | [
"5331"
] | b0ccfcae13c3237a88b887f64b3cd4732082837d | diff --git a/checkov/common/output/sarif.py b/checkov/common/output/sarif.py
--- a/checkov/common/output/sarif.py
+++ b/checkov/common/output/sarif.py
@@ -3,6 +3,7 @@
import itertools
import json
from typing import TYPE_CHECKING, Any
+from urllib.parse import quote
from checkov.common.models.enums import CheckResult
from checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES
@@ -221,7 +222,7 @@ def _create_results(self) -> list[dict[str, Any]]:
"locations": [
{
"physicalLocation": {
- "artifactLocation": {"uri": record.repo_file_path.lstrip("/")},
+ "artifactLocation": {"uri": quote(record.repo_file_path.lstrip("/"))},
"region": {
"startLine": int(record.file_line_range[0]) or 1,
"endLine": int(record.file_line_range[1]) or 1,
| diff --git a/tests/common/output/test_sarif_report.py b/tests/common/output/test_sarif_report.py
--- a/tests/common/output/test_sarif_report.py
+++ b/tests/common/output/test_sarif_report.py
@@ -33,7 +33,7 @@ def test_valid_passing_valid_testcases(self):
resource="aws_ebs_volume.web_host_storage",
evaluations=None,
check_class=None,
- file_abs_path="./ec2.tf",
+ file_abs_path="/path to/ec2.tf", # spaces should be handled correctly
entity_tags={"tag1": "value1"},
)
record2.set_guideline("https://docs.bridgecrew.io/docs/general_7")
@@ -125,7 +125,7 @@ def test_valid_passing_valid_testcases(self):
"locations": [
{
"physicalLocation": {
- "artifactLocation": {"uri": "ec2.tf"},
+ "artifactLocation": {"uri": "path%20to/ec2.tf"},
"region": {
"startLine": 5,
"endLine": 7,
| Sarif report creates invalid uri for folder with spaces
**Describe the issue**
SonarQube will not import the SARIF report from Checkov correctly because of an invalid URI in the SARIF output.
1) Scan folders with spaces that have some issues, for example:
Secrets/Access Tokens/Azure/main.tf
2) Output the result as SARIF
3) The resulting file is not valid SARIF due to an invalid URI
The field Secrets/Access Tokens/Azure/main.tf corresponds to the results/locations/physicalLocation/artifactLocation/uri object in the SARIF report. There is a space character in the URI. This is not expected; the URI field shouldn’t have any spaces.
This violates the URI specification, which forbids spaces in URIs.
Because of this problem, the import of issues in directories with spaces will fail in SonarQube and possibly other tools.
| hey @Jiri-Stary thanks for reaching out.
Sure, we can remove or replace the space. I couldn't find any official recommendation for it, any thoughts? Either removing it or replacing it with `%20`.
@gruebel
please replace it with %20 so that it can be mapped back to the original source code.
Removing it would break the mapping when the report is imported by other tools
I believe there is an RFC defining what a URI should look like - RFC 3986
I believe %20 is correct
https://datatracker.ietf.org/doc/html/rfc3986
_For example, consider an information service that provides data,
stored locally using an EBCDIC-based file system, to clients on the
Internet through an HTTP server. When an author creates a file with
the name "Laguna Beach" on that file system, the "http" URI
corresponding to that resource is expected to contain the meaningful
string "Laguna%20Beach"._
I think the proper approach is to use a function that creates a valid URI.
Something like this:
There is a function in urllib called urllib.parse.quote which escapes special characters in URLs, replacing them with their percent-encoded equivalents.
There are some online SARIF validators that check this as well - https://sarifweb.azurewebsites.net/Validation
You can check whether the resulting file is valid SARIF - it reports this issue as well
Sounds good, I also planned to use `urllib.parse.quote()` to do the proper encoding
> There are some online SARIF validators that check this as well - https://sarifweb.azurewebsites.net/Validation You can check whether the resulting file is valid SARIF - it reports this issue as well
yeah, I tested it already and it doesn't complain about the percent encoding. | 2023-07-14T13:29:09 | -1.0 |
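The approach agreed on above, shown as a tiny runnable example: percent-encode the artifactLocation URI with `urllib.parse.quote` so a path containing spaces stays a valid SARIF URI (the `/` separator is kept as-is by `quote`'s default safe characters).

```python
from urllib.parse import quote

repo_file_path = "/Secrets/Access Tokens/Azure/main.tf"

artifact_location = {"uri": quote(repo_file_path.lstrip("/"))}
print(artifact_location)  # {'uri': 'Secrets/Access%20Tokens/Azure/main.tf'}
```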
bridgecrewio/checkov | 5,370 | bridgecrewio__checkov-5370 | [
"5365"
] | fe5d9762a1e2e67b44ccbfd3ffade0012bc0162c | diff --git a/checkov/yaml_doc/base_registry.py b/checkov/yaml_doc/base_registry.py
--- a/checkov/yaml_doc/base_registry.py
+++ b/checkov/yaml_doc/base_registry.py
@@ -53,10 +53,7 @@ def _scan_yaml_array(
if isinstance(item, str):
item = self.set_lines_for_item(item)
if STARTLINE_MARK != item and ENDLINE_MARK != item:
- skip_info: "_SkippedCheck" = {}
- if skip_infos and skip_infos[0]:
- # multiple items could be found, so we need to skip the correct one(s)
- skip_info = ([skip for skip in skip_infos if item[STARTLINE_MARK] <= skip["line_number"] <= item[ENDLINE_MARK]] or [{}])[0]
+ skip_info = self._collect_inline_suppression_in_array(item=item, skip_infos=skip_infos)
self.update_result(
check,
@@ -347,3 +344,27 @@ def set_lines_for_item(self, item: str) -> dict[int | str, str | int] | str:
break
return item_dict
+
+ def _collect_inline_suppression_in_array(self, item: Any, skip_infos: list[_SkippedCheck]) -> _SkippedCheck:
+ if skip_infos and skip_infos[0]:
+ if isinstance(item, dict):
+ # multiple items could be found, so we need to skip the correct one(s)
+ skip_info = [
+ skip for skip in skip_infos if item[STARTLINE_MARK] <= skip["line_number"] <= item[ENDLINE_MARK]
+ ]
+ if skip_info:
+ return skip_info[0]
+ elif isinstance(item, list):
+ # depending on the check a list of uncomplaint items can be found and need to be correctly matched
+ for sub_item in item:
+ if isinstance(sub_item, dict):
+ # only one of the list items need to be matched
+ skip_info = [
+ skip
+ for skip in skip_infos
+ if sub_item[STARTLINE_MARK] <= skip["line_number"] <= sub_item[ENDLINE_MARK]
+ ]
+ if skip_info:
+ return skip_info[0]
+
+ return {} # nothing found
| diff --git a/tests/gitlab_ci/resources/rules/.gitlab-ci.yml b/tests/gitlab_ci/resources/rules/.gitlab-ci.yml
--- a/tests/gitlab_ci/resources/rules/.gitlab-ci.yml
+++ b/tests/gitlab_ci/resources/rules/.gitlab-ci.yml
@@ -14,3 +14,12 @@ planOnlySubset:
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_PIPELINE_SOURCE != "schedule"
when: manual
allow_failure: true
+
+jobSkip:
+ script: echo "This job creates double pipelines!"
+ rules:
+ - changes:
+ - $DOCKERFILES_DIR/*
+ # checkov:skip=CKV_GITLABCI_2: Ignore
+ - if: $CI_PIPELINE_SOURCE == "push"
+ - if: $CI_PIPELINE_SOURCE == "merge_request_event"
diff --git a/tests/gitlab_ci/test_runner.py b/tests/gitlab_ci/test_runner.py
--- a/tests/gitlab_ci/test_runner.py
+++ b/tests/gitlab_ci/test_runner.py
@@ -22,7 +22,7 @@ def test_runner(self):
self.assertEqual(len(report.failed_checks), 5)
self.assertEqual(report.parsing_errors, [])
self.assertEqual(len(report.passed_checks), 9)
- self.assertEqual(report.skipped_checks, [])
+ self.assertEqual(len(report.skipped_checks), 1)
report.print_console()
def test_runner_honors_enforcement_rules(self):
| YAML: Skip comment crashes checkov
**Describe the issue**
Checkov crashes while examining a skip rule when linting a Gitlab-CI file.
```sh
File "/usr/local/lib/python3.11/site-packages/checkov/yaml_doc/base_registry.py", line 59, in <listcomp>
skip_info = ([skip for skip in skip_infos if item[STARTLINE_MARK] <= skip["line_number"] <= item[ENDLINE_MARK]] or [{}])[0]
~~~~^^^^^^^^^^^^^^^^
TypeError: list indices must be integers or slices, not str
```
**Examples**
Create the following file in an empty directory:
_.gitlab-ci.yml_
```yaml
foo:
# checkov:skip=CKV_GITLABCI_2:False positive. Does not create double pipelines.
rules:
- if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
when: never
- if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_PIPELINE_SOURCE != "schedule"
when: manual
allow_failure: true
```
Affected code line:
https://github.com/bridgecrewio/checkov/blob/2162cb3696b7b35e024001a9a55e6e38a79f841f/checkov/yaml_doc/base_registry.py#L59C34-L59C34
**Exception Trace**
Please share the trace for the exception and all relevant output by checkov.
To maximize the understanding, please run checkov with LOG_LEVEL set to debug
as follows:
```sh
root@94028c03dcac:/code/aaa# LOG_LEVEL=DEBUG checkov -d .
2023-07-20 09:36:42,886 [MainThread ] [DEBUG] Leveraging the bundled IAM Definition.
2023-07-20 09:36:42,887 [MainThread ] [DEBUG] Leveraging the IAM definition at /usr/local/lib/python3.11/site-packages/policy_sentry/shared/data/iam-definition.json
2023-07-20 09:36:43,608 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250> with order 0
2023-07-20 09:36:43,608 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,608 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>]
2023-07-20 09:36:43,609 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210> with order 2
2023-07-20 09:36:43,609 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,609 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210>]
2023-07-20 09:36:43,612 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f14ff583e50> with order 11
2023-07-20 09:36:43,612 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,613 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f14ff583e50>]
2023-07-20 09:36:43,613 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f14ff5b0d90> with order 0
2023-07-20 09:36:43,613 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,613 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f14ff5b0d90>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f14ff583e50>]
2023-07-20 09:36:43,614 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f14ff5b1850> with order 10
2023-07-20 09:36:43,614 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,614 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f14ff5b0d90>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f14ff5b1850>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f14ff583e50>]
2023-07-20 09:36:43,615 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f14ff5b2ad0> with order 6
2023-07-20 09:36:43,615 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,615 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f14ff5b0d90>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f14ff5b2ad0>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f14ff5b1850>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f14ff583e50>]
2023-07-20 09:36:43,616 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f14ff5b3250> with order 1
2023-07-20 09:36:43,616 [MainThread ] [DEBUG] self.features after the sort:
2023-07-20 09:36:43,616 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f15008f7250>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f14ff5b0d90>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f14ff5b3250>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f15008f7210>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f14ff5b2ad0>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f14ff5b1850>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f14ff583e50>]
2023-07-20 09:36:43,712 [MainThread ] [DEBUG] Loading external checks from /usr/local/lib/python3.11/site-packages/checkov/bicep/checks/graph_checks
2023-07-20 09:36:43,712 [MainThread ] [DEBUG] Searching through ['__pycache__'] and ['SQLServerAuditingEnabled.json', '__init__.py']
2023-07-20 09:36:43,714 [MainThread ] [DEBUG] Searching through [] and ['__init__.cpython-311.pyc']
2023-07-20 09:36:43,880 [MainThread ] [DEBUG] Popen(['git', 'version'], cwd=/code/aaa, universal_newlines=False, shell=None, istream=None)
2023-07-20 09:36:43,881 [MainThread ] [DEBUG] Popen(['git', 'version'], cwd=/code/aaa, universal_newlines=False, shell=None, istream=None)
2023-07-20 09:36:44,123 [MainThread ] [DEBUG] No API key present; setting include_all_checkov_policies to True
2023-07-20 09:36:44,125 [MainThread ] [DEBUG] Run metadata: {
"checkov_version": "2.3.335",
"python_executable": "/usr/local/bin/python3",
"python_version": "3.11.4 (main, Jul 4 2023, 05:51:40) [GCC 10.2.1 20210110]",
"checkov_executable": "/usr/local/bin/checkov",
"args": [
"Command Line Args: -d .",
"Defaults:",
" --framework: ['all']",
" --branch: master",
" --download-external-modules:False",
" --external-modules-download-path:.external_modules",
" --evaluate-variables:True",
" --secrets-scan-file-type:[]",
" --block-list-secret-scan:[]",
" --summary-position:top",
" --mask: []",
" --secrets-history-timeout:12h",
""
],
"OS_system_info": "Linux-5.15.112-microsoft-standard-WSL2-x86_64-with-glibc2.31",
"CPU_architecture": "",
"Python_implementation": "CPython"
}
2023-07-20 09:36:44,125 [MainThread ] [DEBUG] Using cert_reqs None
2023-07-20 09:36:44,125 [MainThread ] [DEBUG] Successfully set up HTTP manager
2023-07-20 09:36:44,125 [MainThread ] [DEBUG] Resultant set of frameworks (removing skipped frameworks): all
2023-07-20 09:36:44,125 [MainThread ] [DEBUG] BC_SOURCE = cli, version = 2.3.335
2023-07-20 09:36:44,125 [MainThread ] [DEBUG] terraform_runner declares no system dependency checks required.
2023-07-20 09:36:44,126 [MainThread ] [DEBUG] cloudformation_runner declares no system dependency checks required.
2023-07-20 09:36:44,126 [MainThread ] [DEBUG] kubernetes_runner declares no system dependency checks required.
2023-07-20 09:36:44,126 [MainThread ] [DEBUG] serverless_runner declares no system dependency checks required.
2023-07-20 09:36:44,126 [MainThread ] [DEBUG] arm_runner declares no system dependency checks required.
2023-07-20 09:36:44,126 [MainThread ] [DEBUG] terraform_plan_runner declares no system dependency checks required.
2023-07-20 09:36:44,126 [MainThread ] [INFO ] Checking necessary system dependancies for helm checks.
2023-07-20 09:36:44,127 [MainThread ] [INFO ] Error running necessary tools to process helm checks.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] dockerfile_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] secrets_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] json_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] yaml_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] github_configuration_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] gitlab_configuration_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] gitlab_ci_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] bitbucket_configuration_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [DEBUG] bitbucket_pipelines_runner declares no system dependency checks required.
2023-07-20 09:36:44,127 [MainThread ] [INFO ] Checking necessary system dependancies for kustomize checks.
2023-07-20 09:36:44,128 [MainThread ] [INFO ] Could not find usable tools locally to process kustomize checks. Framework will be disabled for this run.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] github_actions_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] bicep_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] openapi_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] sca_image_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] argo_workflows_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] circleci_pipelines_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] azure_pipelines_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] ansible_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] terraform_json_runner declares no system dependency checks required.
2023-07-20 09:36:44,128 [MainThread ] [DEBUG] sca_package_runner declares no system dependency checks required....
2023-07-20 09:36:45,548 [MainThread ] [DEBUG] skip_severity = None, explicit_skip = [], regex_match = False, suppressed_policies: []
2023-07-20 09:36:45,548 [MainThread ] [DEBUG] should_run_check CKV2_GHA_1: True
2023-07-20 09:36:45,548 [MainThread ] [DEBUG] bc_check_id = BC_REPO_GITLAB_CI_3, include_all_checkov_policies = True, is_external = False, explicit_run: []
2023-07-20 09:36:45,548 [MainThread ] [DEBUG] should_run_check CKV_GITLABCI_3: True
2023-07-20 09:36:45,543 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_31
2023-07-20 09:36:45,548 [MainThread ] [DEBUG] skip_severity = None, explicit_skip = [], regex_match = False, suppressed_policies: []
2023-07-20 09:36:45,548 [MainThread ] [DEBUG] bc_check_id = BC_REPO_GITLAB_CI_2, include_all_checkov_policies = True, is_external = False, explicit_run: []
2023-07-20 09:36:45,549 [MainThread ] [DEBUG] should_run_check CKV_GITLABCI_2: True
2023-07-20 09:36:45,546 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_2
2023-07-20 09:36:45,547 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_4
2023-07-20 09:36:45,547 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_13
2023-07-20 09:36:45,547 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_1
2023-07-20 09:36:45,551 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_GHA_1
2023-07-20 09:36:45,554 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_21
2023-07-20 09:36:45,549 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_28
2023-07-20 09:36:45,549 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_14
2023-07-20 09:36:45,550 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_37
2023-07-20 09:36:45,551 [MainThread ] [ERROR] Failed to invoke function /usr/local/lib/python3.11/site-packages/checkov/common/runners/runner_registry._parallel_run with <checkov.gitlab_ci.runner.Runner object at 0x7f14fdfb6d90>
Traceback (most recent call last):
File "/usr/local/lib/python3.11/site-packages/checkov/common/parallelizer/parallel_runner.py", line 39, in func_wrapper
result = original_func(item)
^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/checkov/common/runners/runner_registry.py", line 127, in _parallel_run
report = runner.run(
^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/checkov/gitlab_ci/runner.py", line 68, in run
report = super().run(root_folder=root_folder, external_checks_dir=external_checks_dir,
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/checkov/common/runners/object_runner.py", line 154, in run
self.add_python_check_results(report=report, registry=registry, runner_filter=runner_filter, root_folder=root_folder)
File "/usr/local/lib/python3.11/site-packages/checkov/common/runners/object_runner.py", line 173, in add_python_check_results
results = registry.scan(file_path, self.definitions[file_path], skipped_checks, runner_filter) # type:ignore[arg-type] # this is overridden in the subclass
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/checkov/yaml_doc/base_registry.py", line 226, in scan
self._scan_yaml(
File "/usr/local/lib/python3.11/site-packages/checkov/yaml_doc/base_registry.py", line 190, in _scan_yaml
scanner(
File "/usr/local/lib/python3.11/site-packages/checkov/yaml_doc/base_registry.py", line 59, in _scan_yaml_array
skip_info = ([skip for skip in skip_infos if item[STARTLINE_MARK] <= skip["line_number"] <= item[ENDLINE_MARK]] or [{}])[0]
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.11/site-packages/checkov/yaml_doc/base_registry.py", line 59, in <listcomp>
skip_info = ([skip for skip in skip_infos if item[STARTLINE_MARK] <= skip["line_number"] <= item[ENDLINE_MARK]] or [{}])[0]
~~~~^^^^^^^^^^^^^^^^
TypeError: list indices must be integers or slices, not str
2023-07-20 09:36:45,550 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_15
2023-07-20 09:36:45,551 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_20
2023-07-20 09:36:45,547 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_26
2023-07-20 09:36:45,554 [ThreadPoolEx] [DEBUG] Running graph check: CKV_AZURE_24
2023-07-20 09:36:45,554 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_10
2023-07-20 09:36:45,554 [ThreadPoolEx] [DEBUG] Running graph check: CKV_AZURE_120
2023-07-20 09:36:45,567 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_6
2023-07-20 09:36:45,568 [ThreadPoolEx] [DEBUG] Running graph check: CKV_AZURE_119
2023-07-20 09:36:45,570 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_16
2023-07-20 09:36:45,570 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_9
2023-07-20 09:36:45,571 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_19
2023-07-20 09:36:45,571 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_23
2023-07-20 09:36:45,572 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_36
2023-07-20 09:36:45,572 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_32
2023-07-20 09:36:45,573 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_24
2023-07-20 09:36:45,573 [ThreadPoolEx] [DEBUG] Running graph check: CKV_AZURE_23
2023-07-20 09:36:45,574 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_17
2023-07-20 09:36:45,578 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_33
2023-07-20 09:36:45,579 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_22
2023-07-20 09:36:45,579 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_30
2023-07-20 09:36:45,579 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_8
2023-07-20 09:36:45,585 [MainThread ] [INFO ] Creating Ansible graph
2023-07-20 09:36:45,579 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_38
2023-07-20 09:36:45,580 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_12
2023-07-20 09:36:45,586 [MainThread ] [DEBUG] [AnsibleLocalGraph] created 0 vertices
2023-07-20 09:36:45,586 [MainThread ] [DEBUG] [AnsibleLocalGraph] created 0 edges
2023-07-20 09:36:45,580 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_11
2023-07-20 09:36:45,587 [MainThread ] [INFO ] Successfully created Ansible graph
2023-07-20 09:36:45,581 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AZURE_29
2023-07-20 09:36:45,581 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AWS_8
2023-07-20 09:36:45,588 [MainThread ] [DEBUG] Loading external checks from /usr/local/lib/python3.11/site-packages/checkov/ansible/checks/graph_checks
2023-07-20 09:36:45,589 [MainThread ] [DEBUG] Searching through ['__pycache__'] and ['PanosPolicyDescription.json', 'DnfValidateCerts.json', 'PanosInterfaceMgmtProfileNoTelnet.json', 'PanosZoneUserIDIncludeACL.json', 'DnfDisableGpgCheck.json', 'PanosPolicyNoApplicationAny.json', 'PanosZoneProtectionProfile.json', 'DnfSslVerify.json', 'GetUrlHttpsOnly.json', 'PanosPolicyLogForwarding.json', 'PanosInterfaceMgmtProfileNoHTTP.json', 'PanosPolicyNoSrcAnyDstAny.json', 'PanosPolicyLoggingEnabled.json', 'PanosPolicyNoDSRI.json', 'BlockErrorHandling.json', 'UriHttpsOnly.json', '__init__.py', 'PanosPolicyNoServiceAny.json']
2023-07-20 09:36:45,582 [ThreadPoolEx] [DEBUG] Running graph check: CKV2_AWS_39
...
2023-07-20 09:36:45,771 [MainThread ] [DEBUG] In get_exit_code; exit code thresholds: {'soft_fail': False, 'soft_fail_checks': [], 'soft_fail_threshold': None, 'hard_fail_checks': [], 'hard_fail_threshold': None}, hard_fail_on_parsing_errors: False
2023-07-20 09:36:45,771 [MainThread ] [DEBUG] No failed checks in this report - returning 0
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By bridgecrew.io | version: 2.3.335
2023-07-20 09:36:45,772 [MainThread ] [DEBUG] Should run contributor metrics report: None
```
**Desktop (please complete the following information):**
- OS: Debian 11
- Checkov Version 2.3.335
| 2023-07-23T10:14:48 | -1.0 |
|
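An illustrative sketch of the suppression matching fixed above: a parsed YAML entity may be a dict carrying injected start/end line marks, or a list of such dicts (e.g. a `rules:` array), so indexing it with the string marks must be guarded by an isinstance check. The mark names and helper below are assumptions for this sketch only, not checkov's exact internals.

```python
# Match an inline "checkov:skip=..." comment to the parsed item whose line range contains it,
# without crashing when the item is a list or a plain string.
STARTLINE, ENDLINE = "__startline__", "__endline__"


def matching_skip(item, skip_infos):
    candidates = item if isinstance(item, list) else [item]
    for candidate in candidates:
        if not isinstance(candidate, dict):
            continue  # strings or nested lists cannot carry line marks
        for skip in skip_infos:
            if candidate[STARTLINE] <= skip["line_number"] <= candidate[ENDLINE]:
                return skip
    return {}


if __name__ == "__main__":
    rules = [
        {"if": '$CI_PIPELINE_SOURCE == "merge_request_event"', STARTLINE: 4, ENDLINE: 5},
        {"if": '$CI_PIPELINE_SOURCE != "merge_request_event"', STARTLINE: 6, ENDLINE: 8},
    ]
    skips = [{"id": "CKV_GITLABCI_2", "line_number": 7}]
    print(matching_skip(rules, skips))       # matches the second rule's line range
    print(matching_skip("a string", skips))  # {} instead of a TypeError
```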
bridgecrewio/checkov | 5,388 | bridgecrewio__checkov-5388 | [
"5712"
] | cac66158c66bada08d797aaf765a96a24ec9c005 | diff --git a/checkov/common/output/gitlab_sast.py b/checkov/common/output/gitlab_sast.py
--- a/checkov/common/output/gitlab_sast.py
+++ b/checkov/common/output/gitlab_sast.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from datetime import datetime
+from datetime import datetime, timezone
from typing import TYPE_CHECKING, Any
from uuid import uuid4
@@ -37,7 +37,7 @@ def create_sast_json(self) -> dict[str, Any]:
}
def _create_scan(self) -> dict[str, Any]:
- current_datetime = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S")
+ current_datetime = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S")
scanner = {
"id": "checkov",
"name": "Checkov",
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -161,6 +161,7 @@ def run(self) -> None:
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
"Topic :: Security",
"Topic :: Software Development :: Build Tools",
"Typing :: Typed",
| diff --git a/tests/common/checks/test_base_check.py b/tests/common/checks/test_base_check.py
--- a/tests/common/checks/test_base_check.py
+++ b/tests/common/checks/test_base_check.py
@@ -1,7 +1,7 @@
import os
import unittest
-import mock
+from unittest import mock
from parameterized import parameterized
from checkov.common.checks.base_check import BaseCheck
@@ -96,7 +96,7 @@ def __init__(self):
TestCheckUnknownSignature()
self.assertIsInstance(context.exception, TypeError)
- self.assertRegex(context.exception.args[0], r"Can't instantiate abstract class TestCheckUnknownSignature with abstract method(s)? scan_entity_conf")
+ self.assertRegex(context.exception.args[0], r"Can't instantiate abstract class TestCheckUnknownSignature")
def test_details_reinitializing_after_execution(self):
check = TestCheckDetails()
diff --git a/tests/common/output/test_spdx.py b/tests/common/output/test_spdx.py
--- a/tests/common/output/test_spdx.py
+++ b/tests/common/output/test_spdx.py
@@ -1,4 +1,6 @@
-from freezegun import freeze_time
+from datetime import datetime, timezone
+
+from time_machine import travel
from checkov.common.bridgecrew.check_type import CheckType
from checkov.common.output.extra_resource import ExtraResource
@@ -7,7 +9,7 @@
from checkov.common.sca.output import create_report_cve_record
-@freeze_time("2022-12-24")
+@travel(datetime(2022, 12, 24, tzinfo=timezone.utc))
def test_sca_package_output():
# given
rootless_file_path = "requirements.txt"
diff --git a/tests/common/test_resource_code_logger_filter.py b/tests/common/test_resource_code_logger_filter.py
--- a/tests/common/test_resource_code_logger_filter.py
+++ b/tests/common/test_resource_code_logger_filter.py
@@ -1,5 +1,5 @@
import logging
-import mock
+from unittest import mock
from checkov.common.resource_code_logger_filter import add_resource_code_filter_to_logger
diff --git a/tests/github/checks/test_python_policies.py b/tests/github/checks/test_python_policies.py
--- a/tests/github/checks/test_python_policies.py
+++ b/tests/github/checks/test_python_policies.py
@@ -3,7 +3,7 @@
from pathlib import Path
import pytest
-from freezegun import freeze_time
+from time_machine import travel
from checkov.github.runner import Runner
from checkov.runner_filter import RunnerFilter
@@ -45,7 +45,7 @@ def test_GithubRequire2Approvals():
run_check(base_path=BASE_DIR / "branch_security", check="GithubRequire2Approvals")
-@freeze_time("2022-12-05")
+@travel("2022-12-05")
def test_GithubDisallowInactiveBranch60Days():
run_check(base_path=BASE_DIR / "branch_security", check="GithubDisallowInactiveBranch60Days")
diff --git a/tests/kustomize/test_runner.py b/tests/kustomize/test_runner.py
--- a/tests/kustomize/test_runner.py
+++ b/tests/kustomize/test_runner.py
@@ -1,7 +1,7 @@
import os
-import mock
import unittest
from pathlib import Path
+from unittest import mock
from checkov.common.bridgecrew.check_type import CheckType
from checkov.common.bridgecrew.severities import Severities, BcSeverities
diff --git a/tests/kustomize/test_runner_image_referencer.py b/tests/kustomize/test_runner_image_referencer.py
--- a/tests/kustomize/test_runner_image_referencer.py
+++ b/tests/kustomize/test_runner_image_referencer.py
@@ -1,10 +1,9 @@
from __future__ import annotations
import os
-import sys
from pathlib import Path
+from unittest import mock
-import mock
import pytest
from pytest_mock import MockerFixture
diff --git a/tests/sca_package/conftest.py b/tests/sca_package/conftest.py
--- a/tests/sca_package/conftest.py
+++ b/tests/sca_package/conftest.py
@@ -1,8 +1,8 @@
import os
from pathlib import Path
from unittest import mock
+from unittest.mock import MagicMock
-from mock.mock import MagicMock
from typing import Dict, Any, List
from pytest_mock import MockerFixture
diff --git a/tests/sca_package/test_runner.py b/tests/sca_package/test_runner.py
--- a/tests/sca_package/test_runner.py
+++ b/tests/sca_package/test_runner.py
@@ -1,6 +1,6 @@
from pathlib import Path
+from unittest.mock import MagicMock
-from mock.mock import MagicMock
from packaging import version as packaging_version
from pytest_mock import MockerFixture
@@ -38,7 +38,7 @@ def test_run(sca_package_report):
assert cve_record is not None
assert cve_record.bc_check_id == "BC_CVE_2020_29652"
assert cve_record.check_id == "CKV_CVE_2020_29652"
- assert cve_record.check_class == "mock.mock.MagicMock" # not the real one
+ assert cve_record.check_class == "unittest.mock.MagicMock" # not the real one
assert cve_record.check_name == "SCA package scan"
assert cve_record.check_result == {"result": CheckResult.FAILED}
assert cve_record.code_block == [(0, "golang.org/x/crypto: v0.0.1")]
@@ -190,7 +190,7 @@ def test_prepare_and_scan(mocker: MockerFixture, scan_result):
# then
assert real_result is not None
- assert runner._check_class == 'mock.mock.MagicMock'
+ assert runner._check_class == 'unittest.mock.MagicMock'
assert runner._code_repo_path == EXAMPLES_DIR
diff --git a/tests/sca_package_2/conftest.py b/tests/sca_package_2/conftest.py
--- a/tests/sca_package_2/conftest.py
+++ b/tests/sca_package_2/conftest.py
@@ -1,12 +1,11 @@
import os
from pathlib import Path
from unittest import mock
-
-from mock.mock import MagicMock
+from unittest.mock import MagicMock
from typing import Dict, Any, List
-from pytest_mock import MockerFixture
import pytest
+from pytest_mock import MockerFixture
from checkov.common.bridgecrew.bc_source import SourceType
from checkov.common.bridgecrew.platform_integration import BcPlatformIntegration, bc_integration
diff --git a/tests/sca_package_2/test_runner.py b/tests/sca_package_2/test_runner.py
--- a/tests/sca_package_2/test_runner.py
+++ b/tests/sca_package_2/test_runner.py
@@ -1,6 +1,6 @@
import os
from pathlib import Path
-from mock.mock import MagicMock
+from unittest.mock import MagicMock
from pytest_mock import MockerFixture
from packaging import version as packaging_version
@@ -144,7 +144,7 @@ def test_run(sca_package_2_report):
assert cve_record is not None
assert cve_record.bc_check_id == "BC_CVE_2020_29652"
assert cve_record.check_id == "CKV_CVE_2020_29652"
- assert cve_record.check_class == "mock.mock.MagicMock" # not the real one
+ assert cve_record.check_class == "unittest.mock.MagicMock" # not the real one
assert cve_record.check_name == "SCA package scan"
assert cve_record.check_result == {"result": CheckResult.FAILED}
assert cve_record.code_block == [(0, "golang.org/x/crypto: v0.0.1")]
diff --git a/tests/secrets/test_secrets_verification.py b/tests/secrets/test_secrets_verification.py
--- a/tests/secrets/test_secrets_verification.py
+++ b/tests/secrets/test_secrets_verification.py
@@ -1,7 +1,8 @@
from __future__ import annotations
+
import os
+from unittest import mock
-import mock
import pytest
import responses
diff --git a/tests/secrets/test_secrets_verification_suppressions.py b/tests/secrets/test_secrets_verification_suppressions.py
--- a/tests/secrets/test_secrets_verification_suppressions.py
+++ b/tests/secrets/test_secrets_verification_suppressions.py
@@ -1,6 +1,6 @@
import os
+from unittest import mock
-import mock
import responses
from checkov.common.models.enums import CheckResult
diff --git a/tests/terraform/checks/resource/gcp/test_GoogleComputeFirewallUnrestrictedIngress22.py b/tests/terraform/checks/resource/gcp/test_GoogleComputeFirewallUnrestrictedIngress22.py
--- a/tests/terraform/checks/resource/gcp/test_GoogleComputeFirewallUnrestrictedIngress22.py
+++ b/tests/terraform/checks/resource/gcp/test_GoogleComputeFirewallUnrestrictedIngress22.py
@@ -1,9 +1,6 @@
-import os
import unittest
from pathlib import Path
-import mock
-
from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.gcp.GoogleComputeFirewallUnrestrictedIngress22 import check
from checkov.terraform.runner import Runner
diff --git a/tests/terraform/graph/checks_infra/test_base_attribute_solver.py b/tests/terraform/graph/checks_infra/test_base_attribute_solver.py
--- a/tests/terraform/graph/checks_infra/test_base_attribute_solver.py
+++ b/tests/terraform/graph/checks_infra/test_base_attribute_solver.py
@@ -1,4 +1,5 @@
-from mock.mock import MagicMock
+from unittest.mock import MagicMock
+
from pytest_mock import MockerFixture
from checkov.common.checks_infra.solvers.attribute_solvers.base_attribute_solver import BaseAttributeSolver
@@ -29,8 +30,8 @@ def test_get_cached_jsonpath_statement(mocker: MockerFixture):
assert len(BaseAttributeSolver.jsonpath_parsed_statement_cache) == 1
# patch jsonpath_ng.parse to be able to check it was really not called again and the cache was properly used
- with mocker.patch("checkov.common.checks_infra.solvers.attribute_solvers.base_attribute_solver.parse", side_effect=jsonpath_parse_mock):
- solver_2._get_cached_jsonpath_statement(statement=statement)
+ mocker.patch("checkov.common.checks_infra.solvers.attribute_solvers.base_attribute_solver.parse", side_effect=jsonpath_parse_mock)
+ solver_2._get_cached_jsonpath_statement(statement=statement)
# then
assert len(BaseAttributeSolver.jsonpath_parsed_statement_cache) == 1
diff --git a/tests/terraform/graph/graph_builder/test_local_graph.py b/tests/terraform/graph/graph_builder/test_local_graph.py
--- a/tests/terraform/graph/graph_builder/test_local_graph.py
+++ b/tests/terraform/graph/graph_builder/test_local_graph.py
@@ -1,9 +1,7 @@
import os
from pathlib import Path
from unittest import TestCase
-
-import mock
-import json
+from unittest import mock
from checkov.common.graph.db_connectors.networkx.networkx_db_connector import NetworkxConnector
from checkov.common.graph.graph_builder import EncryptionValues, EncryptionTypes
diff --git a/tests/terraform/runner/test_plan_runner.py b/tests/terraform/runner/test_plan_runner.py
--- a/tests/terraform/runner/test_plan_runner.py
+++ b/tests/terraform/runner/test_plan_runner.py
@@ -4,11 +4,9 @@
from collections import defaultdict
from copy import deepcopy
from pathlib import Path
-
-
from typing import Dict, Any
+from unittest import mock
-import mock
from parameterized import parameterized_class
# do not remove - prevents circular import
| ERROR: Failed building wheel for aiohttp
**Describe the issue**
Explain what you expected to happen when checkov crashed.
I simply tried to commit a change (`git commit`) which triggered pre-commit to install the environment for https://github.com/bridgecrewio/checkov.
The error I'm receiving is `ERROR: Failed building wheel for aiohttp`.
**Examples**
Please share an example code sample (in the IaC of your choice) + the expected outcomes.
1. My .pre-commit-config.yaml contains the following repo for checkov:
```
- repo: https://github.com/bridgecrewio/checkov
rev: 3.0.16
hooks:
- id: checkov
args: [--config-file, .checkov/k8s.yaml]
```
2. .checkov/k8s.yaml looks like this:
```
quiet: true
compact: true
skip-download: true
download-external-modules: false
framework:
- helm
- kubernetes
directory:
- ./xyz
```
**Exception Trace**
Please share the trace for the exception and all relevant output by checkov.
To maximize the understanding, please run checkov with LOG_LEVEL set to debug
as follows:
```sh
git commit
[INFO] Installing environment for https://github.com/bridgecrewio/checkov.
[INFO] Once installed this environment will be reused.
[INFO] This may take a few minutes...
An unexpected error has occurred: CalledProcessError: command: ('/Users/michaelvoet/.cache/pre-commit/repor0bcn38a/py_env-python3.12/bin/python', '-mpip', 'install', '.')
return code: 1
stdout:
Processing /Users/michaelvoet/.cache/pre-commit/repor0bcn38a
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Getting requirements to build wheel: started
Getting requirements to build wheel: finished with status 'done'
Installing backend dependencies: started
Installing backend dependencies: finished with status 'done'
Preparing metadata (pyproject.toml): started
Preparing metadata (pyproject.toml): finished with status 'done'
Collecting bc-python-hcl2==0.3.51 (from checkov==3.0.16)
Using cached bc_python_hcl2-0.3.51-py3-none-any.whl (14 kB)
Collecting bc-detect-secrets==1.4.30 (from checkov==3.0.16)
Using cached bc_detect_secrets-1.4.30-py3-none-any.whl.metadata (23 kB)
Collecting bc-jsonpath-ng==1.5.9 (from checkov==3.0.16)
Using cached bc_jsonpath_ng-1.5.9-py3-none-any.whl (29 kB)
Collecting tabulate (from checkov==3.0.16)
Using cached tabulate-0.9.0-py3-none-any.whl (35 kB)
Collecting colorama (from checkov==3.0.16)
Using cached colorama-0.4.6-py2.py3-none-any.whl (25 kB)
Collecting termcolor (from checkov==3.0.16)
Using cached termcolor-2.3.0-py3-none-any.whl (6.9 kB)
Collecting junit-xml>=1.9 (from checkov==3.0.16)
Using cached junit_xml-1.9-py2.py3-none-any.whl (7.1 kB)
Collecting dpath==2.1.3 (from checkov==3.0.16)
Using cached dpath-2.1.3-py3-none-any.whl (17 kB)
Collecting pyyaml>=5.4.1 (from checkov==3.0.16)
Using cached PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl.metadata (2.1 kB)
Collecting boto3>=1.17 (from checkov==3.0.16)
Using cached boto3-1.28.76-py3-none-any.whl.metadata (6.7 kB)
Collecting gitpython (from checkov==3.0.16)
Using cached GitPython-3.1.40-py3-none-any.whl.metadata (12 kB)
Collecting jmespath (from checkov==3.0.16)
Using cached jmespath-1.0.1-py3-none-any.whl (20 kB)
Collecting tqdm (from checkov==3.0.16)
Using cached tqdm-4.66.1-py3-none-any.whl.metadata (57 kB)
Collecting update-checker (from checkov==3.0.16)
Using cached update_checker-0.18.0-py3-none-any.whl (7.0 kB)
Collecting semantic-version (from checkov==3.0.16)
Using cached semantic_version-2.10.0-py2.py3-none-any.whl (15 kB)
Collecting packaging (from checkov==3.0.16)
Using cached packaging-23.2-py3-none-any.whl.metadata (3.2 kB)
Collecting cloudsplaining>=0.6.2 (from checkov==3.0.16)
Using cached cloudsplaining-0.6.2-py3-none-any.whl.metadata (21 kB)
Collecting networkx<2.7 (from checkov==3.0.16)
Using cached networkx-2.6.3-py3-none-any.whl (1.9 MB)
Collecting igraph<0.11.0 (from checkov==3.0.16)
Using cached igraph-0.10.8-cp39-abi3-macosx_11_0_arm64.whl.metadata (3.8 kB)
Collecting dockerfile-parse (from checkov==3.0.16)
Using cached dockerfile_parse-2.0.1-py2.py3-none-any.whl.metadata (3.3 kB)
Collecting docker (from checkov==3.0.16)
Using cached docker-6.1.3-py3-none-any.whl.metadata (3.5 kB)
Collecting configargparse (from checkov==3.0.16)
Using cached ConfigArgParse-1.7-py3-none-any.whl.metadata (23 kB)
Collecting argcomplete (from checkov==3.0.16)
Using cached argcomplete-3.1.3-py3-none-any.whl.metadata (16 kB)
Collecting typing-extensions>=4.1.0 (from checkov==3.0.16)
Using cached typing_extensions-4.8.0-py3-none-any.whl.metadata (3.0 kB)
Collecting importlib-metadata>=0.12 (from checkov==3.0.16)
Using cached importlib_metadata-6.8.0-py3-none-any.whl.metadata (5.1 kB)
Collecting cachetools (from checkov==3.0.16)
Using cached cachetools-5.3.2-py3-none-any.whl.metadata (5.2 kB)
Collecting cyclonedx-python-lib>=5.0.0 (from checkov==3.0.16)
Using cached cyclonedx_python_lib-5.1.0-py3-none-any.whl.metadata (6.2 kB)
Collecting packageurl-python (from checkov==3.0.16)
Using cached packageurl_python-0.11.2-py3-none-any.whl.metadata (5.0 kB)
Collecting click>=8.0.0 (from checkov==3.0.16)
Using cached click-8.1.7-py3-none-any.whl.metadata (3.0 kB)
Collecting aiohttp (from checkov==3.0.16)
Using cached aiohttp-3.8.6.tar.gz (7.4 MB)
Installing build dependencies: started
Installing build dependencies: finished with status 'done'
Getting requirements to build wheel: started
Getting requirements to build wheel: finished with status 'done'
Installing backend dependencies: started
Installing backend dependencies: finished with status 'done'
Preparing metadata (pyproject.toml): started
Preparing metadata (pyproject.toml): finished with status 'done'
Collecting aiodns (from checkov==3.0.16)
Using cached aiodns-3.1.1-py3-none-any.whl.metadata (4.0 kB)
Collecting aiomultiprocess (from checkov==3.0.16)
Using cached aiomultiprocess-0.9.0-py3-none-any.whl (17 kB)
Collecting jsonschema<5.0.0,>=4.6.0 (from checkov==3.0.16)
Using cached jsonschema-4.19.2-py3-none-any.whl.metadata (7.9 kB)
Collecting prettytable>=3.0.0 (from checkov==3.0.16)
Using cached prettytable-3.9.0-py3-none-any.whl.metadata (26 kB)
Collecting pycep-parser==0.4.1 (from checkov==3.0.16)
Using cached pycep_parser-0.4.1-py3-none-any.whl.metadata (2.8 kB)
Collecting charset-normalizer (from checkov==3.0.16)
Using cached charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl.metadata (33 kB)
Collecting schema (from checkov==3.0.16)
Using cached schema-0.7.5-py2.py3-none-any.whl (17 kB)
Collecting requests>=2.27.0 (from checkov==3.0.16)
Using cached requests-2.31.0-py3-none-any.whl.metadata (4.6 kB)
Collecting yarl (from checkov==3.0.16)
Using cached yarl-1.9.2-cp312-cp312-macosx_13_0_arm64.whl
Collecting openai (from checkov==3.0.16)
Using cached openai-0.28.1-py3-none-any.whl.metadata (11 kB)
Collecting spdx-tools<0.9.0,>=0.8.0 (from checkov==3.0.16)
Using cached spdx_tools-0.8.2-py3-none-any.whl.metadata (14 kB)
Collecting license-expression (from checkov==3.0.16)
Using cached license_expression-30.1.1-py3-none-any.whl.metadata (12 kB)
Collecting rustworkx (from checkov==3.0.16)
Using cached rustworkx-0.13.2-cp312-cp312-macosx_11_0_arm64.whl.metadata (10 kB)
Collecting pydantic (from checkov==3.0.16)
Using cached pydantic-2.4.2-py3-none-any.whl.metadata (158 kB)
Collecting unidiff (from bc-detect-secrets==1.4.30->checkov==3.0.16)
Using cached unidiff-0.7.5-py2.py3-none-any.whl (14 kB)
Collecting ply (from bc-jsonpath-ng==1.5.9->checkov==3.0.16)
Using cached ply-3.11-py2.py3-none-any.whl (49 kB)
Collecting decorator (from bc-jsonpath-ng==1.5.9->checkov==3.0.16)
Using cached decorator-5.1.1-py3-none-any.whl (9.1 kB)
Collecting lark>=1.0.0 (from bc-python-hcl2==0.3.51->checkov==3.0.16)
Using cached lark-1.1.8-py3-none-any.whl.metadata (1.9 kB)
Collecting regex>=2022.1.18 (from pycep-parser==0.4.1->checkov==3.0.16)
Using cached regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl.metadata (40 kB)
Collecting botocore<1.32.0,>=1.31.76 (from boto3>=1.17->checkov==3.0.16)
Using cached botocore-1.31.76-py3-none-any.whl.metadata (6.1 kB)
Collecting s3transfer<0.8.0,>=0.7.0 (from boto3>=1.17->checkov==3.0.16)
Using cached s3transfer-0.7.0-py3-none-any.whl.metadata (1.8 kB)
Collecting cached-property (from cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached cached_property-1.5.2-py2.py3-none-any.whl (7.6 kB)
Collecting click-option-group (from cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached click_option_group-0.5.6-py3-none-any.whl.metadata (8.3 kB)
Collecting jinja2 (from cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached Jinja2-3.1.2-py3-none-any.whl (133 kB)
Collecting markdown (from cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached Markdown-3.5.1-py3-none-any.whl.metadata (7.1 kB)
Collecting policy-sentry>=0.11.3 (from cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached policy_sentry-0.12.10-py3-none-any.whl.metadata (20 kB)
Collecting py-serializable<0.16,>=0.15 (from cyclonedx-python-lib>=5.0.0->checkov==3.0.16)
Using cached py_serializable-0.15.0-py3-none-any.whl.metadata (3.8 kB)
Collecting sortedcontainers<3.0.0,>=2.4.0 (from cyclonedx-python-lib>=5.0.0->checkov==3.0.16)
Using cached sortedcontainers-2.4.0-py2.py3-none-any.whl (29 kB)
Collecting texttable>=1.6.2 (from igraph<0.11.0->checkov==3.0.16)
Using cached texttable-1.7.0-py2.py3-none-any.whl.metadata (9.8 kB)
Collecting zipp>=0.5 (from importlib-metadata>=0.12->checkov==3.0.16)
Using cached zipp-3.17.0-py3-none-any.whl.metadata (3.7 kB)
Collecting attrs>=22.2.0 (from jsonschema<5.0.0,>=4.6.0->checkov==3.0.16)
Using cached attrs-23.1.0-py3-none-any.whl (61 kB)
Collecting jsonschema-specifications>=2023.03.6 (from jsonschema<5.0.0,>=4.6.0->checkov==3.0.16)
Using cached jsonschema_specifications-2023.7.1-py3-none-any.whl.metadata (2.8 kB)
Collecting referencing>=0.28.4 (from jsonschema<5.0.0,>=4.6.0->checkov==3.0.16)
Using cached referencing-0.30.2-py3-none-any.whl.metadata (2.6 kB)
Collecting rpds-py>=0.7.1 (from jsonschema<5.0.0,>=4.6.0->checkov==3.0.16)
Using cached rpds_py-0.10.6-cp312-cp312-macosx_11_0_arm64.whl.metadata (3.7 kB)
Collecting six (from junit-xml>=1.9->checkov==3.0.16)
Using cached six-1.16.0-py2.py3-none-any.whl (11 kB)
Collecting boolean.py>=4.0 (from license-expression->checkov==3.0.16)
Using cached boolean.py-4.0-py3-none-any.whl (25 kB)
Collecting wcwidth (from prettytable>=3.0.0->checkov==3.0.16)
Using cached wcwidth-0.2.9-py2.py3-none-any.whl.metadata (14 kB)
Collecting idna<4,>=2.5 (from requests>=2.27.0->checkov==3.0.16)
Using cached idna-3.4-py3-none-any.whl (61 kB)
Collecting urllib3<3,>=1.21.1 (from requests>=2.27.0->checkov==3.0.16)
Using cached urllib3-2.0.7-py3-none-any.whl.metadata (6.6 kB)
Collecting certifi>=2017.4.17 (from requests>=2.27.0->checkov==3.0.16)
Using cached certifi-2023.7.22-py3-none-any.whl.metadata (2.2 kB)
Collecting xmltodict (from spdx-tools<0.9.0,>=0.8.0->checkov==3.0.16)
Using cached xmltodict-0.13.0-py2.py3-none-any.whl (10.0 kB)
Collecting rdflib (from spdx-tools<0.9.0,>=0.8.0->checkov==3.0.16)
Using cached rdflib-7.0.0-py3-none-any.whl.metadata (11 kB)
Collecting beartype (from spdx-tools<0.9.0,>=0.8.0->checkov==3.0.16)
Using cached beartype-0.16.4-py3-none-any.whl.metadata (29 kB)
Collecting uritools (from spdx-tools<0.9.0,>=0.8.0->checkov==3.0.16)
Using cached uritools-4.0.2-py3-none-any.whl.metadata (4.7 kB)
Collecting pycares>=4.0.0 (from aiodns->checkov==3.0.16)
Using cached pycares-4.4.0-cp312-cp312-macosx_10_9_universal2.whl.metadata (4.1 kB)
Collecting multidict<7.0,>=4.5 (from aiohttp->checkov==3.0.16)
Using cached multidict-6.0.4-cp312-cp312-macosx_13_0_arm64.whl
Collecting async-timeout<5.0,>=4.0.0a3 (from aiohttp->checkov==3.0.16)
Using cached async_timeout-4.0.3-py3-none-any.whl.metadata (4.2 kB)
Collecting frozenlist>=1.1.1 (from aiohttp->checkov==3.0.16)
Using cached frozenlist-1.4.0-cp312-cp312-macosx_13_0_arm64.whl
Collecting aiosignal>=1.1.2 (from aiohttp->checkov==3.0.16)
Using cached aiosignal-1.3.1-py3-none-any.whl (7.6 kB)
Collecting websocket-client>=0.32.0 (from docker->checkov==3.0.16)
Using cached websocket_client-1.6.4-py3-none-any.whl.metadata (7.7 kB)
Collecting gitdb<5,>=4.0.1 (from gitpython->checkov==3.0.16)
Using cached gitdb-4.0.11-py3-none-any.whl.metadata (1.2 kB)
Collecting annotated-types>=0.4.0 (from pydantic->checkov==3.0.16)
Using cached annotated_types-0.6.0-py3-none-any.whl.metadata (12 kB)
Collecting pydantic-core==2.10.1 (from pydantic->checkov==3.0.16)
Using cached pydantic_core-2.10.1-cp312-cp312-macosx_11_0_arm64.whl.metadata (6.5 kB)
Collecting numpy>=1.16.0 (from rustworkx->checkov==3.0.16)
Using cached numpy-1.26.1-cp312-cp312-macosx_11_0_arm64.whl.metadata (61 kB)
Collecting contextlib2>=0.5.5 (from schema->checkov==3.0.16)
Using cached contextlib2-21.6.0-py2.py3-none-any.whl (13 kB)
Collecting python-dateutil<3.0.0,>=2.1 (from botocore<1.32.0,>=1.31.76->boto3>=1.17->checkov==3.0.16)
Using cached python_dateutil-2.8.2-py2.py3-none-any.whl (247 kB)
Collecting smmap<6,>=3.0.1 (from gitdb<5,>=4.0.1->gitpython->checkov==3.0.16)
Using cached smmap-5.0.1-py3-none-any.whl.metadata (4.3 kB)
Collecting beautifulsoup4 (from policy-sentry>=0.11.3->cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached beautifulsoup4-4.12.2-py3-none-any.whl (142 kB)
Collecting defusedxml<0.8.0,>=0.7.1 (from py-serializable<0.16,>=0.15->cyclonedx-python-lib>=5.0.0->checkov==3.0.16)
Using cached defusedxml-0.7.1-py2.py3-none-any.whl (25 kB)
Collecting cffi>=1.5.0 (from pycares>=4.0.0->aiodns->checkov==3.0.16)
Using cached cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl.metadata (1.5 kB)
Collecting MarkupSafe>=2.0 (from jinja2->cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl.metadata (2.9 kB)
Collecting isodate<0.7.0,>=0.6.0 (from rdflib->spdx-tools<0.9.0,>=0.8.0->checkov==3.0.16)
Using cached isodate-0.6.1-py2.py3-none-any.whl (41 kB)
Collecting pyparsing<4,>=2.1.0 (from rdflib->spdx-tools<0.9.0,>=0.8.0->checkov==3.0.16)
Using cached pyparsing-3.1.1-py3-none-any.whl.metadata (5.1 kB)
Collecting pycparser (from cffi>=1.5.0->pycares>=4.0.0->aiodns->checkov==3.0.16)
Using cached pycparser-2.21-py2.py3-none-any.whl (118 kB)
Collecting soupsieve>1.2 (from beautifulsoup4->policy-sentry>=0.11.3->cloudsplaining>=0.6.2->checkov==3.0.16)
Using cached soupsieve-2.5-py3-none-any.whl.metadata (4.7 kB)
Using cached bc_detect_secrets-1.4.30-py3-none-any.whl (118 kB)
Using cached pycep_parser-0.4.1-py3-none-any.whl (22 kB)
Using cached boto3-1.28.76-py3-none-any.whl (135 kB)
Using cached click-8.1.7-py3-none-any.whl (97 kB)
Using cached cloudsplaining-0.6.2-py3-none-any.whl (2.0 MB)
Using cached cyclonedx_python_lib-5.1.0-py3-none-any.whl (187 kB)
Using cached igraph-0.10.8-cp39-abi3-macosx_11_0_arm64.whl (1.7 MB)
Using cached importlib_metadata-6.8.0-py3-none-any.whl (22 kB)
Using cached jsonschema-4.19.2-py3-none-any.whl (83 kB)
Using cached license_expression-30.1.1-py3-none-any.whl (103 kB)
Using cached packageurl_python-0.11.2-py3-none-any.whl (25 kB)
Using cached prettytable-3.9.0-py3-none-any.whl (27 kB)
Using cached PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl (165 kB)
Using cached requests-2.31.0-py3-none-any.whl (62 kB)
Using cached charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl (119 kB)
Using cached spdx_tools-0.8.2-py3-none-any.whl (285 kB)
Using cached typing_extensions-4.8.0-py3-none-any.whl (31 kB)
Using cached aiodns-3.1.1-py3-none-any.whl (5.4 kB)
Using cached argcomplete-3.1.3-py3-none-any.whl (41 kB)
Using cached cachetools-5.3.2-py3-none-any.whl (9.3 kB)
Using cached ConfigArgParse-1.7-py3-none-any.whl (25 kB)
Using cached docker-6.1.3-py3-none-any.whl (148 kB)
Using cached packaging-23.2-py3-none-any.whl (53 kB)
Using cached dockerfile_parse-2.0.1-py2.py3-none-any.whl (14 kB)
Using cached GitPython-3.1.40-py3-none-any.whl (190 kB)
Using cached openai-0.28.1-py3-none-any.whl (76 kB)
Using cached pydantic-2.4.2-py3-none-any.whl (395 kB)
Using cached pydantic_core-2.10.1-cp312-cp312-macosx_11_0_arm64.whl (1.7 MB)
Using cached rustworkx-0.13.2-cp312-cp312-macosx_11_0_arm64.whl (1.6 MB)
Using cached tqdm-4.66.1-py3-none-any.whl (78 kB)
Using cached annotated_types-0.6.0-py3-none-any.whl (12 kB)
Using cached async_timeout-4.0.3-py3-none-any.whl (5.7 kB)
Using cached botocore-1.31.76-py3-none-any.whl (11.3 MB)
Using cached certifi-2023.7.22-py3-none-any.whl (158 kB)
Using cached gitdb-4.0.11-py3-none-any.whl (62 kB)
Using cached jsonschema_specifications-2023.7.1-py3-none-any.whl (17 kB)
Using cached lark-1.1.8-py3-none-any.whl (111 kB)
Using cached numpy-1.26.1-cp312-cp312-macosx_11_0_arm64.whl (13.7 MB)
Using cached policy_sentry-0.12.10-py3-none-any.whl (4.0 MB)
Using cached py_serializable-0.15.0-py3-none-any.whl (19 kB)
Using cached pycares-4.4.0-cp312-cp312-macosx_10_9_universal2.whl (136 kB)
Using cached referencing-0.30.2-py3-none-any.whl (25 kB)
Using cached regex-2023.10.3-cp312-cp312-macosx_11_0_arm64.whl (292 kB)
Using cached rpds_py-0.10.6-cp312-cp312-macosx_11_0_arm64.whl (321 kB)
Using cached s3transfer-0.7.0-py3-none-any.whl (79 kB)
Using cached texttable-1.7.0-py2.py3-none-any.whl (10 kB)
Using cached urllib3-2.0.7-py3-none-any.whl (124 kB)
Using cached websocket_client-1.6.4-py3-none-any.whl (57 kB)
Using cached zipp-3.17.0-py3-none-any.whl (7.4 kB)
Using cached beartype-0.16.4-py3-none-any.whl (819 kB)
Using cached click_option_group-0.5.6-py3-none-any.whl (12 kB)
Using cached Markdown-3.5.1-py3-none-any.whl (102 kB)
Using cached rdflib-7.0.0-py3-none-any.whl (531 kB)
Using cached uritools-4.0.2-py3-none-any.whl (10 kB)
Using cached wcwidth-0.2.9-py2.py3-none-any.whl (102 kB)
Using cached cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl (177 kB)
Using cached MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl (17 kB)
Using cached pyparsing-3.1.1-py3-none-any.whl (103 kB)
Using cached smmap-5.0.1-py3-none-any.whl (24 kB)
Using cached soupsieve-2.5-py3-none-any.whl (36 kB)
Building wheels for collected packages: checkov, aiohttp
Building wheel for checkov (pyproject.toml): started
Building wheel for checkov (pyproject.toml): finished with status 'done'
Created wheel for checkov: filename=checkov-3.0.16-py3-none-any.whl size=1938771 sha256=ec83e3c0b832c9fc70641d03f5e2260fecaa6d3e63a816ed8c292ff112370488
Stored in directory: /private/var/folders/ql/0t19gxzn0bgc8lyjtdcc0xr80000gq/T/pip-ephem-wheel-cache-p0h18a1z/wheels/9b/58/61/fb14423de2da67c3236e26eee457dfbd0bad3eb94f9df2dc0c
Building wheel for aiohttp (pyproject.toml): started
Building wheel for aiohttp (pyproject.toml): finished with status 'error'
Successfully built checkov
Failed to build aiohttp
stderr:
error: subprocess-exited-with-error
× Building wheel for aiohttp (pyproject.toml) did not run successfully.
│ exit code: 1
╰─> [188 lines of output]
*********************
* Accelerated build *
*********************
running bdist_wheel
running build
running build_py
creating build
creating build/lib.macosx-13-arm64-cpython-312
creating build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_ws.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/worker.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/multipart.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_response.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/client_ws.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/test_utils.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/tracing.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_exceptions.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_middlewares.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/http_exceptions.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_app.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/streams.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_protocol.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/log.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/client.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_urldispatcher.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_request.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/http_websocket.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/client_proto.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/locks.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/__init__.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_runner.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_server.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/base_protocol.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/payload.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/client_reqrep.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/http.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_log.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/resolver.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/formdata.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/payload_streamer.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_routedef.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/connector.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/client_exceptions.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/typedefs.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/hdrs.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/web_fileresponse.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/http_writer.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/tcp_helpers.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/helpers.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/http_parser.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/cookiejar.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/pytest_plugin.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/abc.py -> build/lib.macosx-13-arm64-cpython-312/aiohttp
running egg_info
writing aiohttp.egg-info/PKG-INFO
writing dependency_links to aiohttp.egg-info/dependency_links.txt
writing requirements to aiohttp.egg-info/requires.txt
writing top-level names to aiohttp.egg-info/top_level.txt
reading manifest file 'aiohttp.egg-info/SOURCES.txt'
reading manifest template 'MANIFEST.in'
warning: no files found matching 'aiohttp' anywhere in distribution
warning: no previously-included files matching '*.pyc' found anywhere in distribution
warning: no previously-included files matching '*.pyd' found anywhere in distribution
warning: no previously-included files matching '*.so' found anywhere in distribution
warning: no previously-included files matching '*.lib' found anywhere in distribution
warning: no previously-included files matching '*.dll' found anywhere in distribution
warning: no previously-included files matching '*.a' found anywhere in distribution
warning: no previously-included files matching '*.obj' found anywhere in distribution
warning: no previously-included files found matching 'aiohttp/*.html'
no previously-included directories found matching 'docs/_build'
adding license file 'LICENSE.txt'
writing manifest file 'aiohttp.egg-info/SOURCES.txt'
copying aiohttp/_cparser.pxd -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_find_header.pxd -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_headers.pxi -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_helpers.pyi -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_helpers.pyx -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_http_parser.pyx -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_http_writer.pyx -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/_websocket.pyx -> build/lib.macosx-13-arm64-cpython-312/aiohttp
copying aiohttp/py.typed -> build/lib.macosx-13-arm64-cpython-312/aiohttp
creating build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_cparser.pxd.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_find_header.pxd.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_helpers.pyi.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_helpers.pyx.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_http_parser.pyx.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_http_writer.pyx.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/_websocket.pyx.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
copying aiohttp/.hash/hdrs.py.hash -> build/lib.macosx-13-arm64-cpython-312/aiohttp/.hash
running build_ext
building 'aiohttp._websocket' extension
creating build/temp.macosx-13-arm64-cpython-312
creating build/temp.macosx-13-arm64-cpython-312/aiohttp
clang -fno-strict-overflow -Wsign-compare -Wunreachable-code -fno-common -dynamic -DNDEBUG -g -O3 -Wall -isysroot /Library/Developer/CommandLineTools/SDKs/MacOSX13.sdk -I/Users/michaelvoet/.cache/pre-commit/repor0bcn38a/py_env-python3.12/include -I/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12 -c aiohttp/_websocket.c -o build/temp.macosx-13-arm64-cpython-312/aiohttp/_websocket.o
aiohttp/_websocket.c:1475:17: warning: 'Py_OptimizeFlag' is deprecated [-Wdeprecated-declarations]
if (unlikely(!Py_OptimizeFlag)) {
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/cpython/pydebug.h:13:1: note: 'Py_OptimizeFlag' has been explicitly marked deprecated here
Py_DEPRECATED(3.12) PyAPI_DATA(int) Py_OptimizeFlag;
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/pyport.h:317:54: note: expanded from macro 'Py_DEPRECATED'
#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__))
^
aiohttp/_websocket.c:2680:27: warning: 'ma_version_tag' is deprecated [-Wdeprecated-declarations]
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
^
aiohttp/_websocket.c:1118:65: note: expanded from macro '__PYX_GET_DICT_VERSION'
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/cpython/dictobject.h:22:5: note: 'ma_version_tag' has been explicitly marked deprecated here
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/pyport.h:317:54: note: expanded from macro 'Py_DEPRECATED'
#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__))
^
aiohttp/_websocket.c:2692:36: warning: 'ma_version_tag' is deprecated [-Wdeprecated-declarations]
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
^
aiohttp/_websocket.c:1118:65: note: expanded from macro '__PYX_GET_DICT_VERSION'
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/cpython/dictobject.h:22:5: note: 'ma_version_tag' has been explicitly marked deprecated here
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/pyport.h:317:54: note: expanded from macro 'Py_DEPRECATED'
#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__))
^
aiohttp/_websocket.c:2696:56: warning: 'ma_version_tag' is deprecated [-Wdeprecated-declarations]
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
^
aiohttp/_websocket.c:1118:65: note: expanded from macro '__PYX_GET_DICT_VERSION'
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/cpython/dictobject.h:22:5: note: 'ma_version_tag' has been explicitly marked deprecated here
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/pyport.h:317:54: note: expanded from macro 'Py_DEPRECATED'
#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__))
^
aiohttp/_websocket.c:2741:9: warning: 'ma_version_tag' is deprecated [-Wdeprecated-declarations]
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
^
aiohttp/_websocket.c:1125:16: note: expanded from macro '__PYX_PY_DICT_LOOKUP_IF_MODIFIED'
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
^
aiohttp/_websocket.c:1118:65: note: expanded from macro '__PYX_GET_DICT_VERSION'
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/cpython/dictobject.h:22:5: note: 'ma_version_tag' has been explicitly marked deprecated here
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/pyport.h:317:54: note: expanded from macro 'Py_DEPRECATED'
#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__))
^
aiohttp/_websocket.c:2741:9: warning: 'ma_version_tag' is deprecated [-Wdeprecated-declarations]
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
^
aiohttp/_websocket.c:1129:30: note: expanded from macro '__PYX_PY_DICT_LOOKUP_IF_MODIFIED'
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
^
aiohttp/_websocket.c:1118:65: note: expanded from macro '__PYX_GET_DICT_VERSION'
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/cpython/dictobject.h:22:5: note: 'ma_version_tag' has been explicitly marked deprecated here
Py_DEPRECATED(3.12) uint64_t ma_version_tag;
^
/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/include/python3.12/pyport.h:317:54: note: expanded from macro 'Py_DEPRECATED'
#define Py_DEPRECATED(VERSION_UNUSED) __attribute__((__deprecated__))
^
aiohttp/_websocket.c:3042:55: error: no member named 'ob_digit' in 'struct _longobject'
const digit* digits = ((PyLongObject*)x)->ob_digit;
~~~~~~~~~~~~~~~~~~ ^
aiohttp/_websocket.c:3097:55: error: no member named 'ob_digit' in 'struct _longobject'
const digit* digits = ((PyLongObject*)x)->ob_digit;
~~~~~~~~~~~~~~~~~~ ^
aiohttp/_websocket.c:3238:55: error: no member named 'ob_digit' in 'struct _longobject'
const digit* digits = ((PyLongObject*)x)->ob_digit;
~~~~~~~~~~~~~~~~~~ ^
aiohttp/_websocket.c:3293:55: error: no member named 'ob_digit' in 'struct _longobject'
const digit* digits = ((PyLongObject*)x)->ob_digit;
~~~~~~~~~~~~~~~~~~ ^
aiohttp/_websocket.c:3744:47: error: no member named 'ob_digit' in 'struct _longobject'
const digit* digits = ((PyLongObject*)b)->ob_digit;
~~~~~~~~~~~~~~~~~~ ^
6 warnings and 5 errors generated.
error: command '/usr/bin/clang' failed with exit code 1
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
ERROR: Failed building wheel for aiohttp
ERROR: Could not build wheels for aiohttp, which is required to install pyproject.toml-based projects
```
**Desktop (please complete the following information):**
- OS: macOS Ventura (13.5)
- Checkov Version: 3.0.16
- Python version: 3.11.6
  - Clang version: Clang 14.0.3 (clang-1403.0.22.14.1)
- Pre-commit version: 3.5.0
**Additional context**
Add any other context about the problem here (e.g. code snippets).
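
For reference, the compiler errors above ("no member named 'ob_digit'") come from aiohttp 3.8.6's pre-generated Cython C sources, which rely on CPython internals that changed in Python 3.12, so the wheel cannot be built in the Python 3.12 environment pre-commit created. A possible workaround sketch, assuming a Python 3.11 interpreter is installed locally, is to pin the hook's interpreter so pre-commit builds its environment with it; the rev, id and args below simply mirror the config shown above:

```
- repo: https://github.com/bridgecrewio/checkov
  rev: 3.0.16
  hooks:
    - id: checkov
      language_version: python3.11  # assumes python3.11 is available on PATH
      args: [--config-file, .checkov/k8s.yaml]
```

Once the hook points at a checkov release whose dependencies build cleanly on Python 3.12, the pin can be dropped.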
| 2023-07-30T16:40:16 | -1.0 |
|
bridgecrewio/checkov | 5,413 | bridgecrewio__checkov-5413 | [
"5394"
] | a7927f6cf08386d09de6cdb9e58ff067217bb1cf | diff --git a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py b/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py
deleted file mode 100644
--- a/checkov/terraform/checks/resource/aws/DMSS3DefinesIntransitEncryption.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from __future__ import annotations
-
-from typing import Any
-
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
-from checkov.common.models.enums import CheckCategories
-
-
-class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
- def __init__(self) -> None:
- name = "Ensure DMS S3 defines in-transit encryption"
- id = "CKV_AWS_299"
- supported_resources = ("aws_dms_s3_endpoint",)
- categories = (CheckCategories.ENCRYPTION,)
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
-
- def get_inspected_key(self) -> str:
- return 'ssl_mode'
-
- def get_expected_values(self) -> list[Any]:
- return ["require", "verify-ca", "verify-full"]
-
-
-check = DMSS3DefinesIntransitEncryption()
| diff --git a/performance_tests/test_checkov_performance.py b/performance_tests/test_checkov_performance.py
--- a/performance_tests/test_checkov_performance.py
+++ b/performance_tests/test_checkov_performance.py
@@ -18,7 +18,7 @@
'repo_name': 'terraform-aws-components',
'threshold': {
"Darwin": 19.0,
- "Linux": 13.0,
+ "Linux": 8.0,
"Windows": 15.0,
}
},
@@ -26,7 +26,7 @@
'repo_name': 'aws-cloudformation-templates',
'threshold': {
"Darwin": 350.0,
- "Linux": 240.0,
+ "Linux": 170.0,
"Windows": 300.0,
}
},
@@ -34,7 +34,7 @@
'repo_name': 'kubernetes-yaml-templates',
'threshold': {
"Darwin": 550.0,
- "Linux": 280.0,
+ "Linux": 180.0,
"Windows": 500.0,
}
}
diff --git a/tests/kustomize/test_runner_image_referencer.py b/tests/kustomize/test_runner_image_referencer.py
--- a/tests/kustomize/test_runner_image_referencer.py
+++ b/tests/kustomize/test_runner_image_referencer.py
@@ -21,7 +21,7 @@
RESOURCES_PATH = Path(__file__).parent / "runner/resources"
-@pytest.mark.xfail(sys.version_info.minor == 9, reason="for some reason this test is flaky on Python 3.9")
+@pytest.mark.xfail(reason="This is probably connected to the OS + kustomize version")
@pytest.mark.skipif(os.name == "nt" or not kustomize_exists(), reason="kustomize not installed or Windows OS")
@pytest.mark.parametrize("allow_kustomize_file_edits, code_lines", [
(True, "18-34"),
diff --git a/tests/terraform/checks/resource/aws/example_DMSS3DefinesIntransitEncryption/main.tf b/tests/terraform/checks/resource/aws/example_DMSS3DefinesIntransitEncryption/main.tf
deleted file mode 100644
--- a/tests/terraform/checks/resource/aws/example_DMSS3DefinesIntransitEncryption/main.tf
+++ /dev/null
@@ -1,28 +0,0 @@
-resource "aws_dms_s3_endpoint" "fail" {
- endpoint_id = "donnedtipi"
- endpoint_type = "target"
- bucket_name = "beckut_name"
- service_access_role_arn = aws_iam_role.example.arn
- depends_on = [aws_iam_role_policy.example]
-}
-
-resource "aws_dms_s3_endpoint" "fail2" {
- endpoint_id = "donnedtipi"
- endpoint_type = "target"
- bucket_name = "beckut_name"
- service_access_role_arn = aws_iam_role.example.arn
- ssl_mode="none"
- kms_key_arn=""
- depends_on = [aws_iam_role_policy.example]
-}
-
-resource "aws_dms_s3_endpoint" "pass" {
- endpoint_id = "donnedtipi"
- endpoint_type = "target"
- bucket_name = "beckut_name"
- service_access_role_arn = aws_iam_role.example.arn
- ssl_mode="require"
- kms_key_arn="arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
- depends_on = [aws_iam_role_policy.example]
-}
-
diff --git a/tests/terraform/checks/resource/aws/test_DMSS3DefinesIntransitEncryption.py b/tests/terraform/checks/resource/aws/test_DMSS3DefinesIntransitEncryption.py
deleted file mode 100644
--- a/tests/terraform/checks/resource/aws/test_DMSS3DefinesIntransitEncryption.py
+++ /dev/null
@@ -1,39 +0,0 @@
-import os
-import unittest
-
-from checkov.runner_filter import RunnerFilter
-from checkov.terraform.checks.resource.aws.DMSS3DefinesIntransitEncryption import check
-from checkov.terraform.runner import Runner
-
-
-class TestDMSS3DefinesIntransitEncryption(unittest.TestCase):
- def test(self):
- runner = Runner()
- current_dir = os.path.dirname(os.path.realpath(__file__))
-
- test_files_dir = current_dir + "/example_DMSS3DefinesIntransitEncryption"
- report = runner.run(root_folder=test_files_dir, runner_filter=RunnerFilter(checks=[check.id]))
- summary = report.get_summary()
-
- passing_resources = {
- "aws_dms_s3_endpoint.pass",
- }
- failing_resources = {
- "aws_dms_s3_endpoint.fail",
- "aws_dms_s3_endpoint.fail2",
- }
-
- passed_check_resources = set([c.resource for c in report.passed_checks])
- failed_check_resources = set([c.resource for c in report.failed_checks])
-
- self.assertEqual(summary["passed"], len(passing_resources))
- self.assertEqual(summary["failed"], len(failing_resources))
- self.assertEqual(summary["skipped"], 0)
- self.assertEqual(summary["parsing_errors"], 0)
-
- self.assertEqual(passing_resources, passed_check_resources)
- self.assertEqual(failing_resources, failed_check_resources)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/tests/terraform/runner/test_runner.py b/tests/terraform/runner/test_runner.py
--- a/tests/terraform/runner/test_runner.py
+++ b/tests/terraform/runner/test_runner.py
@@ -325,7 +325,7 @@ def test_no_missing_ids(self):
reverse=True,
key=lambda s: int(s.split('_')[-1])
)
- for i in range(1, len(aws_checks) + 7):
+ for i in range(1, len(aws_checks) + 8):
if f'CKV_AWS_{i}' == 'CKV_AWS_4':
# CKV_AWS_4 was deleted due to https://github.com/bridgecrewio/checkov/issues/371
continue
@@ -338,6 +338,10 @@ def test_no_missing_ids(self):
if f'CKV_AWS_{i}' == 'CKV_AWS_52':
# CKV_AWS_52 was deleted since it cannot be toggled in terraform.
continue
+ if f'CKV_AWS_{i}' == 'CKV_AWS_299':
+ # CKV_AWS_299 was deleted because AWS doesn't support it and seems to be a bug in Terraform.
+ # https://github.com/hashicorp/terraform-provider-aws/issues/31821
+ continue
self.assertIn(f'CKV_AWS_{i}', aws_checks, msg=f'The new AWS violation should have the ID "CKV_AWS_{i}"')
gcp_checks = sorted(
| CKV_AWS_299 Fails for Values That Can't be Set
**Describe the issue**
Failing on - Check: CKV_AWS_299: "Ensure DMS S3 defines in-transit encryption"
When the endpoint of a DMS Migration Task is an S3 endpoint, there is no option in AWS to specify ssl_mode. AWS does not have an ssl_mode for DMS endpoints of type S3. Setting this value via the extra connection attributes does nothing. Also, setting this in Terraform does nothing. I may be crazy, but I think DMS transfers to S3 use SSL by default.
**Examples**
```
from __future__ import annotations
from typing import Any
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
from checkov.common.models.enums import CheckCategories
class DMSS3DefinesIntransitEncryption(BaseResourceValueCheck):
def __init__(self) -> None:
name = "Ensure DMS S3 defines in-transit encryption"
id = "CKV_AWS_299"
supported_resources = ("aws_dms_s3_endpoint",)
categories = (CheckCategories.ENCRYPTION,)
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
return 'ssl_mode'
def get_expected_values(self) -> list[Any]:
return ["require", "verify-ca", "verify-full"]
check = DMSS3DefinesIntransitEncryption()
```
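
For completeness, a minimal Terraform sketch of the conflict described above (argument names follow the `aws_dms_s3_endpoint` resource; the endpoint ID, bucket name and role reference are placeholders):

```
# Sketch of the catch-22: AWS rejects any ssl_mode other than "none" for S3
# endpoints with InvalidParameterCombination, yet "none" fails CKV_AWS_299.
resource "aws_dms_s3_endpoint" "example" {
  endpoint_id             = "example-endpoint" # placeholder
  endpoint_type           = "target"
  bucket_name             = "example-bucket"   # placeholder
  service_access_role_arn = aws_iam_role.example.arn

  ssl_mode = "none" # the only value AWS accepts here, but it fails the check
}
```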
**Version (please complete the following information):**
- Checkov Version 2.3.343
**Additional context**
This property does nothing in Terraform when the aws_dms_endpoint is S3, and I filed an issue over there; however, it was closed with no fix.
| hey @rossbush thanks for reaching out.
I think you are right. I also checked, and my guess is it is a copy-paste error by Terraform; the normal DMS endpoint offers this functionality, but the S3 one does not.
hey @JamesWoolfenden do you remember anything from when you added it?
> hey @rossbush thanks for reaching out.
>
> I think you are right. I also checked, and my guess is it is a copy-paste error by Terraform; the normal DMS endpoint offers this functionality, but the S3 one does not.
>
> hey @JamesWoolfenden do you remember anything from when you added it?
Greetings @gruebel, this is a quasi catch-22. When the ssl_mode value is set to anything other than "none", AWS returns an InvalidParameterCombination error when the endpoint is of type S3.
[See bug logged with terraform](https://github.com/hashicorp/terraform-provider-aws/issues/31821). On one hand, to pass Checkov, the value is set to anything other than "none", but then an AWS exception is encountered during apply.
To get around the AWS exception, we set the value to "none", but then the Checkov rule fails.
Currently, corporate has made this a soft fail for our pipelines; however, we are getting a heap of warnings, and to me it just seems like an obsolete rule. There is no value allowed other than "none" for the resource covered by `DMSS3DefinesIntransitEncryption`.
| 2023-08-04T13:26:17 | -1.0 |
bridgecrewio/checkov | 5,431 | bridgecrewio__checkov-5431 | [
"5428"
] | 43ce0d75e908702ff788a2f928c59510830e1fb6 | diff --git a/checkov/common/output/spdx.py b/checkov/common/output/spdx.py
--- a/checkov/common/output/spdx.py
+++ b/checkov/common/output/spdx.py
@@ -1,21 +1,23 @@
from __future__ import annotations
+
import itertools
import logging
-
-from checkov.common.output.extra_resource import ExtraResource
-from checkov.common.output.record import SCA_PACKAGE_SCAN_CHECK_NAME, Record
-from license_expression import get_spdx_licensing
-
+from datetime import datetime, timezone
from io import StringIO
+from uuid import uuid4
-from spdx.creationinfo import Tool, Organization
-from spdx.document import Document
-from spdx.license import License
-from spdx.package import Package
-from spdx.writers.tagvalue import write_document
+from license_expression import get_spdx_licensing
+from spdx_tools.spdx.model.actor import Actor, ActorType
+from spdx_tools.spdx.model.document import Document, CreationInfo
+from spdx_tools.spdx.model.package import Package
+from spdx_tools.spdx.model.spdx_none import SpdxNone
+from spdx_tools.spdx.writer.tagvalue.tagvalue_writer import write_document
+from checkov.common.output.extra_resource import ExtraResource
+from checkov.common.output.record import SCA_PACKAGE_SCAN_CHECK_NAME, Record
from checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES
from checkov.common.output.report import Report
+from checkov.version import version
DOCUMENT_NAME = "checkov-sbom"
SPDXREF = "SPDXRef-"
@@ -27,84 +29,97 @@ def __init__(self, repo_id: str | None, reports: list[Report]):
self.reports = reports
self.document = self.create_document()
+ self.packages: list[Package] = []
+
+ # each entry looks like '{file_name}#{package_name}#{package_version}'
+ self._added_packages_cache: set[str] = set()
def create_document(self) -> Document:
- document = Document(
- version="SPDX2.3",
- data_license=License.from_identifier(identifier="CC0-1.0"),
- name=DOCUMENT_NAME,
+ creation_info = CreationInfo(
+ spdx_version="SPDX-2.3",
spdx_id="SPDXRef-DOCUMENT",
- namespace=f"{self.repo_id}{DOCUMENT_NAME}",
+ name=DOCUMENT_NAME,
+ data_license="CC0-1.0",
+ document_namespace=f"https://spdx.org/spdxdocs/{DOCUMENT_NAME}-{version}-{uuid4()}",
+ creators=[
+ Actor(ActorType.TOOL, "checkov"),
+ Actor(ActorType.ORGANIZATION, "bridgecrew", "meet@bridgecrew.io"),
+ ],
+ created=datetime.now(timezone.utc),
)
- document.creation_info.set_created_now()
- document.creation_info.add_creator(Tool(name="checkov"))
- document.creation_info.add_creator(Organization(name="bridgecrew"))
-
- return document
+ return Document(creation_info=creation_info)
def get_tag_value_output(self) -> str:
output = StringIO()
self.add_packages_to_doc()
- write_document(document=self.document, out=output, validate=True) # later set to True
+ write_document(document=self.document, text_output=output)
return output.getvalue()
def validate_licenses(self, package: Package, license_: str) -> None:
- if license_ and license_ not in ['Unknown license', 'NOT_FOUND', 'Unknown']:
+ if license_ and license_ not in ["Unknown license", "NOT_FOUND", "Unknown"]:
split_licenses = license_.split(",")
licenses = []
for lic in split_licenses:
lic = lic.strip('"')
try:
- is_spdx_license = License(get_spdx_licensing().parse(lic), lic)
- licenses.append(is_spdx_license)
+ licenses.append(get_spdx_licensing().parse(lic))
except Exception as e:
logging.info(f"error occured when trying to parse the license:{split_licenses} due to error {e}")
- package.licenses_from_files = licenses
+ package.license_info_from_files = licenses
def create_package(self, check: Record | ExtraResource) -> Package:
package_data = check.vulnerability_details
if not package_data:
# this shouldn't happen
logging.error(f"Check {check.resource} doesn't have 'vulnerability_details' set")
- return Package(name="unknown")
+ return Package(name="unknown", spdx_id=f"{SPDXREF}unknown", download_location=SpdxNone())
+
+ package_name = package_data.get("package_name")
+ if not package_name:
+ # this shouldn't happen
+ logging.error(f"Package {package_data} doesn't have 'package_name' set")
+ return Package(name="unknown", spdx_id=f"{SPDXREF}unknown", download_location=SpdxNone())
- package_name = package_data.get('package_name')
package = Package(
name=package_name,
spdx_id=f"{SPDXREF}{package_name}",
- version=package_data['package_version'],
- download_location='N/A',
- file_name=check.file_path
+ version=package_data["package_version"],
+ download_location=SpdxNone(),
+ file_name=check.file_path,
)
- license_ = package_data.get('licenses', "")
+ license_ = package_data.get("licenses", "")
self.validate_licenses(package=package, license_=license_)
return package
def add_packages_to_doc(self) -> None:
- packages_set = set()
for report in self.reports:
for check in itertools.chain(report.passed_checks, report.skipped_checks):
if report.check_type in SCA_CHECKTYPES and check.check_name != SCA_PACKAGE_SCAN_CHECK_NAME:
continue
package = self.create_package(check)
- if package not in packages_set:
- packages_set.add(package)
+ self.add_new_package_to_list(package)
for check in report.failed_checks:
if report.check_type in SCA_CHECKTYPES and check.check_name != SCA_PACKAGE_SCAN_CHECK_NAME:
continue
package = self.create_package(check)
- if package not in packages_set:
- packages_set.add(package)
+ self.add_new_package_to_list(package)
for resource in sorted(report.extra_resources):
package = self.create_package(resource)
- if package not in packages_set:
- packages_set.add(package)
+ self.add_new_package_to_list(package)
+
+ if self.packages:
+ self.document.packages = self.packages
+
+ def add_new_package_to_list(self, package: Package) -> None:
+ """Adds a package to the list, if it not exists"""
- if packages_set:
- self.document.packages = list(packages_set)
+ package_cache_entry = f"{package.file_name}#{package.name}#{package.version}"
+ if package_cache_entry not in self._added_packages_cache:
+ self.packages.append(package)
+ self._added_packages_cache.add(package_cache_entry)
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -109,8 +109,8 @@ def run(self) -> None:
"requests>=2.27.0",
"yarl",
"openai",
- "spdx-tools<0.8.0",
- "license-expression==30.1.0",
+ "spdx-tools>=0.8.0,<0.9.0",
+ "license-expression",
],
dependency_links=[], # keep it empty, needed for pipenv-setup
license="Apache License 2.0",
| diff --git a/tests/common/output/test_spdx.py b/tests/common/output/test_spdx.py
new file mode 100644
--- /dev/null
+++ b/tests/common/output/test_spdx.py
@@ -0,0 +1,110 @@
+from freezegun import freeze_time
+
+from checkov.common.bridgecrew.check_type import CheckType
+from checkov.common.output.extra_resource import ExtraResource
+from checkov.common.output.report import Report
+from checkov.common.output.spdx import SPDX
+from checkov.common.sca.output import create_report_cve_record
+
+
+@freeze_time("2022-12-24")
+def test_sca_package_output():
+ # given
+ rootless_file_path = "requirements.txt"
+ file_abs_path = "/path/to/requirements.txt"
+ check_class = "checkov.sca_package.scanner.Scanner"
+ vulnerability_details = {
+ "id": "CVE-2019-19844",
+ "status": "fixed in 3.0.1, 2.2.9, 1.11.27",
+ "cvss": 9.8,
+ "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
+ "description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover. ...",
+ "severity": "moderate",
+ "packageName": "django",
+ "packageVersion": "1.2",
+ "link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
+ "riskFactors": ["Attack complexity: low", "Attack vector: network", "Critical severity", "Has fix"],
+ "impactedVersions": ["<1.11.27"],
+ "publishedDate": "2019-12-18T20:15:00+01:00",
+ "discoveredDate": "2019-12-18T19:15:00Z",
+ "fixDate": "2019-12-18T20:15:00+01:00",
+ }
+
+ record = create_report_cve_record(
+ rootless_file_path=rootless_file_path,
+ file_abs_path=file_abs_path,
+ check_class=check_class,
+ vulnerability_details=vulnerability_details,
+ licenses="OSI_BDS",
+ package={"package_registry": "https://registry.npmjs.org/", "is_private_registry": False},
+ )
+ # also add a BC_VUL_2 record
+ bc_record = create_report_cve_record(
+ rootless_file_path=rootless_file_path,
+ file_abs_path=file_abs_path,
+ check_class=check_class,
+ vulnerability_details=vulnerability_details,
+ licenses="OSI_BDS",
+ package={"package_registry": "https://registry.npmjs.org/", "is_private_registry": False},
+ )
+ bc_record.check_id = "BC_VUL_2"
+
+ report = Report(CheckType.SCA_PACKAGE)
+ report.add_resource(record.resource)
+ report.add_record(record)
+ report.add_record(bc_record)
+
+ report.extra_resources.add(
+ ExtraResource(
+ file_abs_path=file_abs_path,
+ file_path=f"/{rootless_file_path}",
+ resource=f"{rootless_file_path}.testpkg",
+ vulnerability_details={"package_name": "testpkg", "package_version": "1.1.1", "licenses": "MIT"},
+ )
+ )
+
+ # when
+ spdx = SPDX(repo_id="example", reports=[report])
+
+ # override dynamic data
+ spdx.document.creation_info.document_namespace = "https://spdx.org/spdxdocs/checkov-sbom-9.9.9"
+
+ # then
+ output = spdx.get_tag_value_output()
+
+    # compare against the expected tag-value output
+ assert output == "".join(
+ [
+ "## Document Information\n",
+ "SPDXVersion: SPDX-2.3\n",
+ "DataLicense: CC0-1.0\n",
+ "SPDXID: SPDXRef-DOCUMENT\n",
+ "DocumentName: checkov-sbom\n",
+ "DocumentNamespace: https://spdx.org/spdxdocs/checkov-sbom-9.9.9\n",
+ "\n",
+ "## Creation Information\n",
+ "Creator: Tool: checkov\n",
+ "Creator: Organization: bridgecrew (meet@bridgecrew.io)\n",
+ "Created: 2022-12-24T00:00:00+00:00Z\n",
+ "\n",
+ "## Package Information\n",
+ "PackageName: django\n",
+ "SPDXID: SPDXRef-django\n",
+ "PackageVersion: 1.2\n",
+ "PackageFileName: /requirements.txt\n",
+ "PackageDownloadLocation: NONE\n",
+ "FilesAnalyzed: True\n",
+ "PackageLicenseInfoFromFiles: OSI_BDS\n",
+ "\n",
+ "## Package Information\n",
+ "PackageName: testpkg\n",
+ "SPDXID: SPDXRef-testpkg\n",
+ "PackageVersion: 1.1.1\n",
+ "PackageFileName: /requirements.txt\n",
+ "PackageDownloadLocation: NONE\n",
+ "FilesAnalyzed: True\n",
+ "PackageLicenseInfoFromFiles: MIT\n",
+ "\n",
+ "\n",
+ ]
+ )
| Support for spdx-tools >= 0.8
**Describe the issue**
The 0.8 version of the `spdx-tools` module lays the groundwork for the upcoming SPDX-3.0 implementation. Unfortunately, 0.8 contains breaking changes.
At first glance it looks like at least `common/output/spdx.py` needs some modifications.
**Version (please complete the following information):**
- Checkov Version: latest
**Additional context**
This issue is mainly relevant for distributions which are shipping `checkov` as package and not users who are running with a Python venv.
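For orientation, below is a minimal sketch of the 0.8-style tag-value flow that `common/output/spdx.py` has to move to, mirroring the patch above. The exact import locations are an assumption about the 0.8.x package layout and may need adjusting.

```python
# Minimal sketch of the spdx-tools >= 0.8 API used by the patch above.
# NOTE: the import paths are assumed for the 0.8.x layout and may differ slightly.
from datetime import datetime, timezone
from io import StringIO
from uuid import uuid4

from spdx_tools.spdx.model import Actor, ActorType, CreationInfo, Document
from spdx_tools.spdx.writer.tagvalue.tagvalue_writer import write_document

creation_info = CreationInfo(
    spdx_version="SPDX-2.3",
    spdx_id="SPDXRef-DOCUMENT",
    name="checkov-sbom",
    data_license="CC0-1.0",
    document_namespace=f"https://spdx.org/spdxdocs/checkov-sbom-{uuid4()}",
    creators=[Actor(ActorType.TOOL, "checkov")],
    created=datetime.now(timezone.utc),
)
document = Document(creation_info=creation_info)

output = StringIO()
write_document(document=document, text_output=output)  # the old 'out'/'validate' kwargs are gone
print(output.getvalue())
```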
| hey @fabaff thanks for reaching out.
We intentionally upper bounded it, because the `0.8.0` version will introduce breaking changes. So, thanks for letting us know, because I was not aware it was already released. I don't think it will be a heavy lift on our side, because it is just used to create the SPDX output 😄 | 2023-08-11T22:18:22 | -1.0 |
bridgecrewio/checkov | 5,468 | bridgecrewio__checkov-5468 | [
"5466"
] | e5e611861c2a8c18e714fa35dc0a6e53b62d819c | diff --git a/checkov/dockerfile/checks/ReferenceLatestTag.py b/checkov/dockerfile/checks/ReferenceLatestTag.py
--- a/checkov/dockerfile/checks/ReferenceLatestTag.py
+++ b/checkov/dockerfile/checks/ReferenceLatestTag.py
@@ -9,7 +9,7 @@
if TYPE_CHECKING:
from dockerfile_parse.parser import _Instruction
-MULTI_STAGE_PATTERN = re.compile(r"(\S+)\s+as\s+(\S+)", re.IGNORECASE)
+MULTI_STAGE_PATTERN = re.compile(r"(?:--platform=\S+\s+)?(\S+)\s+as\s+(\S+)", re.IGNORECASE)
class ReferenceLatestTag(BaseDockerfileCheck):
| diff --git a/tests/dockerfile/checks/example_ReferenceLatestTag/success_multi_stage_platform/Dockerfile b/tests/dockerfile/checks/example_ReferenceLatestTag/success_multi_stage_platform/Dockerfile
new file mode 100644
--- /dev/null
+++ b/tests/dockerfile/checks/example_ReferenceLatestTag/success_multi_stage_platform/Dockerfile
@@ -0,0 +1,5 @@
+FROM --platform=$BUILDPLATFORM python:3.11-slim AS build
+COPY test.sh /test.sh
+
+FROM build as run
+LABEL maintainer=checkov
diff --git a/tests/dockerfile/checks/test_ReferenceLatestTag.py b/tests/dockerfile/checks/test_ReferenceLatestTag.py
--- a/tests/dockerfile/checks/test_ReferenceLatestTag.py
+++ b/tests/dockerfile/checks/test_ReferenceLatestTag.py
@@ -20,7 +20,8 @@ def test(self):
"/success_multi_stage/Dockerfile.FROM",
"/success_multi_stage_capital/Dockerfile.FROM",
"/success_scratch/Dockerfile.FROM",
- "/success_multi_stage_scratch/Dockerfile.FROM"
+ "/success_multi_stage_scratch/Dockerfile.FROM",
+ "/success_multi_stage_platform/Dockerfile.FROM",
}
failing_resources = {
@@ -31,8 +32,8 @@ def test(self):
passed_check_resources = set([c.resource for c in report.passed_checks])
failed_check_resources = set([c.resource for c in report.failed_checks])
- self.assertEqual(summary["passed"], 5)
- self.assertEqual(summary["failed"], 2)
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
| [CKV_DOCKER_7] Erroneous failed check when --platform flag is used in multistage Dockerfile
**Describe the issue**
In the `CKV_DOCKER_7` check, when the `--platform` flag is used in a multistage Dockerfile, the check erroneously fails for images that were created within the current Dockerfile, for which no version pinning should be required.
**Examples**
The Dockerfile below is green
```
FROM golang:1.20-bullseye AS base
FROM base AS build
```
but as soon as a `--platform` flag is added, the check fails:
```
FROM --platform=$BUILDPLATFORM golang:1.20-bullseye AS base
FROM base AS build
```
**Version (please complete the following information):**
- Checkov Version 2.4.4
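For reference, the false positive can be reproduced with the two regex patterns alone. Both patterns are taken from `ReferenceLatestTag.py` (before and after the fix above); anchoring them with `.match()` is an assumption about how the check applies the pattern to the `FROM` value.

```python
# Demonstrates why the '--platform' prefix breaks the old multi-stage pattern.
import re

OLD_PATTERN = re.compile(r"(\S+)\s+as\s+(\S+)", re.IGNORECASE)
NEW_PATTERN = re.compile(r"(?:--platform=\S+\s+)?(\S+)\s+as\s+(\S+)", re.IGNORECASE)

from_value = "--platform=$BUILDPLATFORM golang:1.20-bullseye AS base"

print(OLD_PATTERN.match(from_value))           # None -> the 'base' stage alias is never recorded
print(NEW_PATTERN.match(from_value).groups())  # ('golang:1.20-bullseye', 'base')
```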
| 2023-08-21T16:13:52 | -1.0 |
|
bridgecrewio/checkov | 5,509 | bridgecrewio__checkov-5509 | [
"5504"
] | c5b59eac36303f3ba8b68d321a45b0e705bdc54f | diff --git a/checkov/terraform_json/parser.py b/checkov/terraform_json/parser.py
--- a/checkov/terraform_json/parser.py
+++ b/checkov/terraform_json/parser.py
@@ -94,8 +94,8 @@ def prepare_definition(definition: dict[str, Any]) -> dict[str, Any]:
if block_name == COMMENT_FIELD_NAME or block_name in LINE_FIELD_NAMES:
continue
- if block_type == BlockType.RESOURCE:
- # resource have an extra nested level resource_type -> resource_name -> resource_config
+ if block_type in (BlockType.RESOURCE, BlockType.DATA):
+ # data/resource have an extra nested level resource_type -> resource_name -> resource_config
for resource_name, resource_config in config.items():
if resource_name in IGNORE_FILED_NAMES:
continue
diff --git a/checkov/terraform_json/runner.py b/checkov/terraform_json/runner.py
--- a/checkov/terraform_json/runner.py
+++ b/checkov/terraform_json/runner.py
@@ -19,7 +19,7 @@
from checkov.terraform.checks.resource.registry import resource_registry
from checkov.terraform.graph_builder.local_graph import TerraformLocalGraph
from checkov.terraform.runner import Runner as TerraformRunner
-from checkov.terraform_json.utils import get_scannable_file_paths, TF_JSON_POSSIBLE_FILE_ENDINGS, create_definitions
+from checkov.terraform_json.utils import get_scannable_file_paths, create_definitions
if TYPE_CHECKING:
from checkov.common.graph.checks_infra.registry import BaseRegistry
@@ -48,7 +48,7 @@ def __init__(
external_registries=external_registries,
source=source,
)
- self.file_extensions = TF_JSON_POSSIBLE_FILE_ENDINGS # override what gets set from the TF runner
+ self.file_extensions = (".json",) # just '.json' not 'tf.json' otherwise it will be filtered out
self.graph_registry = get_graph_checks_registry(super().check_type)
self.definitions: dict[str, dict[str, Any]] = {} # type:ignore[assignment] # need to check, how to support subclass differences
| diff --git a/tests/terraform_json/examples/cdk.tf.json b/tests/terraform_json/examples/cdk.tf.json
--- a/tests/terraform_json/examples/cdk.tf.json
+++ b/tests/terraform_json/examples/cdk.tf.json
@@ -16,6 +16,18 @@
}
}
},
+ "data": {
+ "aws_caller_identity": {
+ "current": {
+ "//": {
+ "metadata": {
+ "path": "AppStack/current",
+ "uniqueId": "current"
+ }
+ }
+ }
+ }
+ },
"output": {
"bucket_arn": {
"value": "${aws_s3_bucket.bucket.arn}"
diff --git a/tests/terraform_json/test_graph_manager.py b/tests/terraform_json/test_graph_manager.py
--- a/tests/terraform_json/test_graph_manager.py
+++ b/tests/terraform_json/test_graph_manager.py
@@ -33,7 +33,7 @@ def test_build_graph_from_definitions(graph_connector):
)
# then
- assert len(local_graph.vertices) == 5
+ assert len(local_graph.vertices) == 6
bucket_idx = local_graph.vertices_block_name_map["resource"]["aws_s3_bucket.bucket"][0]
bucket = local_graph.vertices[bucket_idx]
@@ -42,8 +42,8 @@ def test_build_graph_from_definitions(graph_connector):
assert bucket.id == "aws_s3_bucket.bucket"
assert bucket.source == "Terraform"
assert bucket.attributes[CustomAttributes.RESOURCE_TYPE] == ["aws_s3_bucket"]
- assert bucket.attributes[START_LINE] == 34
- assert bucket.attributes[END_LINE] == 53
+ assert bucket.attributes[START_LINE] == 46
+ assert bucket.attributes[END_LINE] == 65
assert bucket.config == {
"aws_s3_bucket": {
"bucket": {
@@ -53,32 +53,25 @@ def test_build_graph_from_definitions(graph_connector):
{
"comment": "Access logging not needed",
"id": "CKV_AWS_18",
- "__startline__": 38,
- "__endline__": 41,
+ "__startline__": 50,
+ "__endline__": 53,
}
],
- "__startline__": 36,
- "__endline__": 43,
+ "__startline__": 48,
+ "__endline__": 55,
},
"metadata": {
"path": "AppStack/bucket",
"uniqueId": "bucket",
- "__startline__": 44,
- "__endline__": 47,
+ "__startline__": 56,
+ "__endline__": 59,
},
- "__startline__": 35,
- "__endline__": 48,
+ "__startline__": 47,
+ "__endline__": 60,
},
- "tags": [
- {
- "Name": "example",
- "Private": "true",
- "__startline__": 49,
- "__endline__": 52,
- }
- ],
- "__startline__": 34,
- "__endline__": 53,
+ "tags": [{"Name": "example", "Private": "true", "__startline__": 61, "__endline__": 64}],
+ "__startline__": 46,
+ "__endline__": 65,
"__address__": "aws_s3_bucket.bucket",
}
}
| terraform_json not working for cdktf generated json
**Describe the issue**
The `terraform_json` framework is not working for cdktf-generated JSON.
**Examples**
```json
{
"//": {
"metadata": {
"backend": "s3",
"stackName": "prod-zone",
"version": "0.17.3"
},
"outputs": {
"sbc-prod-zone": {
"certificate_arn": "certificate_arn",
"domain": "domain",
"global_certificate_arn": "global_certificate_arn",
"nameservers": "nameservers",
"zone_id": "zone_id"
}
}
},
"data": {
"aws_caller_identity": {
"current": {
"//": {
"metadata": {
"path": "sbc-prod-zone/current",
"uniqueId": "current"
}
}
}
}
},
"output": {
"certificate_arn": {
"value": "${aws_acm_certificate.eu_certificate.arn}"
},
"domain": {
"value": "${aws_route53_zone.zone.name}"
},
"global_certificate_arn": {
"value": "${aws_acm_certificate.global_certificate.arn}"
},
"nameservers": {
"value": "${aws_route53_zone.zone.name_servers}"
},
"zone_id": {
"value": "${aws_route53_zone.zone.zone_id}"
}
},
"provider": {
"aws": [
{
"region": "eu-central-1"
},
{
"alias": "us",
"region": "us-east-1"
}
]
},
"resource": {
"aws_acm_certificate": {
"eu_certificate": {
"//": {
"metadata": {
"path": "sbc-prod-zone/eu_certificate",
"uniqueId": "eu_certificate"
}
},
"domain_name": "${aws_route53_zone.zone.name}",
"lifecycle": {
"create_before_destroy": true
},
"subject_alternative_names": [
"*.${aws_route53_zone.zone.name}"
],
"validation_method": "DNS"
},
"global_certificate": {
"//": {
"metadata": {
"path": "sbc-prod-zone/global_certificate",
"uniqueId": "global_certificate"
}
},
"domain_name": "${aws_route53_zone.zone.name}",
"lifecycle": {
"create_before_destroy": true
},
"provider": "aws.us",
"subject_alternative_names": [
"*.${aws_route53_zone.zone.name}"
],
"validation_method": "DNS"
}
},
"aws_route53_record": {
"dummy": {
"//": {
"metadata": {
"path": "sbc-prod-zone/dummy",
"uniqueId": "dummy"
}
},
"name": "${aws_route53_zone.zone.name}",
"records": [
"1.2.3.4"
],
"ttl": 300,
"type": "A",
"zone_id": "${aws_route53_zone.zone.zone_id}"
}
},
"aws_route53_zone": {
"zone": {
"//": {
"metadata": {
"path": "sbc-prod-zone/zone",
"uniqueId": "zone"
}
},
"name": "prod.stabl.cloud"
}
}
},
"terraform": {
"backend": {
"s3": {
"bucket": "terraform-state-145128234233",
"dynamodb_table": "terraform-locks",
"encrypt": true,
"key": "zone/terraform.tfstate",
"region": "eu-central-1"
}
},
"required_providers": {
"aws": {
"source": "aws",
"version": "4.67.0"
}
}
}
}
```
**Exception Trace**
```sh
LOG_LEVEL=DEBUG checkov -d t --framework terraform_json -o json
2023-08-30 07:41:20,833 [MainThread ] [DEBUG] Leveraging the bundled IAM Definition.
2023-08-30 07:41:20,833 [MainThread ] [DEBUG] Leveraging the IAM definition at /usr/local/lib/python3.9/dist-packages/policy_sentry/shared/data/iam-definition.json
2023-08-30 07:41:21,105 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730> with order 1
2023-08-30 07:41:21,105 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,105 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>]
2023-08-30 07:41:21,106 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0> with order 11
2023-08-30 07:41:21,106 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,106 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0>]
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f12d460f460> with order 0
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f12d460f460>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0>]
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f12d460f9a0> with order 10
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f12d460f460>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f12d460f9a0>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0>]
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f12d460fdf0> with order 0
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,107 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f12d460f460>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f12d460fdf0>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f12d460f9a0>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0>]
2023-08-30 07:41:21,108 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f12d461c640> with order 2
2023-08-30 07:41:21,108 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,108 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f12d460f460>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f12d460fdf0>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f12d461c640>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f12d460f9a0>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0>]
2023-08-30 07:41:21,108 [MainThread ] [DEBUG] Adding the IntegrationFeatureRegistry <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f12d461ca90> with order 6
2023-08-30 07:41:21,108 [MainThread ] [DEBUG] self.features after the sort:
2023-08-30 07:41:21,108 [MainThread ] [DEBUG] [<checkov.common.bridgecrew.integration_features.features.policy_metadata_integration.PolicyMetadataIntegration object at 0x7f12d460f460>, <checkov.common.bridgecrew.integration_features.features.repo_config_integration.RepoConfigIntegration object at 0x7f12d460fdf0>, <checkov.common.bridgecrew.integration_features.features.custom_policies_integration.CustomPoliciesIntegration object at 0x7f12da076730>, <checkov.common.bridgecrew.integration_features.features.suppressions_integration.SuppressionsIntegration object at 0x7f12d461c640>, <checkov.common.bridgecrew.integration_features.features.licensing_integration.LicensingIntegration object at 0x7f12d461ca90>, <checkov.common.bridgecrew.integration_features.features.fixes_integration.FixesIntegration object at 0x7f12d460f9a0>, <checkov.common.bridgecrew.integration_features.features.policies_3d_integration.Policies3DIntegration object at 0x7f12d45f50a0>]
2023-08-30 07:41:21,123 [MainThread ] [DEBUG] Loading external checks from /home/node/.local/lib/python3.9/site-packages/checkov/bicep/checks/graph_checks
2023-08-30 07:41:21,124 [MainThread ] [DEBUG] Searching through ['__pycache__'] and ['SQLServerAuditingEnabled.json', '__init__.py']
2023-08-30 07:41:21,124 [MainThread ] [DEBUG] Searching through [] and ['__init__.cpython-39.pyc']
2023-08-30 07:41:21,195 [MainThread ] [DEBUG] Popen(['git', 'version'], cwd=/workspaces/SBC-Infrastructure/src/grafana, universal_newlines=False, shell=None, istream=None)
2023-08-30 07:41:21,199 [MainThread ] [DEBUG] Popen(['git', 'version'], cwd=/workspaces/SBC-Infrastructure/src/grafana, universal_newlines=False, shell=None, istream=None)
2023-08-30 07:41:21,368 [MainThread ] [DEBUG] No API key present; setting include_all_checkov_policies to True
2023-08-30 07:41:21,371 [MainThread ] [DEBUG] Run metadata: {
"checkov_version": "2.4.14",
"python_executable": "/usr/bin/python3",
"python_version": "3.9.2 (default, Feb 28 2021, 17:03:44) \n[GCC 10.2.1 20210110]",
"checkov_executable": "/home/node/.local/bin/checkov",
"args": [
"Command Line Args: -d t --framework terraform_json -o json",
"Defaults:",
" --branch: master",
" --download-external-modules:False",
" --external-modules-download-path:.external_modules",
" --evaluate-variables:True",
" --secrets-scan-file-type:[]",
" --block-list-secret-scan:[]",
" --summary-position:top",
" --mask: []",
" --secrets-history-timeout:12h",
""
],
"OS_system_info": "Linux-5.15.90.1-microsoft-standard-WSL2-x86_64-with-glibc2.31",
"CPU_architecture": "",
"Python_implementation": "CPython"
}
2023-08-30 07:41:21,372 [MainThread ] [DEBUG] Using cert_reqs None
2023-08-30 07:41:21,372 [MainThread ] [DEBUG] Successfully set up HTTP manager
2023-08-30 07:41:21,372 [MainThread ] [DEBUG] Resultant set of frameworks (removing skipped frameworks): terraform_json
2023-08-30 07:41:21,372 [MainThread ] [DEBUG] BC_SOURCE = cli, version = 2.4.14
2023-08-30 07:41:21,372 [MainThread ] [DEBUG] terraform_json_runner declares no system dependency checks required.
2023-08-30 07:41:21,372 [MainThread ] [DEBUG] No API key found. Scanning locally only.
2023-08-30 07:41:21,481 [MainThread ] [DEBUG] Got checkov mappings and guidelines from Bridgecrew platform
2023-08-30 07:41:21,482 [MainThread ] [DEBUG] Loading external checks from /home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/checks/graph_checks
2023-08-30 07:41:21,485 [MainThread ] [DEBUG] Running without API key, so only open source runners will be enabled
2023-08-30 07:41:21,485 [MainThread ] [DEBUG] Filtered list of policies: []
2023-08-30 07:41:21,486 [MainThread ] [DEBUG] Received the following policy-level suppressions, that will be skipped from running: []
2023-08-30 07:41:21,486 [MainThread ] [DEBUG] Checking if terraform_json is valid for license
2023-08-30 07:41:21,486 [MainThread ] [DEBUG] Open source mode - the runner is enabled
2023-08-30 07:41:21,486 [MainThread ] [INFO ] Start to parse 1 files
2023-08-30 07:41:21,496 [MainThread ] [INFO ] Successfully parsed 1 files
2023-08-30 07:41:21,496 [MainThread ] [INFO ] Creating Terraform JSON graph
2023-08-30 07:41:21,496 [MainThread ] [WARNI] Failed to add block [{'aws_caller_identity': {'current': [{'//': {'metadata': {'path': 'sbc-prod-zone/current', 'uniqueId': 'current', '__startline__': 22, '__endline__': 25}, '__startline__': 21, '__endline__': 26}, '__startline__': 20, '__endline__': 27}], '__startline__': 19, '__endline__': 28}}]. Error:
2023-08-30 07:41:21,496 [MainThread ] [WARNI] 'list' object has no attribute 'items'
2023-08-30 07:41:21,497 [MainThread ] [INFO ] Creating vertices
2023-08-30 07:41:21,497 [MainThread ] [INFO ] [TerraformLocalGraph] created 11 vertices
2023-08-30 07:41:21,497 [MainThread ] [INFO ] Creating edges
2023-08-30 07:41:21,497 [MainThread ] [INFO ] [TerraformLocalGraph] created 11 edges
2023-08-30 07:41:21,498 [MainThread ] [INFO ] Rendering variables, graph has 11 vertices and 11 edges
2023-08-30 07:41:21,498 [MainThread ] [DEBUG] evaluating 9 edges
2023-08-30 07:41:21,499 [MainThread ] [DEBUG] evaluating 2 edges
2023-08-30 07:41:21,499 [MainThread ] [DEBUG] done evaluating edges
2023-08-30 07:41:21,500 [MainThread ] [DEBUG] done evaluate_non_rendered_values
2023-08-30 07:41:21,500 [MainThread ] [INFO ] Building cross variable edges
2023-08-30 07:41:21,500 [MainThread ] [INFO ] Found 0 cross variable edges
2023-08-30 07:41:21,500 [MainThread ] [INFO ] Successfully created Terraform JSON graph
2023-08-30 07:41:21,501 [MainThread ] [DEBUG] Scanning file: /cdk.tf.json
2023-08-30 07:41:21,501 [MainThread ] [ERROR] Exception traceback:
Traceback (most recent call last):
File "/home/node/.local/lib/python3.9/site-packages/checkov/main.py", line 470, in run
self.scan_reports = runner_registry.run(
File "/home/node/.local/lib/python3.9/site-packages/checkov/common/runners/runner_registry.py", line 120, in run
self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files,
File "/home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/runner.py", line 107, in run
self.add_python_check_results(report=report, runner_filter=runner_filter)
File "/home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/runner.py", line 124, in add_python_check_results
self.run_block(
File "/home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/runner.py", line 202, in run_block
start_line = entity_config[START_LINE]
TypeError: list indices must be integers or slices, not str
Traceback (most recent call last):
File "/home/node/.local/bin/checkov", line 9, in <module>
sys.exit(Checkov().run())
File "/home/node/.local/lib/python3.9/site-packages/checkov/main.py", line 470, in run
self.scan_reports = runner_registry.run(
File "/home/node/.local/lib/python3.9/site-packages/checkov/common/runners/runner_registry.py", line 120, in run
self.runners[0].run(root_folder, external_checks_dir=external_checks_dir, files=files,
File "/home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/runner.py", line 107, in run
self.add_python_check_results(report=report, runner_filter=runner_filter)
File "/home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/runner.py", line 124, in add_python_check_results
self.run_block(
File "/home/node/.local/lib/python3.9/site-packages/checkov/terraform_json/runner.py", line 202, in run_block
start_line = entity_config[START_LINE]
TypeError: list indices must be integers or slices, not str
```
**Desktop (please complete the following information):**
- OS: Ubuntu
- Checkov Version: 2.4.14
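For context, the `data` block in cdktf output has the same extra nesting level as `resource` (type -> name -> config). Without handling it, the parser keeps the raw nesting and the downstream code that expects per-resource dicts with `__startline__` hits the `TypeError` above. A simplified sketch of the required flattening (trimmed from the JSON above; the names used here are illustrative, not the parser's actual identifiers):

```python
# Illustration of the extra nesting level in cdktf 'data' blocks: type -> name -> config.
data_block = {
    "aws_caller_identity": {  # data source type
        "current": {          # data source name
            "//": {"metadata": {"path": "sbc-prod-zone/current", "uniqueId": "current"}},
        },
    },
}

IGNORED_KEYS = {"//"}  # the real parser also skips bookkeeping keys like the '//' comment field

for data_type, entries in data_block.items():
    for name, config in entries.items():  # the extra level that plain blocks don't have
        if name in IGNORED_KEYS:
            continue
        print(f"data.{data_type}.{name} -> {config}")
```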
| 2023-08-30T13:30:26 | -1.0 |
|
bridgecrewio/checkov | 5,540 | bridgecrewio__checkov-5540 | [
"5537"
] | 05e7ddbf01e3e713ee2195c54f37526ca251dd30 | diff --git a/checkov/terraform/checks/resource/azure/SQLDatabaseZoneRedundant.py b/checkov/terraform/checks/resource/azure/SQLDatabaseZoneRedundant.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/SQLDatabaseZoneRedundant.py
@@ -0,0 +1,43 @@
+from __future__ import annotations
+
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class SQLDatabaseZoneRedundant(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+        This is a best practice which helps to:
+ - Improved High Availability: Zone redundancy ensures that your database is replicated
+ across Availability Zones within an Azure region. If one Availability Zone experiences an outage,
+ your database continues to operate from the other zones, minimizing downtime.
+ - Reduced Maintenance Downtime: Zone-redundant configurations often require
+ less planned maintenance downtime because updates and patches can be applied to
+ one zone at a time while the other zones continue to serve traffic.
+ - Improved Scalability: Zone-redundant configurations are designed to scale with your workload.
+ You can take advantage of features like Hyperscale to dynamically adjust resources based on
+ your database's performance needs.
+ - Improved SLA: Azure SQL Database zone-redundant configurations typically offer
+ a higher service-level agreement (SLA) for availability compared to non-zone-redundant configurations.
+
+        However, it's critical to note that:
+ - Zone-redundant availability is available to databases in the
+ General Purpose, Premium, Business Critical and Hyperscale service tiers of the vCore purchasing model,
+ and not the Basic and Standard service tiers of the DTU-based purchasing model.
+ - This may not be required for:
+            - Databases that support applications which don't have a high maturity in terms of "High Availability"
+ - Databases that are very sensitive to network latency that may increase the transaction commit time,
+ and thus impact the performance of some OLTP workloads.
+ """
+ name = "Ensure the Azure SQL Database Namespace is zone redundant"
+ id = "CKV_AZURE_229"
+ supported_resources = ("azurerm_mssql_database",)
+ categories = (CheckCategories.BACKUP_AND_RECOVERY,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "zone_redundant"
+
+
+check = SQLDatabaseZoneRedundant()
| diff --git a/tests/terraform/checks/resource/azure/example_SQLDatabaseZoneRedundant/main.tf b/tests/terraform/checks/resource/azure/example_SQLDatabaseZoneRedundant/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_SQLDatabaseZoneRedundant/main.tf
@@ -0,0 +1,44 @@
+resource "azurerm_mssql_database" "pass" {
+ name = "example-database"
+ server_id = azurerm_mssql_server.example.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ license_type = "LicenseIncluded"
+ max_size_gb = 4
+ read_scale = true
+ sku_name = "S0"
+ zone_redundant = true
+
+ tags = {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_mssql_database" "fail2" {
+ name = "example-database"
+ server_id = azurerm_mssql_server.example.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ license_type = "LicenseIncluded"
+ max_size_gb = 4
+ read_scale = true
+ sku_name = "S0"
+ zone_redundant = false
+
+ tags = {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_mssql_database" "fail" {
+ name = "example-database"
+ server_id = azurerm_mssql_server.example.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ license_type = "LicenseIncluded"
+ max_size_gb = 4
+ read_scale = true
+ sku_name = "S0"
+
+ tags = {
+ environment = "Production"
+ }
+
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_SQLDatabaseZoneRedundant.py b/tests/terraform/checks/resource/azure/test_SQLDatabaseZoneRedundant.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_SQLDatabaseZoneRedundant.py
@@ -0,0 +1,42 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.SQLDatabaseZoneRedundant import check
+
+
+class TestSQLDatabaseZoneRedundant (unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_SQLDatabaseZoneRedundant")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_mssql_database.pass',
+ }
+ failing_resources = {
+ 'azurerm_mssql_database.fail',
+ 'azurerm_mssql_database.fail2',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
| Azure SQL Database - Ensure that the SQL database is zone-redundant
**Describe the issue**
It seems that there are no Checkov checks related to the best practice of making SQL Databases zone-redundant: [azurerm_mssql_database](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/mssql_database)
**Examples**
````hcl
resource "azurerm_mssql_database" "test" {
name = "acctest-db-d"
server_id = azurerm_mssql_server.example.id
collation = "SQL_Latin1_General_CP1_CI_AS"
license_type = "LicenseIncluded"
max_size_gb = 4
read_scale = true
sku_name = "S0"
zone_redundant = true
tags = {
environment = "Production"
}
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
Having SQL Database zone-redundant comes with the following advantages:
- High Availability: Zone redundancy ensures that your database is replicated across Availability Zones within an Azure region. If one Availability Zone experiences an outage, your database continues to operate from the other zones, minimizing downtime.
- Reduced Maintenance Downtime: Zone-redundant configurations often require less planned maintenance downtime because updates and patches can be applied to one zone at a time while the other zones continue to serve traffic.
- Scalability: Zone-redundant configurations are designed to scale with your workload. You can take advantage of features like Hyperscale to dynamically adjust resources based on your database's performance needs.
- Improved SLA: Azure SQL Database zone-redundant configurations typically offer a higher service-level agreement (SLA) for availability compared to non-zone-redundant configurations.
Note that:
- Zone-redundant availability is available to databases in the General Purpose, Premium, Business Critical and Hyperscale service tiers of the vCore purchasing model, and not the Basic and Standard service tiers of the DTU-based purchasing model.
- This may not be required for:
  - Databases that support applications which don't have a high maturity in terms of "High Availability"
- Databases that are very sensitive to network latency that may increase the transaction commit time, and thus impact the performance of some OLTP workloads.
| 2023-09-07T16:01:32 | -1.0 |
|
bridgecrewio/checkov | 5,541 | bridgecrewio__checkov-5541 | [
"5539"
] | 05e7ddbf01e3e713ee2195c54f37526ca251dd30 | diff --git a/checkov/terraform/checks/resource/azure/SQLDatabaseLedgerEnabled.py b/checkov/terraform/checks/resource/azure/SQLDatabaseLedgerEnabled.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/SQLDatabaseLedgerEnabled.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class SQLDatabaseLedgerEnabled(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+ Ledger helps protect data from any attacker or high-privileged user, including database administrators (DBAs),
+ system administrators, and cloud administrators. As with a traditional ledger, the feature preserves
+ historical data.
+ If a row is updated in the database, its previous value is maintained and protected
+ in a history table. Ledger provides a chronicle of all changes made to the database over time.
+ Ledger and the historical data are managed transparently, offering protection without any application changes.
+ The feature maintains historical data in a relational form to support SQL queries for auditing,
+ forensics, and other purposes.
+ It provides guarantees of cryptographic data integrity while maintaining the power, flexibility,
+ and performance of the SQL database.
+
+ Note that:
+ - Ledger needs to be enabled at the deployment of the database and can't be removed once enabled
+        - Ledger may come with a performance impact, which means that it is advised to closely monitor
+ the database performance in order to ensure that the database meets the performance objectives
+ - Ledger comes with an additional cost, due to the data being stored
+
+ """
+ name = "Ensure that the Ledger feature is enabled on database that "
+ name += "requires cryptographic proof and nonrepudiation of data integrity"
+ id = "CKV_AZURE_224"
+ supported_resources = ("azurerm_mssql_database",)
+ categories = (CheckCategories.LOGGING,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "ledger_enabled"
+
+
+check = SQLDatabaseLedgerEnabled()
| diff --git a/tests/terraform/checks/resource/azure/example_SQLDatabaseLedgerEnabled/main.tf b/tests/terraform/checks/resource/azure/example_SQLDatabaseLedgerEnabled/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_SQLDatabaseLedgerEnabled/main.tf
@@ -0,0 +1,44 @@
+resource "azurerm_mssql_database" "pass" {
+ name = "example-database"
+ server_id = azurerm_mssql_server.example.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ license_type = "LicenseIncluded"
+ max_size_gb = 4
+ read_scale = true
+ sku_name = "S0"
+ ledger_enabled = true
+
+ tags = {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_mssql_database" "fail2" {
+ name = "example-database"
+ server_id = azurerm_mssql_server.example.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ license_type = "LicenseIncluded"
+ max_size_gb = 4
+ read_scale = true
+ sku_name = "S0"
+ ledger_enabled = false
+
+ tags = {
+ environment = "Production"
+ }
+}
+
+resource "azurerm_mssql_database" "fail" {
+ name = "example-database"
+ server_id = azurerm_mssql_server.example.id
+ collation = "SQL_Latin1_General_CP1_CI_AS"
+ license_type = "LicenseIncluded"
+ max_size_gb = 4
+ read_scale = true
+ sku_name = "S0"
+
+ tags = {
+ environment = "Production"
+ }
+
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_SQLDatabaseLedgerEnabled.py b/tests/terraform/checks/resource/azure/test_SQLDatabaseLedgerEnabled.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_SQLDatabaseLedgerEnabled.py
@@ -0,0 +1,42 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.SQLDatabaseLedgerEnabled import check
+
+
+class SQLDatabaseLedgerEnabled(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_SQLDatabaseLedgerEnabled")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_mssql_database.pass',
+ }
+ failing_resources = {
+ 'azurerm_mssql_database.fail',
+ 'azurerm_mssql_database.fail2',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
| Azure SQL Database - Ensure that the Ledger feature is enabled on database that requires cryptographic proof and nonrepudiation of data integrity
**Describe the issue**
It seems that there are no checks in regard to using a "[Ledger](https://learn.microsoft.com/en-us/sql/relational-databases/security/ledger/ledger-overview?view=sql-server-ver16&viewFallbackFrom=azuresql)" within Azure SQL Database.
[azurerm_mssql_database](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/data-sources/mssql_database)
**Examples**
````hcl
resource "azurerm_mssql_database" "test" {
name = "acctest-db-d"
server_id = azurerm_mssql_server.example.id
collation = "SQL_Latin1_General_CP1_CI_AS"
license_type = "LicenseIncluded"
max_size_gb = 4
read_scale = true
sku_name = "S0"
ledger_enabled = true
tags = {
foo = "bar"
}
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
Ledger helps protect data from any attacker or high-privileged user, including database administrators (DBAs), system administrators, and cloud administrators. As with a traditional ledger, the feature preserves historical data. If a row is updated in the database, its previous value is maintained and protected in a history table. Ledger provides a chronicle of all changes made to the database over time.
Ledger and the historical data are managed transparently, offering protection without any application changes. The feature maintains historical data in a relational form to support SQL queries for auditing, forensics, and other purposes. It provides guarantees of cryptographic data integrity while maintaining the power, flexibility, and performance of the SQL database.
**Note that**:
- **Ledger needs to be enabled at the deployment of the database and can't be removed once enabled**

- **Ledger may come with performance impact, which means that it is advise to closely monitor the database performance in order to ensure that the database meets the performance objectives**
- **Ledger comes with an additional cost, due to the data being stored**
| 2023-09-07T16:34:40 | -1.0 |
|
bridgecrewio/checkov | 5,577 | bridgecrewio__checkov-5577 | [
"5576"
] | f950c57fc10ee65ad7a2e3e9ef125efdc7dc39fc | diff --git a/checkov/terraform/checks/resource/azure/AppServicePlanZoneRedundant.py b/checkov/terraform/checks/resource/azure/AppServicePlanZoneRedundant.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/AppServicePlanZoneRedundant.py
@@ -0,0 +1,26 @@
+from __future__ import annotations
+
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class AppServicePlanZoneRedundant(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+ To enhance the resiliency and reliability of business-critical workloads,
+ it's recommended to deploy new App Service Plans with zone-redundancy.
+
+ There's no additional cost associated with enabling availability zones.
+ Pricing for a zone redundant App Service is the same as a single zone App Service.
+ """
+ name = "Ensure the App Service Plan is zone redundant"
+ id = "CKV_AZURE_225"
+ supported_resources = ("azurerm_service_plan",)
+ categories = (CheckCategories.BACKUP_AND_RECOVERY,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "zone_balancing_enabled"
+
+
+check = AppServicePlanZoneRedundant()
| diff --git a/tests/terraform/checks/resource/azure/example_AppServicePlanZoneRedundant/main.tf b/tests/terraform/checks/resource/azure/example_AppServicePlanZoneRedundant/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_AppServicePlanZoneRedundant/main.tf
@@ -0,0 +1,26 @@
+resource "azurerm_service_plan" "pass" {
+ name = "example"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ os_type = "Linux"
+ sku_name = "P1v2"
+ zone_balancing_enabled = true
+}
+
+resource "azurerm_service_plan" "fail1" {
+ name = "example"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ os_type = "Linux"
+ sku_name = "P1v2"
+ zone_balancing_enabled = false
+}
+
+
+resource "azurerm_service_plan" "fail2" {
+ name = "example"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ os_type = "Linux"
+ sku_name = "P1v2"
+}
diff --git a/tests/terraform/checks/resource/azure/test_AppServicePlanZoneRedundant.py b/tests/terraform/checks/resource/azure/test_AppServicePlanZoneRedundant.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_AppServicePlanZoneRedundant.py
@@ -0,0 +1,42 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.AppServicePlanZoneRedundant import check
+
+
+class AppServicePlanZoneRedundant(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_AppServicePlanZoneRedundant")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_service_plan.pass',
+ }
+ failing_resources = {
+ 'azurerm_service_plan.fail1',
+ 'azurerm_service_plan.fail2',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
| Azure App Service Plan - Ensure that the App Service Plan is zone redundant
**Describe the issue**
It seems that there are no checks to ensure that [azurerm_service_plan](https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/service_plan) resources are deployed in a zone-redundant way.
**Examples**
````hcl
resource "azurerm_service_plan" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
os_type = "Linux"
sku_name = "P1v2"
zone_balancing_enabled = true
}
````
**Version (please complete the following information):**
N/A
**Additional context**
To enhance the resiliency and reliability of your business-critical workloads, it's recommended that you deploy your new App Service Plans with zone-redundancy. Follow the steps to [redeploy to availability zone support](https://learn.microsoft.com/en-us/azure/reliability/reliability-app-service?tabs=graph%2Ccli#create-a-resource-with-availability-zone-enabled), configure your pipelines to redeploy your WebApp on the new App Service Plan, and then use a [Blue-Green deployment](https://learn.microsoft.com/en-us/azure/spring-apps/concepts-blue-green-deployment-strategies) approach to fail over to the new site.
By distributing your applications across multiple availability zones, you can ensure their continued operation even in the event of a datacenter-level failure. For more information on availability zone support in Azure App Service, see [Availability zone support](https://learn.microsoft.com/en-us/azure/reliability/reliability-app-service?tabs=graph%2Ccli#availability-zone-support).
There's no additional cost associated with enabling availability zones. Pricing for a zone redundant App Service is the same as a single zone App Service. You'll be charged based on your App Service plan SKU, the capacity you specify, and any instances you scale to based on your autoscale criteria. If you enable availability zones but specify a capacity less than three, the platform will enforce a minimum instance count of three and charge you for those three instances.
| 2023-09-19T12:35:44 | -1.0 |
|
bridgecrewio/checkov | 5,584 | bridgecrewio__checkov-5584 | [
"5583"
] | 4950b142bedb04511479ee962fba764440b6e8d0 | diff --git a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py
@@ -0,0 +1,34 @@
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+from typing import Any
+
+
+class AKSEphemeralOSDisks(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+        Temporary data can contain sensitive data at some point. By using ephemeral disks,
+        we ensure that data written to the OS disk is stored on local VM storage and isn't persisted to Azure Storage.
+
+ Azure automatically replicates data stored in the managed OS disk of a virtual machine to Azure storage
+ to avoid data loss in case the virtual machine needs to be relocated to another host.
+ Generally speaking, containers are not designed to have local state persisted to the managed OS disk,
+        hence this behavior offers limited value to AKS-hosted workloads while providing some drawbacks,
+ including slower node provisioning and higher read/write latency.
+
+        Ephemeral disks also allow us to have faster cluster operations like scale or upgrade
+ due to faster re-imaging and boot times.
+ """
+ name = "Ensure ephemeral disks are used for OS disks"
+ id = "CKV_AZURE_226"
+ supported_resources = ("azurerm_kubernetes_cluster",)
+ categories = (CheckCategories.KUBERNETES,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "os_disk_type"
+
+ def get_expected_value(self) -> Any:
+ return "Ephemeral"
+
+
+check = AKSEphemeralOSDisks()
| diff --git a/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf b/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf
@@ -0,0 +1,34 @@
+resource "azurerm_kubernetes_cluster" "pass" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
+ os_disk_type = "Ephemeral"
+
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
+
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail2" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
+ os_disk_type = "Managed"
+
+ tags = {
+ Environment = "Production"
+ }
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_AKSEphemeralOSDisks.py b/tests/terraform/checks/resource/azure/test_AKSEphemeralOSDisks.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_AKSEphemeralOSDisks.py
@@ -0,0 +1,42 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.AKSEphemeralOSDisks import check
+
+
+class AKSEphemeralOSDisks(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_AKSEphemeralOSDisks")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_kubernetes_cluster.pass',
+ }
+ failing_resources = {
+ 'azurerm_kubernetes_cluster.fail',
+ 'azurerm_kubernetes_cluster.fail2',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
| Azure Kubernetes Service - Ensure ephemeral disks are used for OS disks
**Describe the issue**
It seems that there are no checks in regard to using "Ephemeral Disks" for OS disks within Azure AKS Node Pools.
**Examples**
````hcl
resource "azurerm_kubernetes_cluster" "example" {
name = "example-aks1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "exampleaks1"
os_disk_type = "Ephemeral"
tags = {
Environment = "Production"
}
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
Temporary data can contain sensitive data at some point. By using ephemeral disks, we ensure that data written to the OS disk is stored on local VM storage and isn't persisted to Azure Storage.
Azure automatically replicates data stored in the managed OS disk of a virtual machine to Azure storage to avoid data loss in case the virtual machine needs to be relocated to another host.
Generally speaking, containers are not designed to have local state persisted to the managed OS disk, hence this behavior offers limited value to AKS-hosted workloads while providing some drawbacks, including slower node provisioning and higher read/write latency.
Ephemeral disks also allow us to have faster cluster operations like scale or upgrade due to faster re-imaging and boot times.
| 2023-09-21T12:29:30 | -1.0 |
|
bridgecrewio/checkov | 5,588 | bridgecrewio__checkov-5588 | [
"5586"
] | f950c57fc10ee65ad7a2e3e9ef125efdc7dc39fc | diff --git a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py
@@ -0,0 +1,28 @@
+
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class AKSEncryptionAtHostEnabled(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+ With host-based encryption, the data stored on the VM host of
+ your AKS agent nodes' VMs is encrypted at rest and flows encrypted to the Storage service.
+
+ This means the temp disks are encrypted at rest with platform-managed keys.
+ The cache of OS and data disks is encrypted at rest with either platform-managed keys
+ or customer-managed keys depending on the encryption type set on those disks.
+ """
+ name = "Ensure that the AKS cluster encrypt temp disks, caches, and data flows "
+ name += "between Compute and Storage resources"
+ id = "CKV_AZURE_227"
+ supported_resources = ("azurerm_kubernetes_cluster", "azurerm_kubernetes_cluster_node_pool")
+ categories = (CheckCategories.KUBERNETES,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,
+ missing_block_result=CheckResult.FAILED)
+
+ def get_inspected_key(self) -> str:
+ return "enable_host_encryption"
+
+
+check = AKSEncryptionAtHostEnabled()
| diff --git a/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf b/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf
@@ -0,0 +1,98 @@
+resource "azurerm_kubernetes_cluster" "pass" {
+ name = "example-aks1"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ dns_prefix = "exampleaks1"
+ enable_host_encryption = true
+
+ default_node_pool {
+ name = "default"
+ node_count = 1
+ vm_size = "Standard_D2_v2"
+ }
+
+ identity {
+ type = "SystemAssigned"
+ }
+
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "pass" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
+ enable_host_encryption = true
+
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail" {
+ name = "example-aks1"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ dns_prefix = "exampleaks1"
+
+ default_node_pool {
+ name = "default"
+ node_count = 1
+ vm_size = "Standard_D2_v2"
+ }
+
+ identity {
+ type = "SystemAssigned"
+ }
+
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "fail" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail1" {
+ name = "example-aks1"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ dns_prefix = "exampleaks1"
+ enable_host_encryption = false
+
+ default_node_pool {
+ name = "default"
+ node_count = 1
+ vm_size = "Standard_D2_v2"
+ }
+
+ identity {
+ type = "SystemAssigned"
+ }
+
+ tags = {
+ Environment = "Production"
+ }
+}
+
+resource "azurerm_kubernetes_cluster_node_pool" "fail1" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
+ enable_host_encryption = false
+
+ tags = {
+ Environment = "Production"
+ }
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py b/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py
@@ -0,0 +1,45 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.AKSEncryptionAtHostEnabled import check
+
+
+class AKSEncryptionAtHostEnabled(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_AKSEncryptionAtHostEnabled")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_kubernetes_cluster.pass',
+ 'azurerm_kubernetes_cluster_node_pool.pass'
+ }
+ failing_resources = {
+ 'azurerm_kubernetes_cluster.fail',
+ 'azurerm_kubernetes_cluster.fail1',
+ 'azurerm_kubernetes_cluster_node_pool.fail',
+ 'azurerm_kubernetes_cluster_node_pool.fail1',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
| Azure Kubernetes Service - Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources
**Describe the issue**
It seems that there is no check to ensure that "Encryption at Host" is enabled.
**Examples**
````hcl
resource "azurerm_kubernetes_cluster" "example" {
name = "example-aks1"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
dns_prefix = "exampleaks1"
enable_host_encryption = true
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}
identity {
type = "SystemAssigned"
}
tags = {
Environment = "Production"
}
}
resource "azurerm_kubernetes_cluster_node_pool" "example" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
enable_host_encryption = true
tags = {
Environment = "Production"
}
}
````
**Version (please complete the following information):**
- Checkov Version [e.g. 22]
**Additional context**
With host-based encryption, the data stored on the VM host of your AKS agent nodes' VMs is encrypted at rest and flows encrypted to the Storage service. This means the temp disks are encrypted at rest with platform-managed keys. The cache of OS and data disks is encrypted at rest with either platform-managed keys or customer-managed keys depending on the encryption type set on those disks.
By default, when using AKS, OS and data disks use server-side encryption with platform-managed keys. The caches for these disks are encrypted at rest with platform-managed keys. You can specify your own managed keys following [Bring your own keys (BYOK) with Azure disks in Azure Kubernetes Service](https://learn.microsoft.com/en-us/azure/aks/azure-disk-customer-managed-keys). The caches for these disks are also encrypted using the key you specify.
Host-based encryption is different than server-side encryption (SSE), which is used by Azure Storage. Azure-managed disks use Azure Storage to automatically encrypt data at rest when saving data. Host-based encryption uses the host of the VM to handle encryption before the data flows through Azure Storage.
Note that in the Terraform documentation it is shown as "Preview".

However, it reached "General availability" on May 25, 2021.

| 2023-09-22T08:37:27 | -1.0 |
|
bridgecrewio/checkov | 5,638 | bridgecrewio__checkov-5638 | [
"5611"
] | dd803b575e35042d0410c3c4129449b56636c9ce | diff --git a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py
--- a/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py
+++ b/checkov/terraform/checks/resource/azure/AKSEncryptionAtHostEnabled.py
@@ -1,4 +1,3 @@
-
from checkov.common.models.enums import CheckCategories, CheckResult
from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
@@ -18,11 +17,19 @@ def __init__(self) -> None:
id = "CKV_AZURE_227"
supported_resources = ("azurerm_kubernetes_cluster", "azurerm_kubernetes_cluster_node_pool")
categories = (CheckCategories.KUBERNETES,)
- super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,
- missing_block_result=CheckResult.FAILED)
+ super().__init__(
+ name=name,
+ id=id,
+ categories=categories,
+ supported_resources=supported_resources,
+ missing_block_result=CheckResult.FAILED,
+ )
def get_inspected_key(self) -> str:
- return "enable_host_encryption"
+ if self.entity_type == "azurerm_kubernetes_cluster":
+ return "default_node_pool/[0]/enable_host_encryption"
+ else:
+ return "enable_host_encryption"
check = AKSEncryptionAtHostEnabled()
diff --git a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py
--- a/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py
+++ b/checkov/terraform/checks/resource/azure/AKSEphemeralOSDisks.py
@@ -25,7 +25,7 @@ def __init__(self) -> None:
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self) -> str:
- return "os_disk_type"
+ return "default_node_pool/[0]/os_disk_type"
def get_expected_value(self) -> Any:
return "Ephemeral"
| diff --git a/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf b/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf
--- a/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf
+++ b/tests/terraform/checks/resource/azure/example_AKSEncryptionAtHostEnabled/main.tf
@@ -1,23 +1,26 @@
resource "azurerm_kubernetes_cluster" "pass" {
- name = "example-aks1"
- location = azurerm_resource_group.example.location
- resource_group_name = azurerm_resource_group.example.name
- dns_prefix = "exampleaks1"
- enable_host_encryption = true
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
default_node_pool {
- name = "default"
- node_count = 1
- vm_size = "Standard_D2_v2"
+ name = "default"
+
+ enable_host_encryption = true
+ vm_size = "Standard_E4ads_v5"
+ os_disk_type = "Ephemeral"
+ zones = [1, 2, 3]
+ only_critical_addons_enabled = true
+
+ type = "VirtualMachineScaleSets"
+ vnet_subnet_id = var.subnet_id
+ enable_auto_scaling = true
+ max_count = 6
+ min_count = 2
+ orchestrator_version = local.kubernetes_version
}
- identity {
- type = "SystemAssigned"
- }
-
- tags = {
- Environment = "Production"
- }
}
resource "azurerm_kubernetes_cluster_node_pool" "pass" {
@@ -25,72 +28,84 @@ resource "azurerm_kubernetes_cluster_node_pool" "pass" {
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
- enable_host_encryption = true
+ enable_host_encryption = true
tags = {
Environment = "Production"
}
}
-resource "azurerm_kubernetes_cluster" "fail" {
- name = "example-aks1"
- location = azurerm_resource_group.example.location
- resource_group_name = azurerm_resource_group.example.name
- dns_prefix = "exampleaks1"
- default_node_pool {
- name = "default"
- node_count = 1
- vm_size = "Standard_D2_v2"
- }
-
- identity {
- type = "SystemAssigned"
- }
+resource "azurerm_kubernetes_cluster" "fail1" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
tags = {
Environment = "Production"
}
+
+ default_node_pool {
+ name = "default"
+
+ enable_host_encryption = false
+ vm_size = "Standard_E4ads_v5"
+ zones = [1, 2, 3]
+ only_critical_addons_enabled = true
+
+ type = "VirtualMachineScaleSets"
+ vnet_subnet_id = var.subnet_id
+ enable_auto_scaling = true
+ max_count = 6
+ min_count = 2
+ orchestrator_version = local.kubernetes_version
+ }
+
}
-resource "azurerm_kubernetes_cluster_node_pool" "fail" {
+resource "azurerm_kubernetes_cluster_node_pool" "fail1" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
+ enable_host_encryption = false
+
tags = {
Environment = "Production"
}
}
-resource "azurerm_kubernetes_cluster" "fail1" {
- name = "example-aks1"
- location = azurerm_resource_group.example.location
- resource_group_name = azurerm_resource_group.example.name
- dns_prefix = "exampleaks1"
- enable_host_encryption = false
- default_node_pool {
- name = "default"
- node_count = 1
- vm_size = "Standard_D2_v2"
- }
+resource "azurerm_kubernetes_cluster" "fail2" {
+ name = "internal"
+ kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
+ vm_size = "Standard_DS2_v2"
+ node_count = 1
- identity {
- type = "SystemAssigned"
+ default_node_pool {
+ name = "default"
+
+ vm_size = "Standard_E4ads_v5"
+ os_disk_type = "Ephemeral"
+ zones = [1, 2, 3]
+ only_critical_addons_enabled = true
+
+ type = "VirtualMachineScaleSets"
+ vnet_subnet_id = var.subnet_id
+ enable_auto_scaling = true
+ max_count = 6
+ min_count = 2
+ orchestrator_version = local.kubernetes_version
}
- tags = {
- Environment = "Production"
- }
}
-resource "azurerm_kubernetes_cluster_node_pool" "fail1" {
+resource "azurerm_kubernetes_cluster_node_pool" "fail2" {
name = "internal"
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
- enable_host_encryption = false
tags = {
Environment = "Production"
diff --git a/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf b/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf
--- a/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf
+++ b/tests/terraform/checks/resource/azure/example_AKSEphemeralOSDisks/main.tf
@@ -3,11 +3,24 @@ resource "azurerm_kubernetes_cluster" "pass" {
kubernetes_cluster_id = azurerm_kubernetes_cluster.example.id
vm_size = "Standard_DS2_v2"
node_count = 1
- os_disk_type = "Ephemeral"
- tags = {
- Environment = "Production"
+ default_node_pool {
+ name = "default"
+
+ enable_host_encryption = true
+ vm_size = "Standard_E4ads_v5"
+ os_disk_type = "Ephemeral"
+ zones = [1, 2, 3]
+ only_critical_addons_enabled = true
+
+ type = "VirtualMachineScaleSets"
+ vnet_subnet_id = var.subnet_id
+ enable_auto_scaling = true
+ max_count = 6
+ min_count = 2
+ orchestrator_version = local.kubernetes_version
}
+
}
resource "azurerm_kubernetes_cluster" "fail" {
@@ -19,6 +32,23 @@ resource "azurerm_kubernetes_cluster" "fail" {
tags = {
Environment = "Production"
}
+
+ default_node_pool {
+ name = "default"
+
+ enable_host_encryption = true
+ vm_size = "Standard_E4ads_v5"
+ zones = [1, 2, 3]
+ only_critical_addons_enabled = true
+
+ type = "VirtualMachineScaleSets"
+ vnet_subnet_id = var.subnet_id
+ enable_auto_scaling = true
+ max_count = 6
+ min_count = 2
+ orchestrator_version = local.kubernetes_version
+ }
+
}
resource "azurerm_kubernetes_cluster" "fail2" {
@@ -31,4 +61,23 @@ resource "azurerm_kubernetes_cluster" "fail2" {
tags = {
Environment = "Production"
}
+
+ default_node_pool {
+ name = "default"
+
+ enable_host_encryption = true
+ vm_size = "Standard_E4ads_v5"
+ os_disk_type = "Managed"
+ zones = [1, 2, 3]
+ only_critical_addons_enabled = true
+
+ type = "VirtualMachineScaleSets"
+ vnet_subnet_id = var.subnet_id
+ enable_auto_scaling = true
+ max_count = 6
+ min_count = 2
+ orchestrator_version = local.kubernetes_version
+ }
+
+
}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py b/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py
--- a/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py
+++ b/tests/terraform/checks/resource/azure/test_AKSEncryptionAtHostEnabled.py
@@ -22,15 +22,15 @@ def test(self):
'azurerm_kubernetes_cluster_node_pool.pass'
}
failing_resources = {
- 'azurerm_kubernetes_cluster.fail',
'azurerm_kubernetes_cluster.fail1',
- 'azurerm_kubernetes_cluster_node_pool.fail',
+ 'azurerm_kubernetes_cluster.fail2',
'azurerm_kubernetes_cluster_node_pool.fail1',
+ 'azurerm_kubernetes_cluster_node_pool.fail2',
}
skipped_resources = {}
- passed_check_resources = set([c.resource for c in report.passed_checks])
- failed_check_resources = set([c.resource for c in report.failed_checks])
+ passed_check_resources = {c.resource for c in report.passed_checks}
+ failed_check_resources = {c.resource for c in report.failed_checks}
self.assertEqual(summary['passed'], len(passing_resources))
self.assertEqual(summary['failed'], len(failing_resources))
| CKV_AZURE_226: error in check and testcase
**Describe the issue**
CKV_AZURE_226 checks for ephemeral disks within the "main resource" azurerm_kubernetes_cluster but the cluster itself doesn't have any argument called os_disk_type. The argument os_disk_type is part of the node pool.
The testcase [here](https://github.com/bridgecrewio/checkov/pull/5584/files#diff-c0b8f08537766f6eff2a5d10b9439d227fdaaebe6ff7903008825c5f9d51c22dR1) is misleading and the check itself [here](https://github.com/bridgecrewio/checkov/pull/5584/files#diff-c9248390aa120f7af4643f1908d3d824fb903fd3c6cd63e9e77fe8e9ecd59289R28) too.
In my opinion this must be something like
```
def get_inspected_key(self) -> str:
return "default_node_pool/[0]/os_disk_type"
```
otherwise it won't work?
Same for CKV_AZURE_227.
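To make the reasoning above concrete, here is a purely illustrative sketch (a toy resolver, not Checkov's internal lookup, and the parsed-configuration shape is an assumption) of why the inspected key has to descend into `default_node_pool`:

```python
# Purely illustrative: a toy resolver, not Checkov's internal lookup. It shows why
# "default_node_pool/[0]/os_disk_type" finds the value while a top-level
# "os_disk_type" key simply does not exist on azurerm_kubernetes_cluster.
conf = {
    "name": ["example"],
    "default_node_pool": [
        {
            "name": ["defaultpool"],
            "os_disk_type": ["Ephemeral"],
        }
    ],
}


def resolve(conf: dict, path: str):
    """Walk a '/'-separated path such as 'default_node_pool/[0]/os_disk_type'."""
    current = conf
    for part in path.split("/"):
        if part.startswith("[") and part.endswith("]"):
            current = current[int(part[1:-1])]  # list index, e.g. "[0]"
        else:
            current = current[part]  # dict key
    return current


print(resolve(conf, "default_node_pool/[0]/os_disk_type"))  # ['Ephemeral']
print(conf.get("os_disk_type"))  # None - no such top-level argument
```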
**Examples**
```
[root] # head -30 aks.tf
resource "azurerm_kubernetes_cluster" "this" {
name = local.name_prefix
location = var.resource_group.location
resource_group_name = var.resource_group.name
node_resource_group = "${local.name_prefix}-node-pool"
dns_prefix = local.name_prefix
kubernetes_version = local.kubernetes_version
sku_tier = var.sku_tier
api_server_access_profile {
authorized_ip_ranges = var.api_server_authorized_ip_ranges
}
default_node_pool {
name = "default"
enable_host_encryption = true
vm_size = "Standard_E4ads_v5"
os_disk_type = "Ephemeral"
zones = [1, 2, 3]
only_critical_addons_enabled = true
type = "VirtualMachineScaleSets"
vnet_subnet_id = var.subnet_id
enable_auto_scaling = true
max_count = 6
min_count = 2
orchestrator_version = local.kubernetes_version
upgrade_settings {
```
results in
```
[root] # checkov --skip-framework kubernetes --skip-framework helm --quiet --compact -o junitxml -o cli --directory .
2023-10-02 11:58:47,399 [MainThread ] [WARNI] The framework "sca_image" is part of the "SCA" module, which is not enabled in the platform
2023-10-02 11:58:47,399 [MainThread ] [WARNI] The framework "sca_package" is part of the "SCA" module, which is not enabled in the platform
terraform scan results:
Passed checks: 6, Failed checks: 11, Skipped checks: 0
[...]
Check: CKV_AZURE_226: "Ensure ephemeral disks are used for OS disks"
FAILED for resource: azurerm_kubernetes_cluster.this
File: /aks.tf:1-64
Check: CKV_AZURE_227: "Ensure that the AKS cluster encrypt temp disks, caches, and data flows between Compute and Storage resources"
FAILED for resource: azurerm_kubernetes_cluster.this
File: /aks.tf:1-64
[...]
```
Please also see https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster for code example.
**Version (please complete the following information):**
- Checkov Version 2.4.58
**Additional context**
This is related to https://github.com/bridgecrewio/checkov/pull/5584 and https://github.com/bridgecrewio/checkov/pull/5588.
| hey @tberreis thanks for reaching out.
Nice catch, are you interested in contributing the needed change and adjusting the test cases? | 2023-10-12T11:12:00 | -1.0 |
bridgecrewio/checkov | 5,649 | bridgecrewio__checkov-5649 | [
"5554"
] | 321b6a3272f3dde6cef22e941b2737ff74dca0ea | diff --git a/checkov/terraform/checks/resource/azure/RedisCacheStandardReplicationEnabled.py b/checkov/terraform/checks/resource/azure/RedisCacheStandardReplicationEnabled.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/RedisCacheStandardReplicationEnabled.py
@@ -0,0 +1,35 @@
+from __future__ import annotations
+
+from typing import Any
+
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class RedisCacheStandardReplicationEnabled(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+ With Standard Replication, Azure Cache for Redis has a high availability architecture
+ that ensures your managed instance is functioning, even when outages affect
+ the underlying virtual machines (VMs). Whether the outage is planned or unplanned outages,
+ Azure Cache for Redis delivers greater percentage availability rates than what's attainable
+ by hosting Redis on a single VM.
+
+ An Azure Cache for Redis in the applicable tiers runs on a pair of Redis servers by default.
+ The two servers are hosted on dedicated VMs.
+ Open-source Redis allows only one server to handle data write requests.
+ """
+ name = "Standard Replication should be enabled"
+ id = "CKV_AZURE_230"
+ supported_resources = ("azurerm_redis_cache",)
+ categories = (CheckCategories.BACKUP_AND_RECOVERY,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "sku_name"
+
+ def get_expected_values(self) -> list[Any]:
+ return ["Standard", "Premium"]
+
+
+check = RedisCacheStandardReplicationEnabled()
| diff --git a/tests/terraform/checks/resource/azure/example_RedisCacheStandardReplicationEnabled/main.tf b/tests/terraform/checks/resource/azure/example_RedisCacheStandardReplicationEnabled/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_RedisCacheStandardReplicationEnabled/main.tf
@@ -0,0 +1,41 @@
+resource "azurerm_redis_cache" "pass1" {
+ name = "example-cache"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ capacity = 2
+ family = "C"
+ sku_name = "Standard"
+ enable_non_ssl_port = false
+ minimum_tls_version = "1.2"
+
+ redis_configuration {
+ }
+}
+
+resource "azurerm_redis_cache" "pass2" {
+ name = "example-cache"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ capacity = 2
+ family = "C"
+ sku_name = "Premium"
+ enable_non_ssl_port = false
+ minimum_tls_version = "1.2"
+
+ redis_configuration {
+ }
+}
+
+resource "azurerm_redis_cache" "fail1" {
+ name = "example-cache"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ capacity = 2
+ family = "C"
+ sku_name = "Basic"
+ enable_non_ssl_port = false
+ minimum_tls_version = "1.2"
+
+ redis_configuration {
+ }
+}
diff --git a/tests/terraform/checks/resource/azure/test_RedisCacheStandardReplicationEnabled.py b/tests/terraform/checks/resource/azure/test_RedisCacheStandardReplicationEnabled.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_RedisCacheStandardReplicationEnabled.py
@@ -0,0 +1,42 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.RedisCacheStandardReplicationEnabled import check
+
+
+class TestRedisCacheStandardReplicationEnabled(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_RedisCacheStandardReplicationEnabled")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_redis_cache.pass1',
+ 'azurerm_redis_cache.pass2'
+ }
+ failing_resources = {
+ 'azurerm_redis_cache.fail1'
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| Azure Redis Cache - Ensure standard replication is enabled
**Describe the issue**
It seems that there is no Checkov check that verifies whether the Azure Redis Cache has "Standard replication" enabled.
**Examples**
````hcl
resource "azurerm_redis_cache" "example" {
name = "example-cache"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
capacity = 2
family = "C"
sku_name = "Standard"
enable_non_ssl_port = false
minimum_tls_version = "1.2"
redis_configuration {
}
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
With [Standard Replication](https://learn.microsoft.com/en-us/azure/azure-cache-for-redis/cache-high-availability#standard-replication-for-high-availability), Azure Cache for Redis has a high availability architecture that ensures your managed instance is functioning, even when outages affect the underlying virtual machines (VMs). Whether the outages are planned or unplanned, Azure Cache for Redis delivers greater availability rates than what's attainable by hosting Redis on a single VM.
An Azure Cache for Redis in the applicable tiers runs on a pair of Redis servers by default. The two servers are hosted on dedicated VMs. Open-source Redis allows only one server to handle data write requests.
This is automatically enabled when the SKU is one of the following:
- Standard
- Premium
- Enterprise
- Enterprise Flash
| 2023-10-16T10:35:08 | -1.0 |
|
bridgecrewio/checkov | 5,662 | bridgecrewio__checkov-5662 | [
"5661"
] | f6e9739e3c70c92ceefe718e7340958916e57fe3 | diff --git a/checkov/terraform/checks/resource/azure/AppServiceEnvironmentZoneRedundant.py b/checkov/terraform/checks/resource/azure/AppServiceEnvironmentZoneRedundant.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/AppServiceEnvironmentZoneRedundant.py
@@ -0,0 +1,17 @@
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class AppServiceEnvironmentZoneRedundant(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ name = "Ensure App Service Environment is zone redundant"
+ id = "CKV_AZURE_231"
+ supported_resources = ("azurerm_app_service_environment_v3",)
+ categories = (CheckCategories.BACKUP_AND_RECOVERY,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "zone_redundant"
+
+
+check = AppServiceEnvironmentZoneRedundant()
| diff --git a/tests/terraform/checks/resource/azure/example_AppServiceEnvironmentZoneRedundant/main.tf b/tests/terraform/checks/resource/azure/example_AppServiceEnvironmentZoneRedundant/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_AppServiceEnvironmentZoneRedundant/main.tf
@@ -0,0 +1,34 @@
+resource "azurerm_app_service_environment_v3" "pass" {
+ name = "example-asev3"
+ resource_group_name = azurerm_resource_group.example.name
+ subnet_id = azurerm_subnet.example.id
+ zone_redundant = true
+
+ tags = {
+ env = "production"
+ terraformed = "true"
+ }
+}
+
+resource "azurerm_app_service_environment_v3" "fail1" {
+ name = "example-asev3"
+ resource_group_name = azurerm_resource_group.example.name
+ subnet_id = azurerm_subnet.example.id
+ zone_redundant = false
+
+ tags = {
+ env = "production"
+ terraformed = "true"
+ }
+}
+
+resource "azurerm_app_service_environment_v3" "fail2" {
+ name = "example-asev3"
+ resource_group_name = azurerm_resource_group.example.name
+ subnet_id = azurerm_subnet.example.id
+
+ tags = {
+ env = "production"
+ terraformed = "true"
+ }
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_AppServiceEnvironmentZoneRedundant.py b/tests/terraform/checks/resource/azure/test_AppServiceEnvironmentZoneRedundant.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_AppServiceEnvironmentZoneRedundant.py
@@ -0,0 +1,42 @@
+import unittest
+from pathlib import Path
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.checks.resource.azure.AppServiceEnvironmentZoneRedundant import check
+from checkov.terraform.runner import Runner
+
+
+class TestAppServiceEnvironmentZoneRedundant(unittest.TestCase):
+ def test(self):
+ # given
+ test_files_dir = Path(__file__).parent / "example_AppServiceEnvironmentZoneRedundant"
+
+ # when
+ report = Runner().run(root_folder=str(test_files_dir), runner_filter=RunnerFilter(checks=[check.id]))
+
+ # then
+ summary = report.get_summary()
+
+ passing_resources = {
+ "azurerm_app_service_environment_v3.pass",
+ }
+ failing_resources = {
+ "azurerm_app_service_environment_v3.fail1",
+ "azurerm_app_service_environment_v3.fail2",
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == "__main__":
+ unittest.main()
| Azure App Service Environment - Ensure App Service Environment is zone redundant
**Describe the issue**
It seems that there is no check to ensure that App Service Environment v3 instances are zone-redundant.
**Examples**
````hcl
resource "azurerm_app_service_environment_v3" "example" {
name = "example-asev3"
resource_group_name = azurerm_resource_group.example.name
subnet_id = azurerm_subnet.example.id
zone_redundant = true
tags = {
env = "production"
terraformed = "true"
}
}
````
**Version (please complete the following information):**
N/A
**Additional context**
Applications deployed on a zonal ILB (Internally Load Balanced) ASE (App Service Environment) will continue to run and serve traffic on that ASE even if other zones in the same region suffer an outage. It is possible that non-runtime behaviors, including App Service plan scaling, application creation, application configuration, and application publishing, may still be impacted by an outage in other availability zones.
Source: https://learn.microsoft.com/en-us/azure/app-service/environment/zone-redundancy
| 2023-10-18T13:02:34 | -1.0 |
|
bridgecrewio/checkov | 5,665 | bridgecrewio__checkov-5665 | [
"5664"
] | bdf356b1d52a16033f6dac60dd55b4a1a06d2213 | diff --git a/checkov/terraform/checks/resource/azure/AKSOnlyCriticalPodsOnSystemNodes.py b/checkov/terraform/checks/resource/azure/AKSOnlyCriticalPodsOnSystemNodes.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/AKSOnlyCriticalPodsOnSystemNodes.py
@@ -0,0 +1,24 @@
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class AKSOnlyCriticalPodsOnSystemNodes(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ """
+ Microsoft recommends to isolate critical system pods from application pods
+ to prevent misconfigured or rogue application pods from accidentally killing system pods.
+
+ This can be enforced by creating a dedicated system node pool with the CriticalAddonsOnly=true:NoSchedule taint
+ to prevent application pods from being scheduled on system node pools.
+ """
+ name = "Ensure that only critical system pods run on system nodes"
+ id = "CKV_AZURE_232"
+ supported_resources = ("azurerm_kubernetes_cluster",)
+ categories = (CheckCategories.KUBERNETES,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "default_node_pool/[0]/only_critical_addons_enabled"
+
+
+check = AKSOnlyCriticalPodsOnSystemNodes()
| diff --git a/tests/terraform/checks/resource/azure/example_AKSOnlyCriticalPodsOnSystemNodes/main.tf b/tests/terraform/checks/resource/azure/example_AKSOnlyCriticalPodsOnSystemNodes/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_AKSOnlyCriticalPodsOnSystemNodes/main.tf
@@ -0,0 +1,37 @@
+resource "azurerm_kubernetes_cluster" "pass" {
+ name = "example"
+
+ default_node_pool {
+ name = "defaultpool"
+ only_critical_addons_enabled = true
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail1" {
+ name = "example"
+
+ default_node_pool {
+ name = "defaultpool"
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail2" {
+ name = "example"
+
+ default_node_pool {
+ name = "defaultpool"
+ only_critical_addons_enabled = false
+ }
+}
+
+resource "azurerm_kubernetes_cluster" "fail3" {
+ name = "example"
+
+}
+
+resource "azurerm_kubernetes_cluster" "fail4" {
+ name = "example"
+ only_critical_addons_enabled = true
+
+}
+
diff --git a/tests/terraform/checks/resource/azure/test_AKSOnlyCriticalPodsOnSystemNodes.py b/tests/terraform/checks/resource/azure/test_AKSOnlyCriticalPodsOnSystemNodes.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_AKSOnlyCriticalPodsOnSystemNodes.py
@@ -0,0 +1,43 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.AKSOnlyCriticalPodsOnSystemNodes import check
+
+
+class TestAKSOnlyCriticalPodsOnSystemNodes(unittest.TestCase):
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_AKSOnlyCriticalPodsOnSystemNodes")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_kubernetes_cluster.pass',
+ }
+ failing_resources = {
+ 'azurerm_kubernetes_cluster.fail1',
+ 'azurerm_kubernetes_cluster.fail2',
+ 'azurerm_kubernetes_cluster.fail3',
+ 'azurerm_kubernetes_cluster.fail4',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = {c.resource for c in report.passed_checks}
+ failed_check_resources = {c.resource for c in report.failed_checks}
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| Azure Kubernetes Services - Ensure that only critical system pods run on system nodes.
**Describe the issue**
It seems that there are no checks to ensure that only critical system pods run on system nodes.
**Examples**
````hcl
resource "azurerm_kubernetes_cluster" "cluster" {
name = "example"
location = var.location
...
default_node_pool {
name = "defaultpool"
vm_size = var.pool_vm_size
only_critical_addons_enabled = true
  }
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
Microsoft recommends to isolate critical system pods from application pods to prevent misconfigured or rogue application pods from accidentally killing system pods.
This can be enforced by creating a dedicated system node pool with the CriticalAddonsOnly=true:NoSchedule taint to prevent application pods from being scheduled on system node pools.
This is explained here https://learn.microsoft.com/en-us/azure/aks/use-system-pools?tabs=azure-cli#system-and-user-node-pools
(Through Terraform, this is done with "only_critical_addons_enabled")

| 2023-10-18T14:21:46 | -1.0 |
|
bridgecrewio/checkov | 5,679 | bridgecrewio__checkov-5679 | [
"5634"
] | 5a0231bef3bc686253cbea9be812da284dc23a6a | diff --git a/checkov/secrets/plugins/detector_utils.py b/checkov/secrets/plugins/detector_utils.py
--- a/checkov/secrets/plugins/detector_utils.py
+++ b/checkov/secrets/plugins/detector_utils.py
@@ -108,7 +108,13 @@
flags=re.IGNORECASE,
)
-ALLOW_LIST = ('secretsmanager', "secretName", "secret_name", "creation_token") # can add more keys like that
+ALLOW_LIST = ( # can add more keys like that
+ 'secretsmanager',
+ "secretName",
+ "secret_name",
+ "creation_token",
+ "client_secret_setting_name",
+)
ALLOW_LIST_REGEX = r'|'.join(ALLOW_LIST)
# Support for suffix of function name i.e "secretsmanager:GetSecretValue"
CAMEL_CASE_NAMES = r'[A-Z]([A-Z0-9]*[a-z][a-z0-9]*[A-Z]|[a-z0-9]*[A-Z][A-Z0-9]*[a-z])[A-Za-z0-9]*'
| diff --git a/tests/secrets/sanity/iac_fp/main.tf b/tests/secrets/sanity/iac_fp/main.tf
--- a/tests/secrets/sanity/iac_fp/main.tf
+++ b/tests/secrets/sanity/iac_fp/main.tf
@@ -1,3 +1,5 @@
secret_name = "example_secret_name"
-creation_token = "my-product"
\ No newline at end of file
+creation_token = "my-product"
+
+client_secret_setting_name = "MICROSOFT_PROVIDER_AUTHENTICATION_SECRET"
| Terraform azurerm_windows_function_app resource - CKV_SECRET_6 false positive for active_directory_v2 setting
**Describe the issue**
When adding an Azure Function App with the Active Directory v2 identity provider, Checkov reports a false positive.
No secret is exposed, but Checkov reports:
| 0 | CKV_SECRET_6 | xxxxxxxxx| 0a69b97b56bf2230f607111888418a59f1891c92 | Base64 High Entropy String | https://docs.paloaltonetworks.com/content/techdocs/en_US/prisma/prisma-cloud/prisma-cloud-code-security-policy-reference/secrets-policies/secrets-policy-index/git-secrets-6.html |
with resource 0a69b97b56bf2230f607111888418a59f1891c92.
Because the resource id does not make it clear which resource is the culprit, the only possible one is client_secret_setting_name, which is set to
"MICROSOFT_PROVIDER_AUTHENTICATION_SECRET" and is not an exposed secret.
**Examples**
Snippet of the azurerm_windows_function_app resource:
active_directory_v2 {
client_id = local.managed_identity_client_id
client_secret_setting_name = "MICROSOFT_PROVIDER_AUTHENTICATION_SECRET"
tenant_auth_endpoint = local.identity_provider.tenant_auth_endpoint
www_authentication_disabled = false
}
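For reference, the fix shown earlier in this record takes an allow-list approach: attribute names that reference a secret rather than contain one (such as `client_secret_setting_name`) are excluded before the entropy heuristics run. A simplified, stand-alone sketch of that idea (not the actual detector code):

```python
# Simplified, stand-alone sketch of the allow-list idea from the fix shown earlier
# in this record: if the attribute *name* merely references a secret (a setting
# name, a secret name, ...), skip secret detection for its value entirely.
import re

ALLOW_LIST = (
    "secretsmanager",
    "secretName",
    "secret_name",
    "creation_token",
    "client_secret_setting_name",
)
ALLOW_LIST_REGEX = re.compile("|".join(ALLOW_LIST), re.IGNORECASE)


def should_skip(key: str) -> bool:
    """Return True when the attribute name itself is on the allow list."""
    return bool(ALLOW_LIST_REGEX.search(key))


print(should_skip("client_secret_setting_name"))  # True  -> value is not flagged
print(should_skip("client_secret"))               # False -> value is still scanned
```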
**Version (please complete the following information):**
- Checkov Version 2.4.50
**Additional context**
Add any other context about the problem here.
| hey @vigor-vavan thanks for reaching out.
Especially `CKV_SECRET_6` can result in false positives, but always a good idea to revisit and if possible improving it. | 2023-10-23T15:17:04 | -1.0 |
bridgecrewio/checkov | 5,687 | bridgecrewio__checkov-5687 | [
"5667"
] | 2c8fcc7d1d597e7ea75466aa9d13de66f62cfa2e | diff --git a/checkov/terraform/checks/resource/azure/AppServiceSlotHTTPSOnly.py b/checkov/terraform/checks/resource/azure/AppServiceSlotHTTPSOnly.py
--- a/checkov/terraform/checks/resource/azure/AppServiceSlotHTTPSOnly.py
+++ b/checkov/terraform/checks/resource/azure/AppServiceSlotHTTPSOnly.py
@@ -6,12 +6,12 @@ class AppServiceSlotHTTPSOnly(BaseResourceValueCheck):
def __init__(self):
name = "Ensure web app redirects all HTTP traffic to HTTPS in Azure App Service Slot"
id = "CKV_AZURE_153"
- supported_resources = ['azurerm_app_service_slot']
+ supported_resources = ["azurerm_app_service_slot", "azurerm_linux_web_app_slot", "azurerm_windows_web_app_slot"]
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def get_inspected_key(self):
- return 'https_only/[0]'
+ return "https_only/[0]"
check = AppServiceSlotHTTPSOnly()
| diff --git a/tests/terraform/checks/resource/azure/example_AppServiceSlotHTTPSOnly/main.tf b/tests/terraform/checks/resource/azure/example_AppServiceSlotHTTPSOnly/main.tf
--- a/tests/terraform/checks/resource/azure/example_AppServiceSlotHTTPSOnly/main.tf
+++ b/tests/terraform/checks/resource/azure/example_AppServiceSlotHTTPSOnly/main.tf
@@ -26,6 +26,23 @@ resource "azurerm_app_service_slot" "fail" {
}
+resource "azurerm_linux_web_app_slot" "fail" {
+ name = "fail-slot"
+ app_service_id = azurerm_linux_web_app.fail.id
+ https_only = false
+
+ site_config {}
+}
+
+resource "azurerm_windows_web_app_slot" "fail" {
+ name = "fail-slot"
+ app_service_id = azurerm_windows_web_app.fail.id
+ https_only = false
+
+ site_config {}
+}
+
+
resource "azurerm_app_service_slot" "fail2" {
name = random_id.server.hex
app_service_name = azurerm_app_service.example.name
@@ -77,3 +94,19 @@ resource "azurerm_app_service_slot" "pass" {
value = "Server=some-server.mydomain.com;Integrated Security=SSPI"
}
}
+
+resource "azurerm_linux_web_app_slot" "pass" {
+ name = "pass-slot"
+ app_service_id = azurerm_linux_web_app.pass.id
+ https_only = true
+
+ site_config {}
+}
+
+resource "azurerm_windows_web_app_slot" "pass" {
+ name = "pass-slot"
+ app_service_id = azurerm_windows_web_app.pass.id
+ https_only = true
+
+ site_config {}
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_AppServiceSlotHTTPSOnly.py b/tests/terraform/checks/resource/azure/test_AppServiceSlotHTTPSOnly.py
--- a/tests/terraform/checks/resource/azure/test_AppServiceSlotHTTPSOnly.py
+++ b/tests/terraform/checks/resource/azure/test_AppServiceSlotHTTPSOnly.py
@@ -20,18 +20,23 @@ def test(self):
passing_resources = {
"azurerm_app_service_slot.pass",
+ "azurerm_linux_web_app_slot.pass",
+ "azurerm_windows_web_app_slot.pass",
}
failing_resources = {
"azurerm_app_service_slot.fail",
"azurerm_app_service_slot.fail2",
+ "azurerm_linux_web_app_slot.fail",
+ "azurerm_windows_web_app_slot.fail",
}
+ skipped_resources = {}
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}
- self.assertEqual(summary["passed"], 1)
- self.assertEqual(summary["failed"], 2)
- self.assertEqual(summary["skipped"], 0)
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
self.assertEqual(summary["parsing_errors"], 0)
self.assertEqual(passing_resources, passed_check_resources)
diff --git a/tests/terraform/image_referencer/test_runner_azure_resources.py b/tests/terraform/image_referencer/test_runner_azure_resources.py
--- a/tests/terraform/image_referencer/test_runner_azure_resources.py
+++ b/tests/terraform/image_referencer/test_runner_azure_resources.py
@@ -182,7 +182,7 @@ def test_app_service_linux_web_resources(mocker: MockerFixture, graph_framework)
assert len(tf_report.resources) == 2
assert len(tf_report.passed_checks) == 4
- assert len(tf_report.failed_checks) == 13
+ assert len(tf_report.failed_checks) == 14
assert len(tf_report.skipped_checks) == 0
assert len(tf_report.parsing_errors) == 0
@@ -271,7 +271,8 @@ def test_app_service_windows_web_resources(mocker: MockerFixture, graph_framewor
assert len(tf_report.resources) == 2
assert len(tf_report.passed_checks) == 4
- assert len(tf_report.failed_checks) == 13
+ # Changed from 13 to 14 due to PR #5687
+ assert len(tf_report.failed_checks) == 14
assert len(tf_report.skipped_checks) == 0
assert len(tf_report.parsing_errors) == 0
| CKV_AZURE_153 - Not available for linux_web_app_slot & azurerm_windows_web_app_slot
**Describe the issue**
It seems that the best practice CKV_AZURE_153 is not being checked against the following Terraform resources:
- azurerm_linux_web_app_slot
- azurerm_windows_web_app_slot
CKV_AZURE_153 is used to "Ensure web app redirects all HTTP traffic to HTTPS in Azure App Service"
**Examples**
Same as with "azurerm_app_service_slot"
**Version (please complete the following information):**
N/A
**Additional context**
It seems that the check needs to be adjusted cc https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/app_service_slot

| 2023-10-25T12:45:14 | -1.0 |
|
bridgecrewio/checkov | 5,705 | bridgecrewio__checkov-5705 | [
"5698"
] | a5b97b1c266385aed32ae43e16ed0e9ede8432dc | diff --git a/checkov/common/output/cyclonedx.py b/checkov/common/output/cyclonedx.py
--- a/checkov/common/output/cyclonedx.py
+++ b/checkov/common/output/cyclonedx.py
@@ -100,12 +100,8 @@ def create_bom(self) -> Bom:
continue
component = self.create_component(check_type=report.check_type, resource=check)
- if bom.has_component(component=component):
- component = (
- bom.get_component_by_purl( # type:ignore[assignment] # the previous line checks, if exists
- purl=component.purl
- )
- )
+ if existing_component := bom.get_component_by_purl(purl=component.purl):
+ component = existing_component
else:
bom.components.add(component)
@@ -121,7 +117,7 @@ def create_bom(self) -> Bom:
for resource in sorted(report.extra_resources):
component = self.create_component(check_type=report.check_type, resource=resource)
- if not bom.has_component(component=component):
+ if not bom.get_component_by_purl(purl=component.purl):
bom.components.add(component)
if is_image_report:
| diff --git a/tests/common/output/test_cyclonedx_report.py b/tests/common/output/test_cyclonedx_report.py
--- a/tests/common/output/test_cyclonedx_report.py
+++ b/tests/common/output/test_cyclonedx_report.py
@@ -197,6 +197,74 @@ def test_sca_packages_cyclonedx_bom():
assert record.file_line_range == [2, 6]
assert output
+
+def test_duplicate_sca_packages_cyclonedx_bom():
+ # given
+ rootless_file_path = "requirements.txt"
+ file_abs_path = "/path/to/requirements.txt"
+ check_class = "checkov.sca_package.scanner.Scanner"
+ vulnerability_details = {
+ "id": "CVE-2019-19844",
+ "status": "fixed in 3.0.1, 2.2.9, 1.11.27",
+ "cvss": 9.8,
+ "vector": "CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
+ "description": "Django before 1.11.27, 2.x before 2.2.9, and 3.x before 3.0.1 allows account takeover. ...",
+ "severity": "moderate",
+ "packageName": "django",
+ "packageVersion": "1.2",
+ "link": "https://nvd.nist.gov/vuln/detail/CVE-2019-19844",
+ "riskFactors": ["Attack complexity: low", "Attack vector: network", "Critical severity", "Has fix"],
+ "impactedVersions": ["<1.11.27"],
+ "publishedDate": "2019-12-18T20:15:00+01:00",
+ "discoveredDate": "2019-12-18T19:15:00Z",
+ "fixDate": "2019-12-18T20:15:00+01:00",
+ }
+ package_1 = {
+ "package_registry": "https://registry.npmjs.org/",
+ "is_private_registry": False,
+ "linesNumbers": [2, 3],
+ }
+ package_2 = {
+ "package_registry": "https://registry.npmjs.org/",
+ "is_private_registry": False,
+ "linesNumbers": [5, 6],
+ }
+
+ record_1 = create_report_cve_record(
+ rootless_file_path=rootless_file_path,
+ file_abs_path=file_abs_path,
+ check_class=check_class,
+ vulnerability_details=vulnerability_details,
+ licenses="OSI_BDS",
+ package=package_1,
+ file_line_range=get_package_lines(package_1),
+ )
+ record_2 = create_report_cve_record(
+ rootless_file_path=rootless_file_path,
+ file_abs_path=file_abs_path,
+ check_class=check_class,
+ vulnerability_details=vulnerability_details,
+ licenses="OSI_BDS",
+ package=package_2,
+ file_line_range=get_package_lines(package_2),
+ )
+
+ report = Report(CheckType.SCA_PACKAGE)
+ report.add_resource(record_1.resource)
+ report.add_record(record_1)
+ report.add_resource(record_2.resource)
+ report.add_record(record_2)
+
+ # when
+ cyclonedx = CycloneDX([report], "repoid/test")
+
+ # then
+ assert len(cyclonedx.bom.components) == 1
+
+ component = next(iter(cyclonedx.bom.components))
+ assert component.bom_ref.value == "pkg:pypi/repoid/test/requirements.txt/django@1.2"
+
+
def test_create_schema_version_1_3(mocker: MockerFixture):
# given
test_file = Path(__file__).parent / "fixtures/main.tf"
| SCA Scan with Cyclonedx Output Causes Crash
**Describe the issue**
Running the following command is causing crash:
```sh
checkov --framework sca_package -d . --bc-api-key $PRISMA_ACCESS_KEY::$PRISMA_SECRET_KEY --repo-id hagopj13/node-express-boilerplate -o cyclonedx
```
**Examples**
Using this repo as an example https://github.com/hagopj13/node-express-boilerplate
**Exception Trace**
Please share the trace for the exception and all relevant output by checkov.
To maximize the understanding, please run checkov with LOG_LEVEL set to debug
as follows:
```sh
LOG_LEVEL=DEBUG checkov --framework sca_package -d . --bc-api-key $PRISMA_ACCESS_KEY::$PRISMA_SECRET_KEY --repo-id hagopj13/node-express-boilerplate -o cyclonedx
TRUNCATED OUTPUT IS TOO LONG
2023-10-27 09:03:01,435 [MainThread ] [INFO ] Finalize repository hagopj13/node-express-boilerplate in bridgecrew's platform
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] Getting exit code for report sca_package
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] Soft fail severity threshold: None
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] Soft fail checks: []
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] Hard fail severity threshold: None
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] Hard fail checks: []
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] Use enforcement rules is FALSE
2023-10-27 09:03:01,436 [MainThread ] [DEBUG] In get_exit_code; exit code thresholds: {'soft_fail': False, 'soft_fail_checks': [], 'soft_fail_threshold': None, 'hard_fail_checks': [], 'hard_fail_threshold': None}, hard_fail_on_parsing_errors: False
2023-10-27 09:03:01,437 [MainThread ] [DEBUG] There are failed checks and all soft/hard fail args are empty - returning 1
2023-10-27 09:03:01,450 [MainThread ] [ERROR] Exception traceback:
Traceback (most recent call last):
File "/opt/homebrew/lib/python3.11/site-packages/checkov/main.py", line 554, in run
exit_codes.append(self.print_results(
^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/main.py", line 773, in print_results
return runner_registry.print_reports(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/runners/runner_registry.py", line 526, in print_reports
cyclonedx = CycloneDX(repo_id=metadata_integration.bc_integration.repo_id, reports=cyclonedx_reports)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 64, in __init__
self.bom = self.create_bom()
^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 112, in create_bom
vulnerability = self.create_vulnerability(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 287, in create_vulnerability
vulnerability = self.create_cve_vulnerability(resource=resource, component=component)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 408, in create_cve_vulnerability
affects=[BomTarget(ref=component.bom_ref.value)],
^^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'bom_ref'
Traceback (most recent call last):
File "/opt/homebrew/bin/checkov", line 9, in <module>
sys.exit(Checkov().run())
^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/main.py", line 554, in run
exit_codes.append(self.print_results(
^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/main.py", line 773, in print_results
return runner_registry.print_reports(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/runners/runner_registry.py", line 526, in print_reports
cyclonedx = CycloneDX(repo_id=metadata_integration.bc_integration.repo_id, reports=cyclonedx_reports)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 64, in __init__
self.bom = self.create_bom()
^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 112, in create_bom
vulnerability = self.create_vulnerability(
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 287, in create_vulnerability
vulnerability = self.create_cve_vulnerability(resource=resource, component=component)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/opt/homebrew/lib/python3.11/site-packages/checkov/common/output/cyclonedx.py", line 408, in create_cve_vulnerability
affects=[BomTarget(ref=component.bom_ref.value)],
^^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'bom_ref'
```
**Desktop (please complete the following information):**
- OS: OSX
- Checkov Version 3.0.7
**Additional context**
Scan works with CSV output
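For reference, the fix shown earlier in this record avoids the crash by keying component de-duplication off a single purl lookup instead of pairing `has_component()` with a separate lookup. A minimal sketch of that pattern, using only the BOM calls that appear in the patch:

```python
# Minimal sketch of the lookup-then-add pattern from the fix shown earlier in this
# record: a single purl lookup both detects and returns an existing component, so
# the component attached to a vulnerability can never be None.
def add_or_reuse_component(bom, component):
    existing = bom.get_component_by_purl(purl=component.purl)
    if existing is not None:
        return existing  # package already in the BOM (e.g. a duplicated transitive dependency)
    bom.components.add(component)
    return component  # first occurrence of this package
```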
| hey @hi-artem thanks for reaching out.
I can confirm this is a bug, resulting from a transitive dependency, which exists multiple times. | 2023-10-30T22:34:47 | -1.0 |
bridgecrewio/checkov | 5,747 | bridgecrewio__checkov-5747 | [
"5743"
] | 4875adc472f409bc7039a879347b404468e88215 | diff --git a/checkov/cloudformation/checks/resource/aws/LambdaEnvironmentCredentials.py b/checkov/cloudformation/checks/resource/aws/LambdaEnvironmentCredentials.py
--- a/checkov/cloudformation/checks/resource/aws/LambdaEnvironmentCredentials.py
+++ b/checkov/cloudformation/checks/resource/aws/LambdaEnvironmentCredentials.py
@@ -24,6 +24,10 @@ def scan_resource_conf(self, conf: dict[str, Any]) -> CheckResult:
variables = environment.get("Variables")
if variables and isinstance(variables, dict):
for var_name, value in variables.items():
+ if isinstance(value, dict):
+ # if it is a resolved instrinsic function like !Ref: xyz, then it can't be a secret
+ continue
+
secrets = get_secrets_from_string(str(value), AWS, GENERAL)
if secrets:
self.evaluated_keys = [f"Properties/Environment/Variables/{var_name}"]
diff --git a/checkov/common/util/secrets.py b/checkov/common/util/secrets.py
--- a/checkov/common/util/secrets.py
+++ b/checkov/common/util/secrets.py
@@ -67,7 +67,7 @@
# now combine all the compiled patterns into one long list
_patterns['all'] = list(itertools.chain.from_iterable(_patterns.values()))
-_hash_patterns = list(map(lambda regex: re.compile(regex, re.IGNORECASE), ['^[a-f0-9]{32}$', '^[a-f0-9]{40}$']))
+_hash_patterns = [re.compile(regex, re.IGNORECASE) for regex in ('^[a-f0-9]{32}$', '^[a-f0-9]{40}$')]
def is_hash(s: str) -> bool:
@@ -96,14 +96,14 @@ def string_has_secrets(s: str, *categories: str) -> bool:
:return:
"""
+ if is_hash(s):
+ return False
+
# set a default if no category is provided; or, if categories were provided and they include 'all', then just set it
# explicitly so we don't do any duplication
if not categories or "all" in categories:
categories = ("all",)
- if is_hash(s):
- return False
-
for c in categories:
if any([pattern.search(s) for pattern in _patterns[c]]):
return True
@@ -229,18 +229,14 @@ def omit_secret_value_from_graph_checks(
def get_secrets_from_string(s: str, *categories: str) -> list[str]:
# set a default if no category is provided; or, if categories were provided and they include 'all', then just set it
# explicitly so we don't do any duplication
+ if is_hash(s):
+ return []
+
if not categories or "all" in categories:
categories = ("all",)
- if is_hash(s):
- return list()
-
secrets: list[str] = []
for c in categories:
- matches: list[str] = []
for pattern in _patterns[c]:
- _matches = re.finditer(pattern, s)
- matches.extend([str(match.group()) for match in _matches])
- if matches:
- secrets.extend(matches)
+ secrets.extend(str(match.group()) for match in pattern.finditer(s))
return secrets
| diff --git a/tests/cloudformation/checks/resource/aws/example_LambdaEnvironmentCredentials/PASS.yaml b/tests/cloudformation/checks/resource/aws/example_LambdaEnvironmentCredentials/PASS.yaml
--- a/tests/cloudformation/checks/resource/aws/example_LambdaEnvironmentCredentials/PASS.yaml
+++ b/tests/cloudformation/checks/resource/aws/example_LambdaEnvironmentCredentials/PASS.yaml
@@ -41,3 +41,18 @@ Resources:
Description: Invoke a function during stack creation.
TracingConfig:
Mode: Active
+ UnresolvedEnv:
+ Type: AWS::Lambda::Function
+ Properties:
+ Runtime: nodejs12.x
+ Role: arn:aws:iam::123456789012:role/lambda-role
+ Handler: index.handler
+ Environment:
+ Variables:
+ MY_COOL_STATE_MACHINE: !Ref MySuperCoolFortyCharLongStateMachineeeee
+ Code:
+ ZipFile: |
+ print('hi')
+ Description: Invoke a function during stack creation.
+ TracingConfig:
+ Mode: Active
diff --git a/tests/cloudformation/checks/resource/aws/test_LambdaEnvironmentCredentials.py b/tests/cloudformation/checks/resource/aws/test_LambdaEnvironmentCredentials.py
--- a/tests/cloudformation/checks/resource/aws/test_LambdaEnvironmentCredentials.py
+++ b/tests/cloudformation/checks/resource/aws/test_LambdaEnvironmentCredentials.py
@@ -17,6 +17,7 @@ def test_summary(self):
"AWS::Lambda::Function.NoEnv",
"AWS::Lambda::Function.NoSecret",
"AWS::Lambda::Function.EnvNull",
+ "AWS::Lambda::Function.UnresolvedEnv",
"AWS::Serverless::Function.NoEnv",
"AWS::Serverless::Function.NoProperties",
"AWS::Serverless::Function.NoSecret",
@@ -29,8 +30,8 @@ def test_summary(self):
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}
- self.assertEqual(summary["passed"], 6)
- self.assertEqual(summary["failed"], 2)
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
| CKV_AWS_45 False Positive
**Describe the issue**
[CKV_AWS_45](https://github.com/bridgecrewio/checkov/blob/main/checkov/cloudformation/checks/resource/aws/LambdaEnvironmentCredentials.py) gives a false positive for any Lambda environment variable value that is exactly 40 characters long, due to the AWS secret access key [regex](https://github.com/bridgecrewio/checkov/blob/4875adc472f409bc7039a879347b404468e88215/checkov/common/util/secrets.py#L49). The regex itself makes sense, but the check could analyze the environment variable more deeply to produce fewer false positives. In the example below, it would be a good idea to at least check whether an intrinsic function is used (in this case `!Ref`) in order to reduce false positives.
**Examples**
```yml
Resources:
MyCoolLambdaFunction:
Type: AWS::Serverless::Function
Properties:
CodeUri: blah
Handler: src.app.lambda_handler
Policies:
- StepFunctionsExecutionPolicy:
StateMachineName: !GetAtt MySuperCoolFortyCharLongStateMachineeeee.Name
Environment:
Variables:
MY_COOL_STATE_MACHINE: !Ref MySuperCoolFortyCharLongStateMachineeeee # 40 chars in ref fails check
```
**Version (please complete the following information):**
- Checkov Version latest
**Additional context**
Add any other context about the problem here.
You can see how this works by calling the `get_secrets_from_string` method directly in a python terminal which is what is being called in the `CKV_AWS_45` [check](https://github.com/bridgecrewio/checkov/blob/4875adc472f409bc7039a879347b404468e88215/checkov/cloudformation/checks/resource/aws/LambdaEnvironmentCredentials.py#L27).
```python
from checkov.common.util.secrets import AWS, GENERAL, get_secrets_from_string
get_secrets_from_string("MySuperCoolFortyCharLongStateMachineeeee", AWS, GENERAL)
# ['MySuperCoolFortyCharLongStateMachineeeee']
get_secrets_from_string("x"*40, AWS, GENERAL)
# ['xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx']
get_secrets_from_string("x"*41, AWS, GENERAL)
# []
```
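A minimal sketch of the suggested mitigation, mirroring the patch above: skip values that are already resolved intrinsic functions (dicts) before running the secret scan. The `find_secret_vars` helper is hypothetical and only illustrates the idea:
```python
from checkov.common.util.secrets import AWS, GENERAL, get_secrets_from_string

def find_secret_vars(variables: dict) -> list:
    """Return the names of variables whose literal values look like hard-coded secrets."""
    flagged = []
    for var_name, value in variables.items():
        if isinstance(value, dict):
            # a resolved intrinsic function such as {"Ref": "..."} cannot be a literal secret
            continue
        if get_secrets_from_string(str(value), AWS, GENERAL):
            flagged.append(var_name)
    return flagged

# a plain 40-character string is flagged, while a !Ref-style dict is skipped
print(find_secret_vars({
    "PLAIN": "x" * 40,
    "MY_COOL_STATE_MACHINE": {"Ref": "MySuperCoolFortyCharLongStateMachineeeee"},
}))  # ['PLAIN']
```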
| hey @bakosa thanks for reaching out.
Yeah, seems like a very good idea 😄 I will create a PR for it | 2023-11-11T10:35:44 | -1.0 |
bridgecrewio/checkov | 5,748 | bridgecrewio__checkov-5748 | [
"5717"
] | 4875adc472f409bc7039a879347b404468e88215 | diff --git a/checkov/terraform/checks/resource/azure/ACREnableZoneRedundancy.py b/checkov/terraform/checks/resource/azure/ACREnableZoneRedundancy.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/ACREnableZoneRedundancy.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+
+from typing import Any
+
+
+class ACREnableZoneRedundancy(BaseResourceCheck):
+
+ def __init__(self) -> None:
+ """
+ Zone redundancy provides resiliency and high availability to
+ a registry or replication resource in a specific region. Supported on Premium.
+ """
+ name = "Ensure Azure Container Registry (ACR) is zone redundant"
+ id = "CKV_AZURE_233"
+ supported_resources = ("azurerm_container_registry",)
+ categories = (CheckCategories.BACKUP_AND_RECOVERY,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ # check registry. default=false
+ if conf.get("zone_redundancy_enabled", []) != [True]:
+ return CheckResult.FAILED
+
+ # check each replica. default=false
+ replications = conf.get("georeplications", {})
+ for replica in replications:
+ zone_redundancy_enabled = replica.get('zone_redundancy_enabled', [])
+ if zone_redundancy_enabled != [True]:
+ return CheckResult.FAILED
+
+ return CheckResult.PASSED
+
+
+check = ACREnableZoneRedundancy()
| diff --git a/tests/terraform/checks/resource/azure/example_ACREnableZoneRedundancy/main.tf b/tests/terraform/checks/resource/azure/example_ACREnableZoneRedundancy/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_ACREnableZoneRedundancy/main.tf
@@ -0,0 +1,58 @@
+
+resource "azurerm_container_registry" "pass" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ sku = "Premium"
+ zone_redundancy_enabled = true
+}
+
+
+resource "azurerm_container_registry" "pass2" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ sku = "Premium"
+ zone_redundancy_enabled = true
+ georeplications {
+ location = "East US"
+ zone_redundancy_enabled = true
+ }
+ georeplications {
+ location = "North Europe"
+ zone_redundancy_enabled = true
+ }
+}
+
+
+resource "azurerm_container_registry" "fail" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ sku = "Premium"
+}
+
+
+resource "azurerm_container_registry" "fail2" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ sku = "Premium"
+ zone_redundancy_enabled = false
+}
+
+
+resource "azurerm_container_registry" "fail3" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ sku = "Premium"
+ zone_redundancy_enabled = true
+ georeplications {
+ location = "East US"
+ }
+ georeplications {
+ location = "North Europe"
+ zone_redundancy_enabled = true
+ }
+}
diff --git a/tests/terraform/checks/resource/azure/test_ACREnableZoneRedundancy.py b/tests/terraform/checks/resource/azure/test_ACREnableZoneRedundancy.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_ACREnableZoneRedundancy.py
@@ -0,0 +1,44 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.ACREnableZoneRedundancy import check
+
+
+class TestACREnableZoneRedundancy(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_ACREnableZoneRedundancy")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_container_registry.pass',
+ 'azurerm_container_registry.pass2',
+ }
+ failing_resources = {
+ 'azurerm_container_registry.fail',
+ 'azurerm_container_registry.fail2',
+ 'azurerm_container_registry.fail3',
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
| Azure Container Registry - Ensure the registry is zone-redundant
**Describe the issue**
It seems that there is no Checkov check to ensure that Azure Container Registries deployed using Terraform are zone-redundant.
**Examples**
````hcl
resource "azurerm_container_registry" "example" {
name = "containerRegistry1"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
sku = "Premium"
zone_redundancy_enabled = true
}
````
**Version (please complete the following information):**
- N/A
**Additional context**
Azure Container Registry supports optional zone redundancy. [Zone redundancy](https://learn.microsoft.com/en-us/azure/availability-zones/az-overview#availability-zones) provides resiliency and high availability to a registry or replication resource (replica) in a specific region.
Source : https://learn.microsoft.com/en-us/azure/container-registry/zone-redundancy
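A minimal sketch of the intended check logic, assuming parsed HCL where attribute values arrive as single-element lists; the `is_zone_redundant` helper is hypothetical and mirrors the new check in the patch above (zone redundancy must be enabled on the registry and on every `georeplications` block):
```python
def is_zone_redundant(conf: dict) -> bool:
    """True only if the registry and every georeplication block enable zone redundancy."""
    if conf.get("zone_redundancy_enabled", [False]) != [True]:
        return False
    for replica in conf.get("georeplications", []):
        if replica.get("zone_redundancy_enabled", [False]) != [True]:
            return False
    return True

# parsed HCL attribute values arrive as single-element lists
print(is_zone_redundant({
    "zone_redundancy_enabled": [True],
    "georeplications": [{"location": ["East US"], "zone_redundancy_enabled": [True]}],
}))  # True
```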
| 2023-11-11T20:03:25 | -1.0 |
|
bridgecrewio/checkov | 5,753 | bridgecrewio__checkov-5753 | [
"5721"
] | 5796faf8523acbe4fb5f5fb340c682a27b7851d8 | diff --git a/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py b/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py
--- a/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py
+++ b/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py
@@ -4,15 +4,27 @@
class FunctionAppMinTLSVersion(BaseResourceValueCheck):
def __init__(self):
+ """
+ The minimum supported TLS version for the function app.
+ Defaults to 1.2 for new function apps.
+ field name is:
+ - min_tls_version in azurerm_function_app, azurerm_function_app_slot.
+ - minimum_tls_version in newer resources (with linux/windows).
+ """
name = "Ensure Function app is using the latest version of TLS encryption"
id = "CKV_AZURE_145"
- supported_resources = ['azurerm_function_app']
+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',
+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',
+ 'azurerm_windows_function_app_slot']
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources,
missing_block_result=CheckResult.PASSED)
def get_inspected_key(self):
- return "site_config/[0]/min_tls_version"
+ if self.entity_type in ("azurerm_function_app", "azurerm_function_app_slot"):
+ return "site_config/[0]/min_tls_version"
+ else:
+ return "site_config/[0]/minimum_tls_version"
def get_expected_value(self):
return 1.2
| diff --git a/tests/terraform/checks/resource/azure/example_FunctionAppMinTLSVersion/main.tf b/tests/terraform/checks/resource/azure/example_FunctionAppMinTLSVersion/main.tf
--- a/tests/terraform/checks/resource/azure/example_FunctionAppMinTLSVersion/main.tf
+++ b/tests/terraform/checks/resource/azure/example_FunctionAppMinTLSVersion/main.tf
@@ -17,8 +17,65 @@ resource "azurerm_function_app" "fail" {
allowed_origins = ["*"]
}
}
+}
+
+resource "azurerm_function_app_slot" "fail2" {
+ name = "test-azure-functions_slot"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ function_app_name = azurerm_function_app.example.name
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ site_config {
+ min_tls_version = 1.1
+ }
+}
+resource "azurerm_linux_function_app" "fail3" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ service_plan_id = azurerm_service_plan.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {
+ minimum_tls_version = 1.1
+ }
}
+resource "azurerm_windows_function_app" "fail4" {
+ name = "example-windows-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {
+ minimum_tls_version = 1.1
+ }
+}
+
+resource "azurerm_linux_function_app_slot" "fail5" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {
+ minimum_tls_version = 1.1
+ }
+}
+resource "azurerm_windows_function_app_slot" "fail6" {
+ name = "example-slot"
+ function_app_id = azurerm_windows_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {
+ minimum_tls_version = 1.1
+ }
+}
+
resource "azurerm_function_app" "pass" {
name = "test-azure-functions"
location = azurerm_resource_group.example.location
@@ -38,6 +95,7 @@ resource "azurerm_function_app" "pass" {
}
}
}
+
resource "azurerm_function_app" "pass2" {
name = "test-azure-functions"
location = azurerm_resource_group.example.location
@@ -79,3 +137,64 @@ resource "azurerm_function_app" "pass3" {
}
}
}
+
+resource "azurerm_function_app_slot" "pass4" {
+ name = "test-azure-functions_slot"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ function_app_name = azurerm_function_app.example.name
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+}
+
+resource "azurerm_linux_function_app" "pass5" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ service_plan_id = azurerm_service_plan.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {
+ minimum_tls_version = 1.2
+ }
+}
+resource "azurerm_windows_function_app" "pass6" {
+ name = "example-windows-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {
+ minimum_tls_version = 1.2
+ }
+}
+
+resource "azurerm_linux_function_app_slot" "pass7" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {
+ minimum_tls_version = 1.2
+ }
+}
+resource "azurerm_windows_function_app_slot" "pass8" {
+ name = "example-slot"
+ function_app_id = azurerm_windows_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {
+ minimum_tls_version = 1.2
+ }
+}
+resource "azurerm_windows_function_app_slot" "pass9" {
+ name = "example-slot"
+ function_app_id = azurerm_windows_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {}
+}
diff --git a/tests/terraform/checks/resource/azure/test_FunctionAppMinTLSVersion.py b/tests/terraform/checks/resource/azure/test_FunctionAppMinTLSVersion.py
--- a/tests/terraform/checks/resource/azure/test_FunctionAppMinTLSVersion.py
+++ b/tests/terraform/checks/resource/azure/test_FunctionAppMinTLSVersion.py
@@ -17,16 +17,27 @@ def test(self):
"azurerm_function_app.pass",
"azurerm_function_app.pass2",
"azurerm_function_app.pass3",
+ "azurerm_function_app_slot.pass4",
+ "azurerm_linux_function_app.pass5",
+ "azurerm_windows_function_app.pass6",
+ "azurerm_linux_function_app_slot.pass7",
+ "azurerm_windows_function_app_slot.pass8",
+ "azurerm_windows_function_app_slot.pass9",
}
failing_resources = {
"azurerm_function_app.fail",
+ "azurerm_function_app_slot.fail2",
+ "azurerm_linux_function_app.fail3",
+ "azurerm_windows_function_app.fail4",
+ "azurerm_linux_function_app_slot.fail5",
+ "azurerm_windows_function_app_slot.fail6",
}
passed_check_resources = {c.resource for c in report.passed_checks}
failed_check_resources = {c.resource for c in report.failed_checks}
- self.assertEqual(summary["passed"], 3)
- self.assertEqual(summary["failed"], 1)
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
self.assertEqual(summary["skipped"], 0)
self.assertEqual(summary["parsing_errors"], 0)
diff --git a/tests/terraform/image_referencer/test_runner_azure_resources.py b/tests/terraform/image_referencer/test_runner_azure_resources.py
--- a/tests/terraform/image_referencer/test_runner_azure_resources.py
+++ b/tests/terraform/image_referencer/test_runner_azure_resources.py
@@ -134,7 +134,7 @@ def test_app_service_linux_function_resources(mocker: MockerFixture, graph_frame
sca_image_report = next(report for report in reports if report.check_type == CheckType.SCA_IMAGE)
assert len(tf_report.resources) == 2
- assert len(tf_report.passed_checks) == 0
+ assert len(tf_report.passed_checks) == 2
assert len(tf_report.failed_checks) == 2
assert len(tf_report.skipped_checks) == 0
assert len(tf_report.parsing_errors) == 0
| Azure Function App Slots - Ensure Azure Function App Slots use at least TLS 1.2
**Describe the issue**
It seems that there are no checks that ensure that the following resources use at least TLS 1.2:
- azurerm_function_app_slot
- azurerm_linux_function_app_slot
- azurerm_windows_function_app_slot
**Examples**
````hcl
resource "azurerm_function_app_slot" "example" {
name = "test-azure-functions_slot"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
app_service_plan_id = azurerm_app_service_plan.example.id
function_app_name = azurerm_function_app.example.name
storage_account_name = azurerm_storage_account.example.name
storage_account_access_key = azurerm_storage_account.example.primary_access_key
site_config {
min_tls_version = 1.2
}
}
resource "azurerm_linux_function_app_slot" "example" {
name = "example-linux-function-app-slot"
function_app_id = azurerm_linux_function_app.example.id
storage_account_name = azurerm_storage_account.example.name
site_config {
min_tls_version = 1.2
}
}
resource "azurerm_windows_function_app" "example" {
name = "example-windows-function-app"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_account_name = azurerm_storage_account.example.name
service_plan_id = azurerm_service_plan.example.id
site_config {
min_tls_version = 1.2
}
}
````
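A minimal sketch of how an extended check could pick the right attribute per resource type, mirroring the patch above: the legacy `azurerm_function_app`/`azurerm_function_app_slot` resources use `min_tls_version`, while the newer linux/windows resources use `minimum_tls_version`. The `tls_version_key` helper is hypothetical:
```python
LEGACY_TYPES = ("azurerm_function_app", "azurerm_function_app_slot")

def tls_version_key(entity_type: str) -> str:
    """Return the site_config attribute that holds the minimum TLS version for a resource type."""
    if entity_type in LEGACY_TYPES:
        return "site_config/[0]/min_tls_version"
    return "site_config/[0]/minimum_tls_version"

print(tls_version_key("azurerm_linux_function_app_slot"))  # site_config/[0]/minimum_tls_version
```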
**Version (please complete the following information):**
- N/A
**Additional context**
N/A
| you can just extend check ID `CKV_AZURE_145` https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/azure/FunctionAppMinTLSVersion.py | 2023-11-12T20:25:53 | -1.0 |
bridgecrewio/checkov | 5,756 | bridgecrewio__checkov-5756 | [
"5744"
] | 767062529622a2dde35a685ef0a4383d90ad8a28 | diff --git a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py
--- a/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py
+++ b/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py
@@ -3,6 +3,10 @@
class KeyspacesTableUsesCMK(BaseResourceCheck):
+ """
+ Valid values for encryption_specification type:
+ AWS_OWNED_KMS_KEY (default), CUSTOMER_MANAGED_KMS_KEY (requires kms_key_identifier:ARN)
+ """
def __init__(self):
name = "Ensure Keyspaces Table uses CMK"
id = "CKV_AWS_265"
@@ -14,7 +18,7 @@ def scan_resource_conf(self, conf) -> CheckResult:
if conf.get("encryption_specification") and isinstance(conf.get("encryption_specification"), list):
encrypt = conf.get("encryption_specification")[0]
if encrypt.get("kms_key_identifier") and isinstance(encrypt.get("kms_key_identifier"), list):
- if encrypt.get("type") == ["CUSTOMER_MANAGED_KEY"]:
+ if encrypt.get("type") == ["CUSTOMER_MANAGED_KMS_KEY"]:
return CheckResult.PASSED
self.evaluated_keys = ["encryption_specification/[0]/type"]
self.evaluated_keys = ["encryption_specification/[0]/kms_key_identifier"]
| diff --git a/tests/terraform/checks/resource/aws/example_KeyspacesTableUsesCMK/main.tf b/tests/terraform/checks/resource/aws/example_KeyspacesTableUsesCMK/main.tf
--- a/tests/terraform/checks/resource/aws/example_KeyspacesTableUsesCMK/main.tf
+++ b/tests/terraform/checks/resource/aws/example_KeyspacesTableUsesCMK/main.tf
@@ -31,7 +31,6 @@ resource "aws_keyspaces_table" "fail2" {
encryption_specification {
type="AWS_OWNED_KMS_KEY"
}
-
}
@@ -53,7 +52,6 @@ resource "aws_keyspaces_table" "fail3" {
kms_key_identifier=aws_kms_key.example.arn
type="AWS_OWNED_KMS_KEY"
}
-
}
resource "aws_keyspaces_table" "pass" {
@@ -72,7 +70,6 @@ resource "aws_keyspaces_table" "pass" {
}
encryption_specification {
kms_key_identifier=aws_kms_key.example.arn
- type="CUSTOMER_MANAGED_KEY"
+ type="CUSTOMER_MANAGED_KMS_KEY"
}
-
-}
\ No newline at end of file
+}
| CKV_AWS_265 false positive
**Describe the issue**
CKV_AWS_265 reports that a Keyspaces table is not using a customer-managed KMS key even though it is configured to do so.
This is because the code is looking for ["CUSTOMER_MANAGED_KEY"](https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/aws/KeyspacesTableUsesCMK.py#L17),
while the valid value is actually ["CUSTOMER_MANAGED_KMS_KEY"](https://docs.aws.amazon.com/keyspaces/latest/APIReference/API_EncryptionSpecification.html).
**Examples**
```
resource "aws_keyspaces_table" "keyspace_table" {
keyspace_name = "foo"
table_name = "bar"
encryption_specification {
## This should not fail the check CKV_AWS_265 but it does
kms_key_identifier = var.kms_key_arn
type = "CUSTOMER_MANAGED_KMS_KEY"
}
}
```
**Version (please complete the following information):**
- 3.0.32
**Additional context**
```
~/Downloads> checkov -f ./keyspaces.tf --support
2023-11-10 09:21:38,953 [MainThread ] [WARNI] --bc-api-key argument is required when using --support
[ terraform framework ]: 100%|████████████████████|[1/1], Current File Scanned=keyspaces.tf
[ secrets framework ]: 100%|████████████████████|[1/1], Current File Scanned=./keyspaces.tf
_ _
___| |__ ___ ___| | _______ __
/ __| '_ \ / _ \/ __| |/ / _ \ \ / /
| (__| | | | __/ (__| < (_) \ V /
\___|_| |_|\___|\___|_|\_\___/ \_/
By Prisma Cloud | version: 3.0.32
terraform scan results:
Passed checks: 0, Failed checks: 1, Skipped checks: 0
Check: CKV_AWS_265: "Ensure Keyspaces Table uses CMK"
FAILED for resource: aws_keyspaces_table.keyspace_table
File: /keyspaces.tf:1-9
Guide: https://docs.prismacloud.io/en/enterprise-edition/policy-reference/aws-policies/aws-general-policies/ensure-aws-keyspace-table-uses-customer-managed-keys-cmks
1 | resource "aws_keyspaces_table" "keyspace_table" {
2 | keyspace_name = "foo"
3 | table_name = "bar"
4 | encryption_specification {
5 | ## This should not fail the check CKV_AWS_265 but it does
6 | kms_key_identifier = var.kms_key_arn
7 | type = "CUSTOMER_MANAGED_KMS_KEY"
8 | }
9 | }
```
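A minimal sketch of the corrected comparison, assuming parsed HCL values as single-element lists; the `uses_cmk` helper is hypothetical, and the valid value comes from the Keyspaces API reference linked above:
```python
CMK_TYPE = "CUSTOMER_MANAGED_KMS_KEY"  # valid value per the Keyspaces EncryptionSpecification API

def uses_cmk(encryption_specification: dict) -> bool:
    """True when the table is encrypted with a customer-managed KMS key."""
    return (bool(encryption_specification.get("kms_key_identifier"))
            and encryption_specification.get("type") == [CMK_TYPE])

# parsed HCL attribute values arrive as single-element lists
print(uses_cmk({"kms_key_identifier": ["arn:aws:kms:us-east-1:123456789012:key/example"],
                "type": ["CUSTOMER_MANAGED_KMS_KEY"]}))  # True
```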
| Hey @andyfase thanks for reaching out.
Great observation, are you interested in contributing the needed change? | 2023-11-15T08:17:56 | -1.0 |
bridgecrewio/checkov | 5,766 | bridgecrewio__checkov-5766 | [
"5720"
] | 1f51de17b9327e5e0da8cb826fb04036c40b7c46 | diff --git a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
--- a/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
+++ b/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
@@ -1,17 +1,44 @@
-from checkov.common.models.enums import CheckCategories
-from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+from __future__ import annotations
+from typing import Any
-class FunctionAppsAccessibleOverHttps(BaseResourceValueCheck):
- def __init__(self):
+from checkov.common.models.enums import CheckCategories, CheckResult
+from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
+
+
+class FunctionAppsAccessibleOverHttps(BaseResourceCheck):
+
+ def __init__(self) -> None:
name = "Ensure that Function apps is only accessible over HTTPS"
id = "CKV_AZURE_70"
- supported_resources = ['azurerm_function_app']
+ supported_resources = ['azurerm_function_app', 'azurerm_linux_function_app', 'azurerm_windows_function_app',
+ 'azurerm_function_app_slot', 'azurerm_linux_function_app_slot',
+ 'azurerm_windows_function_app_slot']
categories = [CheckCategories.NETWORKING]
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
- def get_inspected_key(self):
- return 'https_only'
+ def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
+ # default=false for https_only
+ if 'https_only' not in conf.keys():
+ return CheckResult.FAILED
+
+ https_only = conf.get('https_only')[0]
+ if not https_only:
+ return CheckResult.FAILED
+
+ # relevant for linux/windows resources
+ if 'auth_settings_v2' in conf.keys():
+ auth_settings_v2 = conf['auth_settings_v2'][0]
+
+ # default=true for require_https
+ if 'require_https' not in auth_settings_v2.keys():
+ return CheckResult.PASSED
+
+ require_https = auth_settings_v2.get('require_https')[0]
+ if not require_https:
+ return CheckResult.FAILED
+
+ return CheckResult.PASSED
check = FunctionAppsAccessibleOverHttps()
| diff --git a/tests/terraform/checks/resource/azure/example_FunctionAppAccessibleOverHttps/main.tf b/tests/terraform/checks/resource/azure/example_FunctionAppAccessibleOverHttps/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_FunctionAppAccessibleOverHttps/main.tf
@@ -0,0 +1,177 @@
+
+## app
+
+resource "azurerm_function_app" "fail" {
+ name = "test-azure-functions"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+}
+resource "azurerm_function_app" "fail2" {
+ name = "test-azure-functions"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ https_only = false
+}
+resource "azurerm_function_app" "pass" {
+ name = "test-azure-functions"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ https_only = true
+}
+
+## app_slot
+
+resource "azurerm_function_app_slot" "fail" {
+ name = "test-azure-functions_slot"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ function_app_name = azurerm_function_app.example.name
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+}
+resource "azurerm_function_app_slot" "fail2" {
+ name = "test-azure-functions_slot"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ function_app_name = azurerm_function_app.example.name
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ https_only = false
+}
+resource "azurerm_function_app_slot" "pass" {
+ name = "test-azure-functions_slot"
+ location = azurerm_resource_group.example.location
+ resource_group_name = azurerm_resource_group.example.name
+ app_service_plan_id = azurerm_app_service_plan.example.id
+ function_app_name = azurerm_function_app.example.name
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ https_only = true
+}
+
+#### linux/windows
+
+## app
+
+resource "azurerm_linux_function_app" "fail" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {}
+}
+resource "azurerm_linux_function_app" "fail2" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {}
+ https_only = false
+}
+resource "azurerm_linux_function_app" "fail3" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {}
+
+ https_only = true
+ auth_settings_v2 {
+ require_https = false
+ }
+}
+resource "azurerm_linux_function_app" "pass" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {}
+ https_only = true
+}
+resource "azurerm_linux_function_app" "pass2" {
+ name = "example-linux-function-app"
+ resource_group_name = azurerm_resource_group.example.name
+ location = azurerm_resource_group.example.location
+ storage_account_name = azurerm_storage_account.example.name
+ storage_account_access_key = azurerm_storage_account.example.primary_access_key
+ service_plan_id = azurerm_service_plan.example.id
+
+ site_config {}
+
+ https_only = true
+ auth_settings_v2 {
+ require_https = true
+ }
+}
+
+## app slot
+
+resource "azurerm_linux_function_app_slot" "fail" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {}
+}
+resource "azurerm_linux_function_app_slot" "fail2" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {}
+ https_only = false
+}
+resource "azurerm_linux_function_app_slot" "fail3" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {}
+ auth_settings_v2 {
+ require_https = false
+ }
+ https_only = true
+}
+resource "azurerm_linux_function_app_slot" "pass" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {}
+ auth_settings_v2 {}
+ https_only = true
+}
+resource "azurerm_linux_function_app_slot" "pass2" {
+ name = "example-linux-function-app-slot"
+ function_app_id = azurerm_linux_function_app.example.id
+ storage_account_name = azurerm_storage_account.example.name
+
+ site_config {}
+ auth_settings_v2 {
+ require_https = true
+ }
+ https_only = true
+}
diff --git a/tests/terraform/checks/resource/azure/test_FunctionAppsAccessibleOverHttps.py b/tests/terraform/checks/resource/azure/test_FunctionAppsAccessibleOverHttps.py
--- a/tests/terraform/checks/resource/azure/test_FunctionAppsAccessibleOverHttps.py
+++ b/tests/terraform/checks/resource/azure/test_FunctionAppsAccessibleOverHttps.py
@@ -1,53 +1,50 @@
import unittest
+from pathlib import Path
-import hcl2
-
+from checkov.runner_filter import RunnerFilter
from checkov.terraform.checks.resource.azure.FunctionAppsAccessibleOverHttps import check
-from checkov.common.models.enums import CheckResult
+from checkov.terraform.runner import Runner
class TestFunctionAppsAccessibleOverHttps(unittest.TestCase):
+ def test(self):
+ test_files_dir = Path(__file__).parent / "example_FunctionAppAccessibleOverHttps"
+
+ report = Runner().run(root_folder=str(test_files_dir), runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ "azurerm_function_app.pass",
+ "azurerm_function_app_slot.pass",
+ "azurerm_linux_function_app.pass",
+ "azurerm_linux_function_app.pass2",
+ "azurerm_linux_function_app_slot.pass",
+ "azurerm_linux_function_app_slot.pass2",
+ }
+ failing_resources = {
+ "azurerm_function_app.fail",
+ "azurerm_function_app.fail2",
+ "azurerm_function_app_slot.fail",
+ "azurerm_function_app_slot.fail2",
+ "azurerm_linux_function_app.fail",
+ "azurerm_linux_function_app.fail2",
+ "azurerm_linux_function_app.fail3",
+ "azurerm_linux_function_app_slot.fail",
+ "azurerm_linux_function_app_slot.fail2",
+ "azurerm_linux_function_app_slot.fail3",
+ }
+
+ passed_check_resources = {c.resource for c in report.passed_checks}
+ failed_check_resources = {c.resource for c in report.failed_checks}
+
+ self.assertEqual(summary["passed"], len(passing_resources))
+ self.assertEqual(summary["failed"], len(failing_resources))
+ self.assertEqual(summary["skipped"], 0)
+ self.assertEqual(summary["parsing_errors"], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
- def test_failure1(self):
- hcl_res = hcl2.loads("""
- resource "azurerm_app_service" "example" {
- name = "example-app-service"
- location = azurerm_resource_group.example.location
- resource_group_name = azurerm_resource_group.example.name
- app_service_plan_id = azurerm_app_service_plan.example.id
- https_only = false
- }
- """)
- resource_conf = hcl_res['resource'][0]['azurerm_app_service']['example']
- scan_result = check.scan_resource_conf(conf=resource_conf)
- self.assertEqual(CheckResult.FAILED, scan_result)
-
- def test_failure2(self):
- hcl_res = hcl2.loads("""
- resource "azurerm_app_service" "example" {
- name = "example-app-service"
- location = azurerm_resource_group.example.location
- resource_group_name = azurerm_resource_group.example.name
- app_service_plan_id = azurerm_app_service_plan.example.id
- }
- """)
- resource_conf = hcl_res['resource'][0]['azurerm_app_service']['example']
- scan_result = check.scan_resource_conf(conf=resource_conf)
- self.assertEqual(CheckResult.FAILED, scan_result)
-
- def test_success(self):
- hcl_res = hcl2.loads("""
- resource "azurerm_app_service" "example" {
- name = "example-app-service"
- location = azurerm_resource_group.example.location
- resource_group_name = azurerm_resource_group.example.name
- app_service_plan_id = azurerm_app_service_plan.example.id
- https_only = true
- }
- """)
- resource_conf = hcl_res['resource'][0]['azurerm_app_service']['example']
- scan_result = check.scan_resource_conf(conf=resource_conf)
- self.assertEqual(CheckResult.PASSED, scan_result)
if __name__ == '__main__':
diff --git a/tests/terraform/image_referencer/test_runner_azure_resources.py b/tests/terraform/image_referencer/test_runner_azure_resources.py
--- a/tests/terraform/image_referencer/test_runner_azure_resources.py
+++ b/tests/terraform/image_referencer/test_runner_azure_resources.py
@@ -135,7 +135,7 @@ def test_app_service_linux_function_resources(mocker: MockerFixture, graph_frame
assert len(tf_report.resources) == 2
assert len(tf_report.passed_checks) == 2
- assert len(tf_report.failed_checks) == 2
+ assert len(tf_report.failed_checks) == 4
assert len(tf_report.skipped_checks) == 0
assert len(tf_report.parsing_errors) == 0
| Azure Function App Slots - Ensure web app redirects all HTTP traffic to HTTPS in Azure Function App Slots
**Describe the issue**
It seems that there are no checks that ensure that the following resources only allow HTTPS:
- azurerm_function_app_slot
- azurerm_linux_function_app_slot
- azurerm_windows_function_app_slot
**Examples**
````hcl
resource "azurerm_function_app_slot" "example" {
name = "test-azure-functions_slot"
location = azurerm_resource_group.example.location
resource_group_name = azurerm_resource_group.example.name
app_service_plan_id = azurerm_app_service_plan.example.id
function_app_name = azurerm_function_app.example.name
storage_account_name = azurerm_storage_account.example.name
storage_account_access_key = azurerm_storage_account.example.primary_access_key
https_only = true
}
resource "azurerm_linux_function_app_slot" "example" {
name = "example-linux-function-app-slot"
function_app_id = azurerm_linux_function_app.example.id
storage_account_name = azurerm_storage_account.example.name
site_config {
require_https = true
}
}
resource "azurerm_windows_function_app" "example" {
name = "example-windows-function-app"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_account_name = azurerm_storage_account.example.name
service_plan_id = azurerm_service_plan.example.id
site_config {
require_https = true
}
}
````
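A minimal sketch of the intended logic for an extended check, assuming parsed HCL values as single-element lists; the `https_enforced` helper is hypothetical. Per the patch above, `https_only` defaults to false and `auth_settings_v2.require_https` defaults to true:
```python
def https_enforced(conf: dict) -> bool:
    """True when https_only is enabled and, if auth_settings_v2 is present, require_https is not disabled."""
    if conf.get("https_only", [False]) != [True]:
        return False
    auth_v2 = conf.get("auth_settings_v2")
    if auth_v2 and auth_v2[0].get("require_https", [True]) != [True]:
        return False
    return True

# parsed HCL attribute values arrive as single-element lists
print(https_enforced({"https_only": [True], "auth_settings_v2": [{"require_https": [True]}]}))  # True
print(https_enforced({"https_only": [False]}))  # False
```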
**Version (please complete the following information):**
- N/A
**Additional context**
N/A
| you can just extend check ID `CKV_AZURE_70` https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/azure/FunctionAppsAccessibleOverHttps.py
for the newer linux/windows variants you can leverage `self.entity_type` like here https://github.com/bridgecrewio/checkov/blob/main/checkov/terraform/checks/resource/azure/AppServiceClientCertificate.py | 2023-11-18T13:43:12 | -1.0 |
bridgecrewio/checkov | 5,886 | bridgecrewio__checkov-5886 | [
"5885"
] | 748f4b782851a820caa53ec380dbc7cbfef7d31f | diff --git a/checkov/terraform/checks/resource/azure/AzureDefenderDisabledForResManager.py b/checkov/terraform/checks/resource/azure/AzureDefenderDisabledForResManager.py
--- a/checkov/terraform/checks/resource/azure/AzureDefenderDisabledForResManager.py
+++ b/checkov/terraform/checks/resource/azure/AzureDefenderDisabledForResManager.py
@@ -16,9 +16,9 @@ def __init__(self) -> None:
def scan_resource_conf(self, conf: dict[str, list[Any]]) -> CheckResult:
return (
- CheckResult.PASSED
- if conf.get("resource_type", [""])[0].lower() == "arm" and conf.get("tier", [""])[0].lower() == "standard"
- else CheckResult.FAILED
+ CheckResult.FAILED
+ if conf.get("resource_type", [""])[0].lower() == "arm" and conf.get("tier", [""])[0].lower() != "standard"
+ else CheckResult.PASSED
)
def get_evaluated_keys(self) -> list[str]:
| diff --git a/tests/terraform/checks/resource/azure/example_AzureDefenderDisabledForResManager/main.tf b/tests/terraform/checks/resource/azure/example_AzureDefenderDisabledForResManager/main.tf
--- a/tests/terraform/checks/resource/azure/example_AzureDefenderDisabledForResManager/main.tf
+++ b/tests/terraform/checks/resource/azure/example_AzureDefenderDisabledForResManager/main.tf
@@ -1,7 +1,7 @@
# Case 1: Pass: tier is Standard and resource_type is Arm
-resource "azurerm_security_center_subscription_pricing" "pass" {
+resource "azurerm_security_center_subscription_pricing" "pass_1" {
tier = "Standard"
resource_type = "Arm"
}
@@ -13,10 +13,16 @@ resource "azurerm_security_center_subscription_pricing" "fail_1" {
resource_type = "arm"
}
-# Case 3: Fails as "resource_type" should be "Arm"
+# Case 3: Pass as policy should only check if the resource_type is "Arm"
-resource "azurerm_security_center_subscription_pricing" "fail_2" {
- tier = "Standard"
+resource "azurerm_security_center_subscription_pricing" "pass_2" {
+ tier = "Free"
resource_type = "Dns"
}
+# Case 4: Pass as policy should only check if the resource_type is "Arm"
+
+resource "azurerm_security_center_subscription_pricing" "pass_3" {
+ tier = "Free"
+ resource_type = "VirtualMachine"
+}
\ No newline at end of file
diff --git a/tests/terraform/checks/resource/azure/test_AzureDefenderDisabledForResManager.py b/tests/terraform/checks/resource/azure/test_AzureDefenderDisabledForResManager.py
--- a/tests/terraform/checks/resource/azure/test_AzureDefenderDisabledForResManager.py
+++ b/tests/terraform/checks/resource/azure/test_AzureDefenderDisabledForResManager.py
@@ -17,11 +17,12 @@ def test(self):
summary = report.get_summary()
passing_resources = {
- 'azurerm_security_center_subscription_pricing.pass',
+ 'azurerm_security_center_subscription_pricing.pass_1',
+ 'azurerm_security_center_subscription_pricing.pass_2',
+ 'azurerm_security_center_subscription_pricing.pass_3',
}
failing_resources = {
'azurerm_security_center_subscription_pricing.fail_1',
- 'azurerm_security_center_subscription_pricing.fail_2',
}
skipped_resources = {}
| CKV_AZURE_234 condition is incorrect
https://github.com/bridgecrewio/checkov/blob/dc6a7cd84c5e006c289f2710b960b7be96a29fae/checkov/terraform/checks/resource/azure/AzureDefenderDisabledForResManager.py#L20C110-L20C118
The condition used in this check is being triggered for all `azurerm_security_center_subscription_pricing` resources with **any** `resource_type`. For example,
```
resource "azurerm_security_center_subscription_pricing" "mdc_srvrs" {
tier = "Standard"
resource_type = "VirtualMachines"
subplan = "P2"
}
```
Would raise the `CKV_AZURE_234` finding. For any other `resource_type` we get a failure.
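A minimal sketch of the corrected condition, mirroring the patch above: only ARM pricing entries whose tier is not Standard should fail; every other `resource_type` is left alone. The `defender_misconfigured` helper is hypothetical:
```python
def defender_misconfigured(conf: dict) -> bool:
    """Flag only ARM pricing entries whose tier is not Standard; ignore other resource types."""
    resource_type = conf.get("resource_type", [""])[0].lower()
    tier = conf.get("tier", [""])[0].lower()
    return resource_type == "arm" and tier != "standard"

# a VirtualMachines entry should no longer trigger the finding
print(defender_misconfigured({"tier": ["Standard"], "resource_type": ["VirtualMachines"]}))  # False
print(defender_misconfigured({"tier": ["Free"], "resource_type": ["Arm"]}))  # True
```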
| 2023-12-19T22:53:41 | -1.0 |
|
bridgecrewio/checkov | 5,936 | bridgecrewio__checkov-5936 | [
"5935"
] | d07fdc994015772a9fa0dc1a12d1391b5765916c | diff --git a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
--- a/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
+++ b/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
@@ -14,8 +14,8 @@ def get_inspected_key(self):
return 'database_version'
def get_expected_values(self):
- return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2019_STANDARD", "SQLSERVER_2019_WEB",
- "SQLSERVER_2019_ENTERPRISE", "SQLSERVER_2019_EXPRESS"]
+ return ["POSTGRES_15", "MYSQL_8_0", "SQLSERVER_2022_STANDARD", "SQLSERVER_2022_WEB",
+ "SQLSERVER_2022_ENTERPRISE", "SQLSERVER_2022_EXPRESS"]
check = CloudSqlMajorVersion()
| diff --git a/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf b/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf
--- a/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf
+++ b/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf
@@ -154,7 +154,7 @@ resource "google_sql_database_instance" "pass2" {
}
resource "google_sql_database_instance" "fail3" {
- database_version = "SQLSERVER_2017_STANDARD"
+ database_version = "SQLSERVER_2019_STANDARD"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
@@ -210,7 +210,7 @@ resource "google_sql_database_instance" "fail3" {
}
resource "google_sql_database_instance" "pass3" {
- database_version = "SQLSERVER_2019_STANDARD"
+ database_version = "SQLSERVER_2022_STANDARD"
name = "general-sqlserver12"
project = "gcp-bridgecrew-deployment"
region = "us-central1"
| CKV_GCP_79 SQL Server latest version is 2022 instead of 2019
**Describe the issue**
The `CKV_GCP_79` check pins SQL Server at 2019, but 2022 is the latest version:
https://learn.microsoft.com/en-us/troubleshoot/sql/releases/download-and-install-latest-updates
**Examples**
Related to these files:
https://github.com/bridgecrewio/checkov/blob/master/checkov/terraform/checks/resource/gcp/CloudSqlMajorVersion.py
https://github.com/bridgecrewio/checkov/blob/d07fdc994015772a9fa0dc1a12d1391b5765916c/tests/terraform/checks/resource/gcp/example_CloudSqlMajorVersion/main.tf#L213
| 2024-01-12T12:04:04 | -1.0 |
|
bridgecrewio/checkov | 6,145 | bridgecrewio__checkov-6145 | [
"6135"
] | 81636c3d5a519552dfd10dae4635377542e51ed5 | diff --git a/checkov/terraform/graph_builder/local_graph.py b/checkov/terraform/graph_builder/local_graph.py
--- a/checkov/terraform/graph_builder/local_graph.py
+++ b/checkov/terraform/graph_builder/local_graph.py
@@ -479,19 +479,23 @@ def _find_vertex_with_best_match(self, relevant_vertices_indexes: List[int], ori
vertex_realpath = os.path.realpath(vertex.path)
self._vertex_path_to_realpath_cache[vertex.path] = vertex_realpath
common_prefix = os.path.commonpath([vertex_realpath, origin_real_path])
+
+ # checks if module name is same for dest and origin vertex.
+ if origin_vertex_index is not None:
+ vertex_module_name = vertex.attributes.get(CustomAttributes.TF_RESOURCE_ADDRESS, '')
+ origin_module_name = self.vertices[origin_vertex_index].attributes.get(CustomAttributes.TF_RESOURCE_ADDRESS, '')
+ if vertex_module_name.startswith(BlockType.MODULE) and origin_module_name.startswith(BlockType.MODULE):
+ split_module_name = vertex_module_name.split('.')[1]
+ if origin_module_name.startswith(f'{BlockType.MODULE}.{split_module_name}'):
+ common_prefix = f"{common_prefix} {BlockType.MODULE}.{split_module_name}"
+
if len(common_prefix) > len(longest_common_prefix):
vertex_index_with_longest_common_prefix = vertex_index
longest_common_prefix = common_prefix
vertices_with_longest_common_prefix = [(vertex_index, vertex)]
elif len(common_prefix) == len(longest_common_prefix):
vertices_with_longest_common_prefix.append((vertex_index, vertex))
- if origin_vertex_index is not None:
- vertex_module_name = vertex.attributes.get(CustomAttributes.TF_RESOURCE_ADDRESS, '')
- origin_module_name = self.vertices[origin_vertex_index].attributes.get(CustomAttributes.TF_RESOURCE_ADDRESS, '')
- if vertex_module_name.startswith(BlockType.MODULE) and origin_module_name.startswith(BlockType.MODULE):
- split_module_name = vertex_module_name.split('.')[1]
- if origin_module_name.startswith(f'{BlockType.MODULE}.{split_module_name}'):
- vertex_index_with_longest_common_prefix = vertex_index
+
if len(vertices_with_longest_common_prefix) <= 1:
return vertex_index_with_longest_common_prefix
diff --git a/checkov/terraform/plan_parser.py b/checkov/terraform/plan_parser.py
--- a/checkov/terraform/plan_parser.py
+++ b/checkov/terraform/plan_parser.py
@@ -212,7 +212,7 @@ def _find_child_modules(
(
module_call_resource
for module_call_resource in module_call_resources
- if f"{module_address}.{module_call_resource['address']}" == resource["address"]
+ if f"{module_address}.{module_call_resource['address']}" == (resource["address"].rsplit('[', 1)[0] if resource["address"][-1] == "]" else resource["address"])
),
None
)
| diff --git a/tests/terraform/graph/graph_builder/test_graph_builder.py b/tests/terraform/graph/graph_builder/test_graph_builder.py
--- a/tests/terraform/graph/graph_builder/test_graph_builder.py
+++ b/tests/terraform/graph/graph_builder/test_graph_builder.py
@@ -9,6 +9,7 @@
from checkov.terraform.graph_manager import TerraformGraphManager
from checkov.common.graph.graph_builder import CustomAttributes
from checkov.terraform.modules.module_utils import external_modules_download_path
+from checkov.terraform.plan_utils import create_definitions
TEST_DIRNAME = os.path.dirname(os.path.realpath(__file__))
@@ -372,6 +373,13 @@ def test_build_rustworkx_graph(self):
self.check_edge(graph, resource_node, var_region_node, 'region')
self.check_edge(graph, provider_node, var_aws_profile_node, 'profile')
self.check_edge(graph, local_node, var_bucket_name_node, 'bucket_name')
+
+ def test_multiple_modules_with_connected_resources(self):
+ valid_plan_path = os.path.realpath(os.path.join(TEST_DIRNAME, '../resources/modules_edges_tfplan/tfplan.json'))
+ definitions, definitions_raw = create_definitions(root_folder=None, files=[valid_plan_path])
+ graph_manager = TerraformGraphManager(db_connector=RustworkxConnector())
+ tf_plan_local_graph = graph_manager.build_graph_from_definitions(definitions, render_variables=False)
+ self.assertTrue(tf_plan_local_graph.in_edges[2])
def build_new_key_for_tf_definition(key):
diff --git a/tests/terraform/graph/resources/modules_edges_tfplan/s3module.tf b/tests/terraform/graph/resources/modules_edges_tfplan/s3module.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/graph/resources/modules_edges_tfplan/s3module.tf
@@ -0,0 +1,9 @@
+module "s3-bucket-1" {
+ source = "terraform-aws-modules/s3-bucket/aws"
+ version = "4.0.1"
+}
+
+module "s3-bucket-2" {
+ source = "terraform-aws-modules/s3-bucket/aws"
+ version = "4.0.1"
+}
\ No newline at end of file
diff --git a/tests/terraform/graph/resources/modules_edges_tfplan/tfplan.json b/tests/terraform/graph/resources/modules_edges_tfplan/tfplan.json
new file mode 100644
--- /dev/null
+++ b/tests/terraform/graph/resources/modules_edges_tfplan/tfplan.json
@@ -0,0 +1,4131 @@
+{
+ "format_version": "0.2",
+ "terraform_version": "1.0.7",
+ "planned_values": {
+ "root_module": {
+ "child_modules": [
+ {
+ "resources": [
+ {
+ "address": "module.s3-bucket-1.aws_s3_bucket.this[0]",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "force_destroy": false,
+ "object_lock_enabled": false,
+ "tags": null,
+ "timeouts": null
+ },
+ "sensitive_values": {
+ "cors_rule": [],
+ "grant": [],
+ "lifecycle_rule": [],
+ "logging": [],
+ "object_lock_configuration": [],
+ "replication_configuration": [],
+ "server_side_encryption_configuration": [],
+ "tags_all": {},
+ "versioning": [],
+ "website": []
+ }
+ },
+ {
+ "address": "module.s3-bucket-1.aws_s3_bucket_public_access_block.this[0]",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "block_public_acls": true,
+ "block_public_policy": true,
+ "ignore_public_acls": true,
+ "restrict_public_buckets": true
+ },
+ "sensitive_values": {}
+ }
+ ],
+ "address": "module.s3-bucket-1"
+ },
+ {
+ "resources": [
+ {
+ "address": "module.s3-bucket-2.aws_s3_bucket.this[0]",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "force_destroy": false,
+ "object_lock_enabled": false,
+ "tags": null,
+ "timeouts": null
+ },
+ "sensitive_values": {
+ "cors_rule": [],
+ "grant": [],
+ "lifecycle_rule": [],
+ "logging": [],
+ "object_lock_configuration": [],
+ "replication_configuration": [],
+ "server_side_encryption_configuration": [],
+ "tags_all": {},
+ "versioning": [],
+ "website": []
+ }
+ },
+ {
+ "address": "module.s3-bucket-2.aws_s3_bucket_public_access_block.this[0]",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "block_public_acls": true,
+ "block_public_policy": true,
+ "ignore_public_acls": true,
+ "restrict_public_buckets": true
+ },
+ "sensitive_values": {}
+ }
+ ],
+ "address": "module.s3-bucket-2"
+ }
+ ]
+ }
+ },
+ "resource_changes": [
+ {
+ "address": "module.s3-bucket-1.aws_s3_bucket.this[0]",
+ "module_address": "module.s3-bucket-1",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "change": {
+ "actions": [
+ "create"
+ ],
+ "before": null,
+ "after": {
+ "force_destroy": false,
+ "object_lock_enabled": false,
+ "tags": null,
+ "timeouts": null
+ },
+ "after_unknown": {
+ "acceleration_status": true,
+ "acl": true,
+ "arn": true,
+ "bucket": true,
+ "bucket_domain_name": true,
+ "bucket_prefix": true,
+ "bucket_regional_domain_name": true,
+ "cors_rule": true,
+ "grant": true,
+ "hosted_zone_id": true,
+ "id": true,
+ "lifecycle_rule": true,
+ "logging": true,
+ "object_lock_configuration": true,
+ "policy": true,
+ "region": true,
+ "replication_configuration": true,
+ "request_payer": true,
+ "server_side_encryption_configuration": true,
+ "tags_all": true,
+ "versioning": true,
+ "website": true,
+ "website_domain": true,
+ "website_endpoint": true
+ },
+ "before_sensitive": false,
+ "after_sensitive": {
+ "cors_rule": [],
+ "grant": [],
+ "lifecycle_rule": [],
+ "logging": [],
+ "object_lock_configuration": [],
+ "replication_configuration": [],
+ "server_side_encryption_configuration": [],
+ "tags_all": {},
+ "versioning": [],
+ "website": []
+ }
+ }
+ },
+ {
+ "address": "module.s3-bucket-1.aws_s3_bucket_public_access_block.this[0]",
+ "module_address": "module.s3-bucket-1",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "change": {
+ "actions": [
+ "create"
+ ],
+ "before": null,
+ "after": {
+ "block_public_acls": true,
+ "block_public_policy": true,
+ "ignore_public_acls": true,
+ "restrict_public_buckets": true
+ },
+ "after_unknown": {
+ "bucket": true,
+ "id": true
+ },
+ "before_sensitive": false,
+ "after_sensitive": {}
+ }
+ },
+ {
+ "address": "module.s3-bucket-2.aws_s3_bucket.this[0]",
+ "module_address": "module.s3-bucket-2",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "change": {
+ "actions": [
+ "create"
+ ],
+ "before": null,
+ "after": {
+ "force_destroy": false,
+ "object_lock_enabled": false,
+ "tags": null,
+ "timeouts": null
+ },
+ "after_unknown": {
+ "acceleration_status": true,
+ "acl": true,
+ "arn": true,
+ "bucket": true,
+ "bucket_domain_name": true,
+ "bucket_prefix": true,
+ "bucket_regional_domain_name": true,
+ "cors_rule": true,
+ "grant": true,
+ "hosted_zone_id": true,
+ "id": true,
+ "lifecycle_rule": true,
+ "logging": true,
+ "object_lock_configuration": true,
+ "policy": true,
+ "region": true,
+ "replication_configuration": true,
+ "request_payer": true,
+ "server_side_encryption_configuration": true,
+ "tags_all": true,
+ "versioning": true,
+ "website": true,
+ "website_domain": true,
+ "website_endpoint": true
+ },
+ "before_sensitive": false,
+ "after_sensitive": {
+ "cors_rule": [],
+ "grant": [],
+ "lifecycle_rule": [],
+ "logging": [],
+ "object_lock_configuration": [],
+ "replication_configuration": [],
+ "server_side_encryption_configuration": [],
+ "tags_all": {},
+ "versioning": [],
+ "website": []
+ }
+ }
+ },
+ {
+ "address": "module.s3-bucket-2.aws_s3_bucket_public_access_block.this[0]",
+ "module_address": "module.s3-bucket-2",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "change": {
+ "actions": [
+ "create"
+ ],
+ "before": null,
+ "after": {
+ "block_public_acls": true,
+ "block_public_policy": true,
+ "ignore_public_acls": true,
+ "restrict_public_buckets": true
+ },
+ "after_unknown": {
+ "bucket": true,
+ "id": true
+ },
+ "before_sensitive": false,
+ "after_sensitive": {}
+ }
+ }
+ ],
+ "prior_state": {
+ "format_version": "0.2",
+ "terraform_version": "1.0.7",
+ "values": {
+ "root_module": {
+ "child_modules": [
+ {
+ "resources": [
+ {
+ "address": "module.s3-bucket-1.data.aws_caller_identity.current",
+ "mode": "data",
+ "type": "aws_caller_identity",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "account_id": "101860328116",
+ "arn": "arn:aws:iam::101860328116:user/atlantis",
+ "id": "101860328116",
+ "user_id": "AIDARPN2ZIK2PHMJSNYXG"
+ },
+ "sensitive_values": {}
+ },
+ {
+ "address": "module.s3-bucket-1.data.aws_partition.current",
+ "mode": "data",
+ "type": "aws_partition",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "dns_suffix": "amazonaws.com",
+ "id": "aws",
+ "partition": "aws",
+ "reverse_dns_prefix": "com.amazonaws"
+ },
+ "sensitive_values": {}
+ },
+ {
+ "address": "module.s3-bucket-1.data.aws_region.current",
+ "mode": "data",
+ "type": "aws_region",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "description": "Europe (Frankfurt)",
+ "endpoint": "ec2.eu-central-1.amazonaws.com",
+ "id": "eu-central-1",
+ "name": "eu-central-1"
+ },
+ "sensitive_values": {}
+ }
+ ],
+ "address": "module.s3-bucket-1"
+ },
+ {
+ "resources": [
+ {
+ "address": "module.s3-bucket-2.data.aws_caller_identity.current",
+ "mode": "data",
+ "type": "aws_caller_identity",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "account_id": "101860328116",
+ "arn": "arn:aws:iam::101860328116:user/atlantis",
+ "id": "101860328116",
+ "user_id": "AIDARPN2ZIK2PHMJSNYXG"
+ },
+ "sensitive_values": {}
+ },
+ {
+ "address": "module.s3-bucket-2.data.aws_partition.current",
+ "mode": "data",
+ "type": "aws_partition",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "dns_suffix": "amazonaws.com",
+ "id": "aws",
+ "partition": "aws",
+ "reverse_dns_prefix": "com.amazonaws"
+ },
+ "sensitive_values": {}
+ },
+ {
+ "address": "module.s3-bucket-2.data.aws_region.current",
+ "mode": "data",
+ "type": "aws_region",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "description": "Europe (Frankfurt)",
+ "endpoint": "ec2.eu-central-1.amazonaws.com",
+ "id": "eu-central-1",
+ "name": "eu-central-1"
+ },
+ "sensitive_values": {}
+ }
+ ],
+ "address": "module.s3-bucket-2"
+ }
+ ]
+ }
+ }
+ },
+ "configuration": {
+ "provider_config": {
+ "aws": {
+ "name": "aws",
+ "expressions": {
+ "profile": {
+ "constant_value": "razorpay-stage"
+ },
+ "region": {
+ "constant_value": "eu-central-1"
+ }
+ }
+ },
+ "module.s3-bucket-1:aws": {
+ "name": "aws",
+ "version_constraint": ">= 5.27.0",
+ "module_address": "module.s3-bucket-1"
+ },
+ "module.s3-bucket-2:aws": {
+ "name": "aws",
+ "version_constraint": ">= 5.27.0",
+ "module_address": "module.s3-bucket-2"
+ }
+ },
+ "root_module": {
+ "module_calls": {
+ "s3-bucket-1": {
+ "source": "terraform-aws-modules/s3-bucket/aws",
+ "module": {
+ "outputs": {
+ "s3_bucket_arn": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The ARN of the bucket. Will be of format arn:aws:s3:::bucketname."
+ },
+ "s3_bucket_bucket_domain_name": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].bucket_domain_name",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The bucket domain name. Will be of format bucketname.s3.amazonaws.com."
+ },
+ "s3_bucket_bucket_regional_domain_name": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].bucket_regional_domain_name",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The bucket region-specific domain name. The bucket domain name including the region name, please refer here for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent redirect issues from CloudFront to S3 Origin URL."
+ },
+ "s3_bucket_hosted_zone_id": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].hosted_zone_id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The Route 53 Hosted Zone ID for this bucket's region."
+ },
+ "s3_bucket_id": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_policy.this[0].id",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The name of the bucket."
+ },
+ "s3_bucket_lifecycle_configuration_rules": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_lifecycle_configuration.this[0].rule",
+ "aws_s3_bucket_lifecycle_configuration.this[0]",
+ "aws_s3_bucket_lifecycle_configuration.this"
+ ]
+ },
+ "description": "The lifecycle rules of the bucket, if the bucket is configured with lifecycle rules. If not, this will be an empty string."
+ },
+ "s3_bucket_policy": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_policy.this[0].policy",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this"
+ ]
+ },
+ "description": "The policy of the bucket, if the bucket is configured with a policy. If not, this will be an empty string."
+ },
+ "s3_bucket_region": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].region",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The AWS region this bucket resides in."
+ },
+ "s3_bucket_website_domain": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_website_configuration.this[0].website_domain",
+ "aws_s3_bucket_website_configuration.this[0]",
+ "aws_s3_bucket_website_configuration.this"
+ ]
+ },
+ "description": "The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records."
+ },
+ "s3_bucket_website_endpoint": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_website_configuration.this[0].website_endpoint",
+ "aws_s3_bucket_website_configuration.this[0]",
+ "aws_s3_bucket_website_configuration.this"
+ ]
+ },
+ "description": "The website endpoint, if the bucket is configured with a website. If not, this will be an empty string."
+ }
+ },
+ "resources": [
+ {
+ "address": "aws_s3_bucket.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "var.bucket"
+ ]
+ },
+ "bucket_prefix": {
+ "references": [
+ "var.bucket_prefix"
+ ]
+ },
+ "force_destroy": {
+ "references": [
+ "var.force_destroy"
+ ]
+ },
+ "object_lock_enabled": {
+ "references": [
+ "var.object_lock_enabled"
+ ]
+ },
+ "tags": {
+ "references": [
+ "var.tags"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_accelerate_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_accelerate_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "status": {
+ "references": [
+ "var.acceleration_status"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.acceleration_status"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_acl.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_acl",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "acl": {
+ "references": [
+ "var.acl",
+ "var.acl"
+ ]
+ },
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.create_bucket_acl"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_ownership_controls.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_analytics_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_analytics_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "var.analytics_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_cors_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_cors_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.cors_rules"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_intelligent_tiering_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_intelligent_tiering_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ },
+ "status": {
+ "references": [
+ "each.value.status",
+ "each.value",
+ "each.value.status",
+ "each.value"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "local.intelligent_tiering",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_inventory.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_inventory",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "each.value.bucket",
+ "each.value",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "destination": [
+ {
+ "bucket": [
+ {
+ "account_id": {
+ "references": [
+ "each.value.destination.account_id",
+ "each.value.destination",
+ "each.value"
+ ]
+ },
+ "bucket_arn": {
+ "references": [
+ "each.value.destination.bucket_arn",
+ "each.value.destination",
+ "each.value",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "format": {
+ "references": [
+ "each.value.destination.format",
+ "each.value.destination",
+ "each.value"
+ ]
+ },
+ "prefix": {
+ "references": [
+ "each.value.destination.prefix",
+ "each.value.destination",
+ "each.value"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "enabled": {
+ "references": [
+ "each.value.enabled",
+ "each.value"
+ ]
+ },
+ "included_object_versions": {
+ "references": [
+ "each.value.included_object_versions",
+ "each.value"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ },
+ "optional_fields": {
+ "references": [
+ "each.value.optional_fields",
+ "each.value"
+ ]
+ },
+ "schedule": [
+ {
+ "frequency": {
+ "references": [
+ "each.value.frequency",
+ "each.value"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "var.inventory_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_lifecycle_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_lifecycle_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.lifecycle_rules"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_versioning.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_logging.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_logging",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "target_bucket": {
+ "references": [
+ "var.logging[\"target_bucket\"]",
+ "var.logging"
+ ]
+ },
+ "target_prefix": {
+ "references": [
+ "var.logging[\"target_prefix\"]",
+ "var.logging"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.logging"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_metric.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_metric",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.value.name",
+ "each.value"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "local.metric_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_object_lock_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_object_lock_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "rule": [
+ {
+ "default_retention": [
+ {
+ "days": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.days",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ },
+ "mode": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.mode",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ },
+ "years": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.years",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "token": {
+ "references": [
+ "var.object_lock_configuration.token",
+ "var.object_lock_configuration"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.object_lock_enabled",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_ownership_controls.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_ownership_controls",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "local.attach_policy",
+ "aws_s3_bucket_policy.this[0].id",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "rule": [
+ {
+ "object_ownership": {
+ "references": [
+ "var.object_ownership"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.control_object_ownership"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket_public_access_block.this",
+ "aws_s3_bucket.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_policy.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_policy",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "policy": {
+ "references": [
+ "data.aws_iam_policy_document.combined[0].json",
+ "data.aws_iam_policy_document.combined[0]",
+ "data.aws_iam_policy_document.combined"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.attach_policy"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_public_access_block.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_public_access_block.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "block_public_acls": {
+ "references": [
+ "var.block_public_acls"
+ ]
+ },
+ "block_public_policy": {
+ "references": [
+ "var.block_public_policy"
+ ]
+ },
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "ignore_public_acls": {
+ "references": [
+ "var.ignore_public_acls"
+ ]
+ },
+ "restrict_public_buckets": {
+ "references": [
+ "var.restrict_public_buckets"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_public_policy"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_replication_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_replication_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "role": {
+ "references": [
+ "var.replication_configuration[\"role\"]",
+ "var.replication_configuration"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.replication_configuration"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_versioning.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_request_payment_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_request_payment_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "payer": {
+ "references": [
+ "var.request_payer"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.request_payer"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_server_side_encryption_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_server_side_encryption_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.server_side_encryption_configuration"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_versioning.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_versioning",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "mfa": {
+ "references": [
+ "var.versioning[\"mfa\"]",
+ "var.versioning"
+ ]
+ },
+ "versioning_configuration": [
+ {
+ "mfa_delete": {
+ "references": [
+ "var.versioning[\"mfa_delete\"]",
+ "var.versioning",
+ "var.versioning[\"mfa_delete\"]",
+ "var.versioning"
+ ]
+ },
+ "status": {
+ "references": [
+ "var.versioning[\"enabled\"]",
+ "var.versioning",
+ "var.versioning[\"status\"]",
+ "var.versioning",
+ "var.versioning[\"status\"]",
+ "var.versioning"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.versioning"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_website_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_website_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.website"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_caller_identity.current",
+ "mode": "data",
+ "type": "aws_caller_identity",
+ "name": "current",
+ "provider_config_key": "s3-bucket-1:aws",
+ "schema_version": 0
+ },
+ {
+ "address": "data.aws_canonical_user_id.this",
+ "mode": "data",
+ "type": "aws_canonical_user_id",
+ "name": "this",
+ "provider_config_key": "s3-bucket-1:aws",
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.create_bucket_acl",
+ "var.owner[\"id\"]",
+ "var.owner"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.access_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "access_log_delivery",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logging.s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSAccessLogDeliveryWrite"
+ }
+ },
+ {
+ "actions": {
+ "constant_value": [
+ "s3:GetBucketAcl"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logging.s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSAccessLogDeliveryAclCheck"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_access_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.combined",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "combined",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "source_policy_documents": {
+ "references": [
+ "var.attach_elb_log_delivery_policy",
+ "data.aws_iam_policy_document.elb_log_delivery[0].json",
+ "data.aws_iam_policy_document.elb_log_delivery[0]",
+ "data.aws_iam_policy_document.elb_log_delivery",
+ "var.attach_lb_log_delivery_policy",
+ "data.aws_iam_policy_document.lb_log_delivery[0].json",
+ "data.aws_iam_policy_document.lb_log_delivery[0]",
+ "data.aws_iam_policy_document.lb_log_delivery",
+ "var.attach_access_log_delivery_policy",
+ "data.aws_iam_policy_document.access_log_delivery[0].json",
+ "data.aws_iam_policy_document.access_log_delivery[0]",
+ "data.aws_iam_policy_document.access_log_delivery",
+ "var.attach_require_latest_tls_policy",
+ "data.aws_iam_policy_document.require_latest_tls[0].json",
+ "data.aws_iam_policy_document.require_latest_tls[0]",
+ "data.aws_iam_policy_document.require_latest_tls",
+ "var.attach_deny_insecure_transport_policy",
+ "data.aws_iam_policy_document.deny_insecure_transport[0].json",
+ "data.aws_iam_policy_document.deny_insecure_transport[0]",
+ "data.aws_iam_policy_document.deny_insecure_transport",
+ "var.attach_deny_unencrypted_object_uploads",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads[0].json",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads[0]",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads",
+ "var.attach_deny_incorrect_kms_key_sse",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse[0].json",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse[0]",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse",
+ "var.attach_deny_incorrect_encryption_headers",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers[0].json",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers[0]",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers",
+ "var.attach_inventory_destination_policy",
+ "var.attach_analytics_destination_policy",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy[0].json",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy[0]",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy",
+ "var.attach_policy",
+ "var.policy"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.attach_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_incorrect_encryption_headers",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_incorrect_encryption_headers",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringNotEquals"
+ },
+ "values": {
+ "references": [
+ "var.server_side_encryption_configuration.rule.apply_server_side_encryption_by_default.sse_algorithm",
+ "var.server_side_encryption_configuration.rule.apply_server_side_encryption_by_default",
+ "var.server_side_encryption_configuration.rule",
+ "var.server_side_encryption_configuration"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyIncorrectEncryptionHeaders"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_incorrect_encryption_headers"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_incorrect_kms_key_sse",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_incorrect_kms_key_sse",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringNotEquals"
+ },
+ "values": {
+ "references": [
+ "var.allowed_kms_key_arn"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption-aws-kms-key-id"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyIncorrectKmsKeySse"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_incorrect_kms_key_sse"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_insecure_transport",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_insecure_transport",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:*"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "Bool"
+ },
+ "values": {
+ "constant_value": [
+ "false"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SecureTransport"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyInsecureTransport"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_insecure_transport_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_unencrypted_object_uploads",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_unencrypted_object_uploads",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "Null"
+ },
+ "values": {
+ "constant_value": [
+ true
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyUnencryptedObjectUploads"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_unencrypted_object_uploads"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.elb_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "elb_log_delivery",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logdelivery.elasticloadbalancing.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": ""
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_elb_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.inventory_and_analytics_destination_policy",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "inventory_and_analytics_destination_policy",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "ArnLike"
+ },
+ "values": {
+ "references": [
+ "var.inventory_self_source_destination",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "var.inventory_source_bucket_arn",
+ "var.analytics_self_source_destination",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "var.analytics_source_bucket_arn"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SourceArn"
+ }
+ },
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "references": [
+ "var.inventory_self_source_destination",
+ "data.aws_caller_identity.current.id",
+ "data.aws_caller_identity.current",
+ "var.inventory_source_account_id",
+ "var.analytics_self_source_destination",
+ "data.aws_caller_identity.current.id",
+ "data.aws_caller_identity.current",
+ "var.analytics_source_account_id"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SourceAccount"
+ }
+ },
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "constant_value": [
+ "bucket-owner-full-control"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-acl"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "destinationInventoryAndAnalyticsPolicy"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_inventory_destination_policy",
+ "var.attach_analytics_destination_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.lb_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "lb_log_delivery",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "constant_value": [
+ "bucket-owner-full-control"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-acl"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "delivery.logs.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSLogDeliveryWrite"
+ }
+ },
+ {
+ "actions": {
+ "constant_value": [
+ "s3:GetBucketAcl",
+ "s3:ListBucket"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "delivery.logs.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSLogDeliveryAclCheck"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_lb_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.require_latest_tls",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "require_latest_tls",
+ "provider_config_key": "s3-bucket-1:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:*"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "NumericLessThan"
+ },
+ "values": {
+ "constant_value": [
+ "1.2"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:TlsVersion"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyOutdatedTLS"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_require_latest_tls_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_partition.current",
+ "mode": "data",
+ "type": "aws_partition",
+ "name": "current",
+ "provider_config_key": "s3-bucket-1:aws",
+ "schema_version": 0
+ },
+ {
+ "address": "data.aws_region.current",
+ "mode": "data",
+ "type": "aws_region",
+ "name": "current",
+ "provider_config_key": "s3-bucket-1:aws",
+ "schema_version": 0
+ }
+ ],
+ "variables": {
+ "acceleration_status": {
+ "default": null,
+ "description": "(Optional) Sets the accelerate configuration of an existing bucket. Can be Enabled or Suspended."
+ },
+ "access_log_delivery_policy_source_accounts": {
+ "default": [],
+ "description": "(Optional) List of AWS Account IDs should be allowed to deliver access logs to this bucket."
+ },
+ "access_log_delivery_policy_source_buckets": {
+ "default": [],
+ "description": "(Optional) List of S3 bucket ARNs wich should be allowed to deliver access logs to this bucket."
+ },
+ "acl": {
+ "default": null,
+ "description": "(Optional) The canned ACL to apply. Conflicts with `grant`"
+ },
+ "allowed_kms_key_arn": {
+ "default": null,
+ "description": "The ARN of KMS key which should be allowed in PutObject"
+ },
+ "analytics_configuration": {
+ "default": {},
+ "description": "Map containing bucket analytics configuration."
+ },
+ "analytics_self_source_destination": {
+ "default": false,
+ "description": "Whether or not the analytics source bucket is also the destination bucket."
+ },
+ "analytics_source_account_id": {
+ "default": null,
+ "description": "The analytics source account id."
+ },
+ "analytics_source_bucket_arn": {
+ "default": null,
+ "description": "The analytics source bucket ARN."
+ },
+ "attach_access_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have S3 access log delivery policy attached"
+ },
+ "attach_analytics_destination_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket analytics destination policy attached."
+ },
+ "attach_deny_incorrect_encryption_headers": {
+ "default": false,
+ "description": "Controls if S3 bucket should deny incorrect encryption headers policy attached."
+ },
+ "attach_deny_incorrect_kms_key_sse": {
+ "default": false,
+ "description": "Controls if S3 bucket policy should deny usage of incorrect KMS key SSE."
+ },
+ "attach_deny_insecure_transport_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have deny non-SSL transport policy attached"
+ },
+ "attach_deny_unencrypted_object_uploads": {
+ "default": false,
+ "description": "Controls if S3 bucket should deny unencrypted object uploads policy attached."
+ },
+ "attach_elb_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have ELB log delivery policy attached"
+ },
+ "attach_inventory_destination_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket inventory destination policy attached."
+ },
+ "attach_lb_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have ALB/NLB log delivery policy attached"
+ },
+ "attach_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket policy attached (set to `true` to use value of `policy` as bucket policy)"
+ },
+ "attach_public_policy": {
+ "default": true,
+ "description": "Controls if a user defined public bucket policy will be attached (set to `false` to allow upstream to apply defaults to the bucket)"
+ },
+ "attach_require_latest_tls_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should require the latest version of TLS"
+ },
+ "block_public_acls": {
+ "default": true,
+ "description": "Whether Amazon S3 should block public ACLs for this bucket."
+ },
+ "block_public_policy": {
+ "default": true,
+ "description": "Whether Amazon S3 should block public bucket policies for this bucket."
+ },
+ "bucket": {
+ "default": null,
+ "description": "(Optional, Forces new resource) The name of the bucket. If omitted, Terraform will assign a random, unique name."
+ },
+ "bucket_prefix": {
+ "default": null,
+ "description": "(Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with bucket."
+ },
+ "control_object_ownership": {
+ "default": false,
+ "description": "Whether to manage S3 Bucket Ownership Controls on this bucket."
+ },
+ "cors_rule": {
+ "default": [],
+ "description": "List of maps containing rules for Cross-Origin Resource Sharing."
+ },
+ "create_bucket": {
+ "default": true,
+ "description": "Controls if S3 bucket should be created"
+ },
+ "expected_bucket_owner": {
+ "default": null,
+ "description": "The account ID of the expected bucket owner"
+ },
+ "force_destroy": {
+ "default": false,
+ "description": "(Optional, Default:false ) A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable."
+ },
+ "grant": {
+ "default": [],
+ "description": "An ACL policy grant. Conflicts with `acl`"
+ },
+ "ignore_public_acls": {
+ "default": true,
+ "description": "Whether Amazon S3 should ignore public ACLs for this bucket."
+ },
+ "intelligent_tiering": {
+ "default": {},
+ "description": "Map containing intelligent tiering configuration."
+ },
+ "inventory_configuration": {
+ "default": {},
+ "description": "Map containing S3 inventory configuration."
+ },
+ "inventory_self_source_destination": {
+ "default": false,
+ "description": "Whether or not the inventory source bucket is also the destination bucket."
+ },
+ "inventory_source_account_id": {
+ "default": null,
+ "description": "The inventory source account id."
+ },
+ "inventory_source_bucket_arn": {
+ "default": null,
+ "description": "The inventory source bucket ARN."
+ },
+ "lifecycle_rule": {
+ "default": [],
+ "description": "List of maps containing configuration of object lifecycle management."
+ },
+ "logging": {
+ "default": {},
+ "description": "Map containing access bucket logging configuration."
+ },
+ "metric_configuration": {
+ "default": [],
+ "description": "Map containing bucket metric configuration."
+ },
+ "object_lock_configuration": {
+ "default": {},
+ "description": "Map containing S3 object locking configuration."
+ },
+ "object_lock_enabled": {
+ "default": false,
+ "description": "Whether S3 bucket should have an Object Lock configuration enabled."
+ },
+ "object_ownership": {
+ "default": "BucketOwnerEnforced",
+ "description": "Object ownership. Valid values: BucketOwnerEnforced, BucketOwnerPreferred or ObjectWriter. 'BucketOwnerEnforced': ACLs are disabled, and the bucket owner automatically owns and has full control over every object in the bucket. 'BucketOwnerPreferred': Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the bucket-owner-full-control canned ACL. 'ObjectWriter': The uploading account will own the object if the object is uploaded with the bucket-owner-full-control canned ACL."
+ },
+ "owner": {
+ "default": {},
+ "description": "Bucket owner's display name and ID. Conflicts with `acl`"
+ },
+ "policy": {
+ "default": null,
+ "description": "(Optional) A valid bucket policy JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a terraform plan. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the AWS IAM Policy Document Guide."
+ },
+ "putin_khuylo": {
+ "default": true,
+ "description": "Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo!"
+ },
+ "replication_configuration": {
+ "default": {},
+ "description": "Map containing cross-region replication configuration."
+ },
+ "request_payer": {
+ "default": null,
+ "description": "(Optional) Specifies who should bear the cost of Amazon S3 data transfer. Can be either BucketOwner or Requester. By default, the owner of the S3 bucket would incur the costs of any data transfer. See Requester Pays Buckets developer guide for more information."
+ },
+ "restrict_public_buckets": {
+ "default": true,
+ "description": "Whether Amazon S3 should restrict public bucket policies for this bucket."
+ },
+ "server_side_encryption_configuration": {
+ "default": {},
+ "description": "Map containing server-side encryption configuration."
+ },
+ "tags": {
+ "default": {},
+ "description": "(Optional) A mapping of tags to assign to the bucket."
+ },
+ "versioning": {
+ "default": {},
+ "description": "Map containing versioning configuration."
+ },
+ "website": {
+ "default": {},
+ "description": "Map containing static web-site hosting or redirect configuration."
+ }
+ }
+ },
+ "version_constraint": "4.0.1"
+ },
+ "s3-bucket-2": {
+ "source": "terraform-aws-modules/s3-bucket/aws",
+ "module": {
+ "outputs": {
+ "s3_bucket_arn": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The ARN of the bucket. Will be of format arn:aws:s3:::bucketname."
+ },
+ "s3_bucket_bucket_domain_name": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].bucket_domain_name",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The bucket domain name. Will be of format bucketname.s3.amazonaws.com."
+ },
+ "s3_bucket_bucket_regional_domain_name": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].bucket_regional_domain_name",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The bucket region-specific domain name. The bucket domain name including the region name, please refer here for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent redirect issues from CloudFront to S3 Origin URL."
+ },
+ "s3_bucket_hosted_zone_id": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].hosted_zone_id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The Route 53 Hosted Zone ID for this bucket's region."
+ },
+ "s3_bucket_id": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_policy.this[0].id",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The name of the bucket."
+ },
+ "s3_bucket_lifecycle_configuration_rules": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_lifecycle_configuration.this[0].rule",
+ "aws_s3_bucket_lifecycle_configuration.this[0]",
+ "aws_s3_bucket_lifecycle_configuration.this"
+ ]
+ },
+ "description": "The lifecycle rules of the bucket, if the bucket is configured with lifecycle rules. If not, this will be an empty string."
+ },
+ "s3_bucket_policy": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_policy.this[0].policy",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this"
+ ]
+ },
+ "description": "The policy of the bucket, if the bucket is configured with a policy. If not, this will be an empty string."
+ },
+ "s3_bucket_region": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].region",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The AWS region this bucket resides in."
+ },
+ "s3_bucket_website_domain": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_website_configuration.this[0].website_domain",
+ "aws_s3_bucket_website_configuration.this[0]",
+ "aws_s3_bucket_website_configuration.this"
+ ]
+ },
+ "description": "The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records."
+ },
+ "s3_bucket_website_endpoint": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_website_configuration.this[0].website_endpoint",
+ "aws_s3_bucket_website_configuration.this[0]",
+ "aws_s3_bucket_website_configuration.this"
+ ]
+ },
+ "description": "The website endpoint, if the bucket is configured with a website. If not, this will be an empty string."
+ }
+ },
+ "resources": [
+ {
+ "address": "aws_s3_bucket.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "var.bucket"
+ ]
+ },
+ "bucket_prefix": {
+ "references": [
+ "var.bucket_prefix"
+ ]
+ },
+ "force_destroy": {
+ "references": [
+ "var.force_destroy"
+ ]
+ },
+ "object_lock_enabled": {
+ "references": [
+ "var.object_lock_enabled"
+ ]
+ },
+ "tags": {
+ "references": [
+ "var.tags"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_accelerate_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_accelerate_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "status": {
+ "references": [
+ "var.acceleration_status"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.acceleration_status"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_acl.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_acl",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "acl": {
+ "references": [
+ "var.acl",
+ "var.acl"
+ ]
+ },
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.create_bucket_acl"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_ownership_controls.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_analytics_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_analytics_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "var.analytics_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_cors_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_cors_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.cors_rules"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_intelligent_tiering_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_intelligent_tiering_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ },
+ "status": {
+ "references": [
+ "each.value.status",
+ "each.value",
+ "each.value.status",
+ "each.value"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "local.intelligent_tiering",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_inventory.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_inventory",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "each.value.bucket",
+ "each.value",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "destination": [
+ {
+ "bucket": [
+ {
+ "account_id": {
+ "references": [
+ "each.value.destination.account_id",
+ "each.value.destination",
+ "each.value"
+ ]
+ },
+ "bucket_arn": {
+ "references": [
+ "each.value.destination.bucket_arn",
+ "each.value.destination",
+ "each.value",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "format": {
+ "references": [
+ "each.value.destination.format",
+ "each.value.destination",
+ "each.value"
+ ]
+ },
+ "prefix": {
+ "references": [
+ "each.value.destination.prefix",
+ "each.value.destination",
+ "each.value"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "enabled": {
+ "references": [
+ "each.value.enabled",
+ "each.value"
+ ]
+ },
+ "included_object_versions": {
+ "references": [
+ "each.value.included_object_versions",
+ "each.value"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ },
+ "optional_fields": {
+ "references": [
+ "each.value.optional_fields",
+ "each.value"
+ ]
+ },
+ "schedule": [
+ {
+ "frequency": {
+ "references": [
+ "each.value.frequency",
+ "each.value"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "var.inventory_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_lifecycle_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_lifecycle_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.lifecycle_rules"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_versioning.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_logging.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_logging",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "target_bucket": {
+ "references": [
+ "var.logging[\"target_bucket\"]",
+ "var.logging"
+ ]
+ },
+ "target_prefix": {
+ "references": [
+ "var.logging[\"target_prefix\"]",
+ "var.logging"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.logging"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_metric.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_metric",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.value.name",
+ "each.value"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "local.metric_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_object_lock_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_object_lock_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "rule": [
+ {
+ "default_retention": [
+ {
+ "days": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.days",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ },
+ "mode": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.mode",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ },
+ "years": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.years",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "token": {
+ "references": [
+ "var.object_lock_configuration.token",
+ "var.object_lock_configuration"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.object_lock_enabled",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_ownership_controls.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_ownership_controls",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "local.attach_policy",
+ "aws_s3_bucket_policy.this[0].id",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "rule": [
+ {
+ "object_ownership": {
+ "references": [
+ "var.object_ownership"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.control_object_ownership"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket_public_access_block.this",
+ "aws_s3_bucket.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_policy.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_policy",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "policy": {
+ "references": [
+ "data.aws_iam_policy_document.combined[0].json",
+ "data.aws_iam_policy_document.combined[0]",
+ "data.aws_iam_policy_document.combined"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.attach_policy"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_public_access_block.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_public_access_block.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "block_public_acls": {
+ "references": [
+ "var.block_public_acls"
+ ]
+ },
+ "block_public_policy": {
+ "references": [
+ "var.block_public_policy"
+ ]
+ },
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "ignore_public_acls": {
+ "references": [
+ "var.ignore_public_acls"
+ ]
+ },
+ "restrict_public_buckets": {
+ "references": [
+ "var.restrict_public_buckets"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_public_policy"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_replication_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_replication_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "role": {
+ "references": [
+ "var.replication_configuration[\"role\"]",
+ "var.replication_configuration"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.replication_configuration"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_versioning.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_request_payment_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_request_payment_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "payer": {
+ "references": [
+ "var.request_payer"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.request_payer"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_server_side_encryption_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_server_side_encryption_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.server_side_encryption_configuration"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_versioning.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_versioning",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "mfa": {
+ "references": [
+ "var.versioning[\"mfa\"]",
+ "var.versioning"
+ ]
+ },
+ "versioning_configuration": [
+ {
+ "mfa_delete": {
+ "references": [
+ "var.versioning[\"mfa_delete\"]",
+ "var.versioning",
+ "var.versioning[\"mfa_delete\"]",
+ "var.versioning"
+ ]
+ },
+ "status": {
+ "references": [
+ "var.versioning[\"enabled\"]",
+ "var.versioning",
+ "var.versioning[\"status\"]",
+ "var.versioning",
+ "var.versioning[\"status\"]",
+ "var.versioning"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.versioning"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_website_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_website_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.website"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_caller_identity.current",
+ "mode": "data",
+ "type": "aws_caller_identity",
+ "name": "current",
+ "provider_config_key": "s3-bucket-2:aws",
+ "schema_version": 0
+ },
+ {
+ "address": "data.aws_canonical_user_id.this",
+ "mode": "data",
+ "type": "aws_canonical_user_id",
+ "name": "this",
+ "provider_config_key": "s3-bucket-2:aws",
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.create_bucket_acl",
+ "var.owner[\"id\"]",
+ "var.owner"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.access_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "access_log_delivery",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logging.s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSAccessLogDeliveryWrite"
+ }
+ },
+ {
+ "actions": {
+ "constant_value": [
+ "s3:GetBucketAcl"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logging.s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSAccessLogDeliveryAclCheck"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_access_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.combined",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "combined",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "source_policy_documents": {
+ "references": [
+ "var.attach_elb_log_delivery_policy",
+ "data.aws_iam_policy_document.elb_log_delivery[0].json",
+ "data.aws_iam_policy_document.elb_log_delivery[0]",
+ "data.aws_iam_policy_document.elb_log_delivery",
+ "var.attach_lb_log_delivery_policy",
+ "data.aws_iam_policy_document.lb_log_delivery[0].json",
+ "data.aws_iam_policy_document.lb_log_delivery[0]",
+ "data.aws_iam_policy_document.lb_log_delivery",
+ "var.attach_access_log_delivery_policy",
+ "data.aws_iam_policy_document.access_log_delivery[0].json",
+ "data.aws_iam_policy_document.access_log_delivery[0]",
+ "data.aws_iam_policy_document.access_log_delivery",
+ "var.attach_require_latest_tls_policy",
+ "data.aws_iam_policy_document.require_latest_tls[0].json",
+ "data.aws_iam_policy_document.require_latest_tls[0]",
+ "data.aws_iam_policy_document.require_latest_tls",
+ "var.attach_deny_insecure_transport_policy",
+ "data.aws_iam_policy_document.deny_insecure_transport[0].json",
+ "data.aws_iam_policy_document.deny_insecure_transport[0]",
+ "data.aws_iam_policy_document.deny_insecure_transport",
+ "var.attach_deny_unencrypted_object_uploads",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads[0].json",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads[0]",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads",
+ "var.attach_deny_incorrect_kms_key_sse",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse[0].json",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse[0]",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse",
+ "var.attach_deny_incorrect_encryption_headers",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers[0].json",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers[0]",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers",
+ "var.attach_inventory_destination_policy",
+ "var.attach_analytics_destination_policy",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy[0].json",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy[0]",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy",
+ "var.attach_policy",
+ "var.policy"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.attach_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_incorrect_encryption_headers",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_incorrect_encryption_headers",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringNotEquals"
+ },
+ "values": {
+ "references": [
+ "var.server_side_encryption_configuration.rule.apply_server_side_encryption_by_default.sse_algorithm",
+ "var.server_side_encryption_configuration.rule.apply_server_side_encryption_by_default",
+ "var.server_side_encryption_configuration.rule",
+ "var.server_side_encryption_configuration"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyIncorrectEncryptionHeaders"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_incorrect_encryption_headers"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_incorrect_kms_key_sse",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_incorrect_kms_key_sse",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringNotEquals"
+ },
+ "values": {
+ "references": [
+ "var.allowed_kms_key_arn"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption-aws-kms-key-id"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyIncorrectKmsKeySse"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_incorrect_kms_key_sse"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_insecure_transport",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_insecure_transport",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:*"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "Bool"
+ },
+ "values": {
+ "constant_value": [
+ "false"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SecureTransport"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyInsecureTransport"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_insecure_transport_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_unencrypted_object_uploads",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_unencrypted_object_uploads",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "Null"
+ },
+ "values": {
+ "constant_value": [
+ true
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyUnencryptedObjectUploads"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_unencrypted_object_uploads"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.elb_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "elb_log_delivery",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logdelivery.elasticloadbalancing.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": ""
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_elb_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.inventory_and_analytics_destination_policy",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "inventory_and_analytics_destination_policy",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "ArnLike"
+ },
+ "values": {
+ "references": [
+ "var.inventory_self_source_destination",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "var.inventory_source_bucket_arn",
+ "var.analytics_self_source_destination",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "var.analytics_source_bucket_arn"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SourceArn"
+ }
+ },
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "references": [
+ "var.inventory_self_source_destination",
+ "data.aws_caller_identity.current.id",
+ "data.aws_caller_identity.current",
+ "var.inventory_source_account_id",
+ "var.analytics_self_source_destination",
+ "data.aws_caller_identity.current.id",
+ "data.aws_caller_identity.current",
+ "var.analytics_source_account_id"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SourceAccount"
+ }
+ },
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "constant_value": [
+ "bucket-owner-full-control"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-acl"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "destinationInventoryAndAnalyticsPolicy"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_inventory_destination_policy",
+ "var.attach_analytics_destination_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.lb_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "lb_log_delivery",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "constant_value": [
+ "bucket-owner-full-control"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-acl"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "delivery.logs.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSLogDeliveryWrite"
+ }
+ },
+ {
+ "actions": {
+ "constant_value": [
+ "s3:GetBucketAcl",
+ "s3:ListBucket"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "delivery.logs.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSLogDeliveryAclCheck"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_lb_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.require_latest_tls",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "require_latest_tls",
+ "provider_config_key": "s3-bucket-2:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:*"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "NumericLessThan"
+ },
+ "values": {
+ "constant_value": [
+ "1.2"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:TlsVersion"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyOutdatedTLS"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_require_latest_tls_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_partition.current",
+ "mode": "data",
+ "type": "aws_partition",
+ "name": "current",
+ "provider_config_key": "s3-bucket-2:aws",
+ "schema_version": 0
+ },
+ {
+ "address": "data.aws_region.current",
+ "mode": "data",
+ "type": "aws_region",
+ "name": "current",
+ "provider_config_key": "s3-bucket-2:aws",
+ "schema_version": 0
+ }
+ ],
+ "variables": {
+ "acceleration_status": {
+ "default": null,
+ "description": "(Optional) Sets the accelerate configuration of an existing bucket. Can be Enabled or Suspended."
+ },
+ "access_log_delivery_policy_source_accounts": {
+ "default": [],
+ "description": "(Optional) List of AWS Account IDs should be allowed to deliver access logs to this bucket."
+ },
+ "access_log_delivery_policy_source_buckets": {
+ "default": [],
+ "description": "(Optional) List of S3 bucket ARNs wich should be allowed to deliver access logs to this bucket."
+ },
+ "acl": {
+ "default": null,
+ "description": "(Optional) The canned ACL to apply. Conflicts with `grant`"
+ },
+ "allowed_kms_key_arn": {
+ "default": null,
+ "description": "The ARN of KMS key which should be allowed in PutObject"
+ },
+ "analytics_configuration": {
+ "default": {},
+ "description": "Map containing bucket analytics configuration."
+ },
+ "analytics_self_source_destination": {
+ "default": false,
+ "description": "Whether or not the analytics source bucket is also the destination bucket."
+ },
+ "analytics_source_account_id": {
+ "default": null,
+ "description": "The analytics source account id."
+ },
+ "analytics_source_bucket_arn": {
+ "default": null,
+ "description": "The analytics source bucket ARN."
+ },
+ "attach_access_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have S3 access log delivery policy attached"
+ },
+ "attach_analytics_destination_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket analytics destination policy attached."
+ },
+ "attach_deny_incorrect_encryption_headers": {
+ "default": false,
+ "description": "Controls if S3 bucket should deny incorrect encryption headers policy attached."
+ },
+ "attach_deny_incorrect_kms_key_sse": {
+ "default": false,
+ "description": "Controls if S3 bucket policy should deny usage of incorrect KMS key SSE."
+ },
+ "attach_deny_insecure_transport_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have deny non-SSL transport policy attached"
+ },
+ "attach_deny_unencrypted_object_uploads": {
+ "default": false,
+ "description": "Controls if S3 bucket should deny unencrypted object uploads policy attached."
+ },
+ "attach_elb_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have ELB log delivery policy attached"
+ },
+ "attach_inventory_destination_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket inventory destination policy attached."
+ },
+ "attach_lb_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have ALB/NLB log delivery policy attached"
+ },
+ "attach_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket policy attached (set to `true` to use value of `policy` as bucket policy)"
+ },
+ "attach_public_policy": {
+ "default": true,
+ "description": "Controls if a user defined public bucket policy will be attached (set to `false` to allow upstream to apply defaults to the bucket)"
+ },
+ "attach_require_latest_tls_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should require the latest version of TLS"
+ },
+ "block_public_acls": {
+ "default": true,
+ "description": "Whether Amazon S3 should block public ACLs for this bucket."
+ },
+ "block_public_policy": {
+ "default": true,
+ "description": "Whether Amazon S3 should block public bucket policies for this bucket."
+ },
+ "bucket": {
+ "default": null,
+ "description": "(Optional, Forces new resource) The name of the bucket. If omitted, Terraform will assign a random, unique name."
+ },
+ "bucket_prefix": {
+ "default": null,
+ "description": "(Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with bucket."
+ },
+ "control_object_ownership": {
+ "default": false,
+ "description": "Whether to manage S3 Bucket Ownership Controls on this bucket."
+ },
+ "cors_rule": {
+ "default": [],
+ "description": "List of maps containing rules for Cross-Origin Resource Sharing."
+ },
+ "create_bucket": {
+ "default": true,
+ "description": "Controls if S3 bucket should be created"
+ },
+ "expected_bucket_owner": {
+ "default": null,
+ "description": "The account ID of the expected bucket owner"
+ },
+ "force_destroy": {
+ "default": false,
+ "description": "(Optional, Default:false ) A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable."
+ },
+ "grant": {
+ "default": [],
+ "description": "An ACL policy grant. Conflicts with `acl`"
+ },
+ "ignore_public_acls": {
+ "default": true,
+ "description": "Whether Amazon S3 should ignore public ACLs for this bucket."
+ },
+ "intelligent_tiering": {
+ "default": {},
+ "description": "Map containing intelligent tiering configuration."
+ },
+ "inventory_configuration": {
+ "default": {},
+ "description": "Map containing S3 inventory configuration."
+ },
+ "inventory_self_source_destination": {
+ "default": false,
+ "description": "Whether or not the inventory source bucket is also the destination bucket."
+ },
+ "inventory_source_account_id": {
+ "default": null,
+ "description": "The inventory source account id."
+ },
+ "inventory_source_bucket_arn": {
+ "default": null,
+ "description": "The inventory source bucket ARN."
+ },
+ "lifecycle_rule": {
+ "default": [],
+ "description": "List of maps containing configuration of object lifecycle management."
+ },
+ "logging": {
+ "default": {},
+ "description": "Map containing access bucket logging configuration."
+ },
+ "metric_configuration": {
+ "default": [],
+ "description": "Map containing bucket metric configuration."
+ },
+ "object_lock_configuration": {
+ "default": {},
+ "description": "Map containing S3 object locking configuration."
+ },
+ "object_lock_enabled": {
+ "default": false,
+ "description": "Whether S3 bucket should have an Object Lock configuration enabled."
+ },
+ "object_ownership": {
+ "default": "BucketOwnerEnforced",
+ "description": "Object ownership. Valid values: BucketOwnerEnforced, BucketOwnerPreferred or ObjectWriter. 'BucketOwnerEnforced': ACLs are disabled, and the bucket owner automatically owns and has full control over every object in the bucket. 'BucketOwnerPreferred': Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the bucket-owner-full-control canned ACL. 'ObjectWriter': The uploading account will own the object if the object is uploaded with the bucket-owner-full-control canned ACL."
+ },
+ "owner": {
+ "default": {},
+ "description": "Bucket owner's display name and ID. Conflicts with `acl`"
+ },
+ "policy": {
+ "default": null,
+ "description": "(Optional) A valid bucket policy JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a terraform plan. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the AWS IAM Policy Document Guide."
+ },
+ "putin_khuylo": {
+ "default": true,
+ "description": "Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo!"
+ },
+ "replication_configuration": {
+ "default": {},
+ "description": "Map containing cross-region replication configuration."
+ },
+ "request_payer": {
+ "default": null,
+ "description": "(Optional) Specifies who should bear the cost of Amazon S3 data transfer. Can be either BucketOwner or Requester. By default, the owner of the S3 bucket would incur the costs of any data transfer. See Requester Pays Buckets developer guide for more information."
+ },
+ "restrict_public_buckets": {
+ "default": true,
+ "description": "Whether Amazon S3 should restrict public bucket policies for this bucket."
+ },
+ "server_side_encryption_configuration": {
+ "default": {},
+ "description": "Map containing server-side encryption configuration."
+ },
+ "tags": {
+ "default": {},
+ "description": "(Optional) A mapping of tags to assign to the bucket."
+ },
+ "versioning": {
+ "default": {},
+ "description": "Map containing versioning configuration."
+ },
+ "website": {
+ "default": {},
+ "description": "Map containing static web-site hosting or redirect configuration."
+ }
+ }
+ },
+ "version_constraint": "4.0.1"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/terraform/parser/resources/plan_module_with_connected_resources/s3module.tf b/tests/terraform/parser/resources/plan_module_with_connected_resources/s3module.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/parser/resources/plan_module_with_connected_resources/s3module.tf
@@ -0,0 +1,4 @@
+module "s3-bucket-1" {
+ source = "terraform-aws-modules/s3-bucket/aws"
+ version = "4.0.1"
+}
diff --git a/tests/terraform/parser/resources/plan_module_with_connected_resources/tfplan.json b/tests/terraform/parser/resources/plan_module_with_connected_resources/tfplan.json
new file mode 100644
--- /dev/null
+++ b/tests/terraform/parser/resources/plan_module_with_connected_resources/tfplan.json
@@ -0,0 +1,2086 @@
+{
+ "format_version": "0.2",
+ "terraform_version": "1.0.7",
+ "planned_values": {
+ "root_module": {
+ "child_modules": [
+ {
+ "resources": [
+ {
+ "address": "module.s3-bucket.aws_s3_bucket.this[0]",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "force_destroy": false,
+ "object_lock_enabled": false,
+ "tags": null,
+ "timeouts": null
+ },
+ "sensitive_values": {
+ "cors_rule": [],
+ "grant": [],
+ "lifecycle_rule": [],
+ "logging": [],
+ "object_lock_configuration": [],
+ "replication_configuration": [],
+ "server_side_encryption_configuration": [],
+ "tags_all": {},
+ "versioning": [],
+ "website": []
+ }
+ },
+ {
+ "address": "module.s3-bucket.aws_s3_bucket_public_access_block.this[0]",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "block_public_acls": true,
+ "block_public_policy": true,
+ "ignore_public_acls": true,
+ "restrict_public_buckets": true
+ },
+ "sensitive_values": {}
+ }
+ ],
+ "address": "module.s3-bucket"
+ }
+ ]
+ }
+ },
+ "resource_changes": [
+ {
+ "address": "module.s3-bucket.aws_s3_bucket.this[0]",
+ "module_address": "module.s3-bucket",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "change": {
+ "actions": [
+ "create"
+ ],
+ "before": null,
+ "after": {
+ "force_destroy": false,
+ "object_lock_enabled": false,
+ "tags": null,
+ "timeouts": null
+ },
+ "after_unknown": {
+ "acceleration_status": true,
+ "acl": true,
+ "arn": true,
+ "bucket": true,
+ "bucket_domain_name": true,
+ "bucket_prefix": true,
+ "bucket_regional_domain_name": true,
+ "cors_rule": true,
+ "grant": true,
+ "hosted_zone_id": true,
+ "id": true,
+ "lifecycle_rule": true,
+ "logging": true,
+ "object_lock_configuration": true,
+ "policy": true,
+ "region": true,
+ "replication_configuration": true,
+ "request_payer": true,
+ "server_side_encryption_configuration": true,
+ "tags_all": true,
+ "versioning": true,
+ "website": true,
+ "website_domain": true,
+ "website_endpoint": true
+ },
+ "before_sensitive": false,
+ "after_sensitive": {
+ "cors_rule": [],
+ "grant": [],
+ "lifecycle_rule": [],
+ "logging": [],
+ "object_lock_configuration": [],
+ "replication_configuration": [],
+ "server_side_encryption_configuration": [],
+ "tags_all": {},
+ "versioning": [],
+ "website": []
+ }
+ }
+ },
+ {
+ "address": "module.s3-bucket.aws_s3_bucket_public_access_block.this[0]",
+ "module_address": "module.s3-bucket",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "index": 0,
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "change": {
+ "actions": [
+ "create"
+ ],
+ "before": null,
+ "after": {
+ "block_public_acls": true,
+ "block_public_policy": true,
+ "ignore_public_acls": true,
+ "restrict_public_buckets": true
+ },
+ "after_unknown": {
+ "bucket": true,
+ "id": true
+ },
+ "before_sensitive": false,
+ "after_sensitive": {}
+ }
+ }
+ ],
+ "prior_state": {
+ "format_version": "0.2",
+ "terraform_version": "1.0.7",
+ "values": {
+ "root_module": {
+ "child_modules": [
+ {
+ "resources": [
+ {
+ "address": "module.s3-bucket.data.aws_caller_identity.current",
+ "mode": "data",
+ "type": "aws_caller_identity",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "account_id": "101860328116",
+ "arn": "arn:aws:iam::101860328116:user/atlantis",
+ "id": "101860328116",
+ "user_id": "AIDARPN2ZIK2PHMJSNYXG"
+ },
+ "sensitive_values": {}
+ },
+ {
+ "address": "module.s3-bucket.data.aws_partition.current",
+ "mode": "data",
+ "type": "aws_partition",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "dns_suffix": "amazonaws.com",
+ "id": "aws",
+ "partition": "aws",
+ "reverse_dns_prefix": "com.amazonaws"
+ },
+ "sensitive_values": {}
+ },
+ {
+ "address": "module.s3-bucket.data.aws_region.current",
+ "mode": "data",
+ "type": "aws_region",
+ "name": "current",
+ "provider_name": "registry.terraform.io/hashicorp/aws",
+ "schema_version": 0,
+ "values": {
+ "description": "Europe (Frankfurt)",
+ "endpoint": "ec2.eu-central-1.amazonaws.com",
+ "id": "eu-central-1",
+ "name": "eu-central-1"
+ },
+ "sensitive_values": {}
+ }
+ ],
+ "address": "module.s3-bucket"
+ }
+ ]
+ }
+ }
+ },
+ "configuration": {
+ "provider_config": {
+ "aws": {
+ "name": "aws",
+ "expressions": {
+ "profile": {
+ "constant_value": "razorpay-stage"
+ },
+ "region": {
+ "constant_value": "eu-central-1"
+ }
+ }
+ },
+ "module.s3-bucket:aws": {
+ "name": "aws",
+ "version_constraint": ">= 5.27.0",
+ "module_address": "module.s3-bucket"
+ }
+ },
+ "root_module": {
+ "module_calls": {
+ "s3-bucket": {
+ "source": "terraform-aws-modules/s3-bucket/aws",
+ "module": {
+ "outputs": {
+ "s3_bucket_arn": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The ARN of the bucket. Will be of format arn:aws:s3:::bucketname."
+ },
+ "s3_bucket_bucket_domain_name": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].bucket_domain_name",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The bucket domain name. Will be of format bucketname.s3.amazonaws.com."
+ },
+ "s3_bucket_bucket_regional_domain_name": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].bucket_regional_domain_name",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The bucket region-specific domain name. The bucket domain name including the region name, please refer here for format. Note: The AWS CloudFront allows specifying S3 region-specific endpoint when creating S3 origin, it will prevent redirect issues from CloudFront to S3 Origin URL."
+ },
+ "s3_bucket_hosted_zone_id": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].hosted_zone_id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The Route 53 Hosted Zone ID for this bucket's region."
+ },
+ "s3_bucket_id": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_policy.this[0].id",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The name of the bucket."
+ },
+ "s3_bucket_lifecycle_configuration_rules": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_lifecycle_configuration.this[0].rule",
+ "aws_s3_bucket_lifecycle_configuration.this[0]",
+ "aws_s3_bucket_lifecycle_configuration.this"
+ ]
+ },
+ "description": "The lifecycle rules of the bucket, if the bucket is configured with lifecycle rules. If not, this will be an empty string."
+ },
+ "s3_bucket_policy": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_policy.this[0].policy",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this"
+ ]
+ },
+ "description": "The policy of the bucket, if the bucket is configured with a policy. If not, this will be an empty string."
+ },
+ "s3_bucket_region": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket.this[0].region",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "description": "The AWS region this bucket resides in."
+ },
+ "s3_bucket_website_domain": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_website_configuration.this[0].website_domain",
+ "aws_s3_bucket_website_configuration.this[0]",
+ "aws_s3_bucket_website_configuration.this"
+ ]
+ },
+ "description": "The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records."
+ },
+ "s3_bucket_website_endpoint": {
+ "expression": {
+ "references": [
+ "aws_s3_bucket_website_configuration.this[0].website_endpoint",
+ "aws_s3_bucket_website_configuration.this[0]",
+ "aws_s3_bucket_website_configuration.this"
+ ]
+ },
+ "description": "The website endpoint, if the bucket is configured with a website. If not, this will be an empty string."
+ }
+ },
+ "resources": [
+ {
+ "address": "aws_s3_bucket.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "var.bucket"
+ ]
+ },
+ "bucket_prefix": {
+ "references": [
+ "var.bucket_prefix"
+ ]
+ },
+ "force_destroy": {
+ "references": [
+ "var.force_destroy"
+ ]
+ },
+ "object_lock_enabled": {
+ "references": [
+ "var.object_lock_enabled"
+ ]
+ },
+ "tags": {
+ "references": [
+ "var.tags"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_accelerate_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_accelerate_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "status": {
+ "references": [
+ "var.acceleration_status"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.acceleration_status"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_acl.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_acl",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "acl": {
+ "references": [
+ "var.acl",
+ "var.acl"
+ ]
+ },
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.create_bucket_acl"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_ownership_controls.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_analytics_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_analytics_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "var.analytics_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_cors_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_cors_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.cors_rules"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_intelligent_tiering_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_intelligent_tiering_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ },
+ "status": {
+ "references": [
+ "each.value.status",
+ "each.value",
+ "each.value.status",
+ "each.value"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "local.intelligent_tiering",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_inventory.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_inventory",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "each.value.bucket",
+ "each.value",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "destination": [
+ {
+ "bucket": [
+ {
+ "account_id": {
+ "references": [
+ "each.value.destination.account_id",
+ "each.value.destination",
+ "each.value"
+ ]
+ },
+ "bucket_arn": {
+ "references": [
+ "each.value.destination.bucket_arn",
+ "each.value.destination",
+ "each.value",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "format": {
+ "references": [
+ "each.value.destination.format",
+ "each.value.destination",
+ "each.value"
+ ]
+ },
+ "prefix": {
+ "references": [
+ "each.value.destination.prefix",
+ "each.value.destination",
+ "each.value"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "enabled": {
+ "references": [
+ "each.value.enabled",
+ "each.value"
+ ]
+ },
+ "included_object_versions": {
+ "references": [
+ "each.value.included_object_versions",
+ "each.value"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.key"
+ ]
+ },
+ "optional_fields": {
+ "references": [
+ "each.value.optional_fields",
+ "each.value"
+ ]
+ },
+ "schedule": [
+ {
+ "frequency": {
+ "references": [
+ "each.value.frequency",
+ "each.value"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "var.inventory_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_lifecycle_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_lifecycle_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.lifecycle_rules"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_versioning.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_logging.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_logging",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "target_bucket": {
+ "references": [
+ "var.logging[\"target_bucket\"]",
+ "var.logging"
+ ]
+ },
+ "target_prefix": {
+ "references": [
+ "var.logging[\"target_prefix\"]",
+ "var.logging"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.logging"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_metric.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_metric",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "name": {
+ "references": [
+ "each.value.name",
+ "each.value"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "for_each_expression": {
+ "references": [
+ "local.metric_configuration",
+ "local.create_bucket"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_object_lock_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_object_lock_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "rule": [
+ {
+ "default_retention": [
+ {
+ "days": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.days",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ },
+ "mode": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.mode",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ },
+ "years": {
+ "references": [
+ "var.object_lock_configuration.rule.default_retention.years",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ }
+ }
+ ]
+ }
+ ],
+ "token": {
+ "references": [
+ "var.object_lock_configuration.token",
+ "var.object_lock_configuration"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.object_lock_enabled",
+ "var.object_lock_configuration.rule.default_retention",
+ "var.object_lock_configuration.rule",
+ "var.object_lock_configuration"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_ownership_controls.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_ownership_controls",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "local.attach_policy",
+ "aws_s3_bucket_policy.this[0].id",
+ "aws_s3_bucket_policy.this[0]",
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "rule": [
+ {
+ "object_ownership": {
+ "references": [
+ "var.object_ownership"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.control_object_ownership"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_policy.this",
+ "aws_s3_bucket_public_access_block.this",
+ "aws_s3_bucket.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_policy.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_policy",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "policy": {
+ "references": [
+ "data.aws_iam_policy_document.combined[0].json",
+ "data.aws_iam_policy_document.combined[0]",
+ "data.aws_iam_policy_document.combined"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.attach_policy"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_public_access_block.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_public_access_block.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_public_access_block",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "block_public_acls": {
+ "references": [
+ "var.block_public_acls"
+ ]
+ },
+ "block_public_policy": {
+ "references": [
+ "var.block_public_policy"
+ ]
+ },
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "ignore_public_acls": {
+ "references": [
+ "var.ignore_public_acls"
+ ]
+ },
+ "restrict_public_buckets": {
+ "references": [
+ "var.restrict_public_buckets"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_public_policy"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_replication_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_replication_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "role": {
+ "references": [
+ "var.replication_configuration[\"role\"]",
+ "var.replication_configuration"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.replication_configuration"
+ ]
+ },
+ "depends_on": [
+ "aws_s3_bucket_versioning.this"
+ ]
+ },
+ {
+ "address": "aws_s3_bucket_request_payment_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_request_payment_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "payer": {
+ "references": [
+ "var.request_payer"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.request_payer"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_server_side_encryption_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_server_side_encryption_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.server_side_encryption_configuration"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_versioning.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_versioning",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ },
+ "mfa": {
+ "references": [
+ "var.versioning[\"mfa\"]",
+ "var.versioning"
+ ]
+ },
+ "versioning_configuration": [
+ {
+ "mfa_delete": {
+ "references": [
+ "var.versioning[\"mfa_delete\"]",
+ "var.versioning",
+ "var.versioning[\"mfa_delete\"]",
+ "var.versioning"
+ ]
+ },
+ "status": {
+ "references": [
+ "var.versioning[\"enabled\"]",
+ "var.versioning",
+ "var.versioning[\"status\"]",
+ "var.versioning",
+ "var.versioning[\"status\"]",
+ "var.versioning"
+ ]
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.versioning"
+ ]
+ }
+ },
+ {
+ "address": "aws_s3_bucket_website_configuration.this",
+ "mode": "managed",
+ "type": "aws_s3_bucket_website_configuration",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "bucket": {
+ "references": [
+ "aws_s3_bucket.this[0].id",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "expected_bucket_owner": {
+ "references": [
+ "var.expected_bucket_owner"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.website"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_caller_identity.current",
+ "mode": "data",
+ "type": "aws_caller_identity",
+ "name": "current",
+ "provider_config_key": "s3-bucket:aws",
+ "schema_version": 0
+ },
+ {
+ "address": "data.aws_canonical_user_id.this",
+ "mode": "data",
+ "type": "aws_canonical_user_id",
+ "name": "this",
+ "provider_config_key": "s3-bucket:aws",
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.create_bucket_acl",
+ "var.owner[\"id\"]",
+ "var.owner"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.access_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "access_log_delivery",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logging.s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSAccessLogDeliveryWrite"
+ }
+ },
+ {
+ "actions": {
+ "constant_value": [
+ "s3:GetBucketAcl"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logging.s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSAccessLogDeliveryAclCheck"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_access_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.combined",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "combined",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "source_policy_documents": {
+ "references": [
+ "var.attach_elb_log_delivery_policy",
+ "data.aws_iam_policy_document.elb_log_delivery[0].json",
+ "data.aws_iam_policy_document.elb_log_delivery[0]",
+ "data.aws_iam_policy_document.elb_log_delivery",
+ "var.attach_lb_log_delivery_policy",
+ "data.aws_iam_policy_document.lb_log_delivery[0].json",
+ "data.aws_iam_policy_document.lb_log_delivery[0]",
+ "data.aws_iam_policy_document.lb_log_delivery",
+ "var.attach_access_log_delivery_policy",
+ "data.aws_iam_policy_document.access_log_delivery[0].json",
+ "data.aws_iam_policy_document.access_log_delivery[0]",
+ "data.aws_iam_policy_document.access_log_delivery",
+ "var.attach_require_latest_tls_policy",
+ "data.aws_iam_policy_document.require_latest_tls[0].json",
+ "data.aws_iam_policy_document.require_latest_tls[0]",
+ "data.aws_iam_policy_document.require_latest_tls",
+ "var.attach_deny_insecure_transport_policy",
+ "data.aws_iam_policy_document.deny_insecure_transport[0].json",
+ "data.aws_iam_policy_document.deny_insecure_transport[0]",
+ "data.aws_iam_policy_document.deny_insecure_transport",
+ "var.attach_deny_unencrypted_object_uploads",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads[0].json",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads[0]",
+ "data.aws_iam_policy_document.deny_unencrypted_object_uploads",
+ "var.attach_deny_incorrect_kms_key_sse",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse[0].json",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse[0]",
+ "data.aws_iam_policy_document.deny_incorrect_kms_key_sse",
+ "var.attach_deny_incorrect_encryption_headers",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers[0].json",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers[0]",
+ "data.aws_iam_policy_document.deny_incorrect_encryption_headers",
+ "var.attach_inventory_destination_policy",
+ "var.attach_analytics_destination_policy",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy[0].json",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy[0]",
+ "data.aws_iam_policy_document.inventory_and_analytics_destination_policy",
+ "var.attach_policy",
+ "var.policy"
+ ]
+ }
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "local.attach_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_incorrect_encryption_headers",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_incorrect_encryption_headers",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringNotEquals"
+ },
+ "values": {
+ "references": [
+ "var.server_side_encryption_configuration.rule.apply_server_side_encryption_by_default.sse_algorithm",
+ "var.server_side_encryption_configuration.rule.apply_server_side_encryption_by_default",
+ "var.server_side_encryption_configuration.rule",
+ "var.server_side_encryption_configuration"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyIncorrectEncryptionHeaders"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_incorrect_encryption_headers"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_incorrect_kms_key_sse",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_incorrect_kms_key_sse",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringNotEquals"
+ },
+ "values": {
+ "references": [
+ "var.allowed_kms_key_arn"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption-aws-kms-key-id"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyIncorrectKmsKeySse"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_incorrect_kms_key_sse"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_insecure_transport",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_insecure_transport",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:*"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "Bool"
+ },
+ "values": {
+ "constant_value": [
+ "false"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SecureTransport"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyInsecureTransport"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_insecure_transport_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.deny_unencrypted_object_uploads",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "deny_unencrypted_object_uploads",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "Null"
+ },
+ "values": {
+ "constant_value": [
+ true
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-server-side-encryption"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyUnencryptedObjectUploads"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_deny_unencrypted_object_uploads"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.elb_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "elb_log_delivery",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "logdelivery.elasticloadbalancing.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": ""
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_elb_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.inventory_and_analytics_destination_policy",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "inventory_and_analytics_destination_policy",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "ArnLike"
+ },
+ "values": {
+ "references": [
+ "var.inventory_self_source_destination",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "var.inventory_source_bucket_arn",
+ "var.analytics_self_source_destination",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "var.analytics_source_bucket_arn"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SourceArn"
+ }
+ },
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "references": [
+ "var.inventory_self_source_destination",
+ "data.aws_caller_identity.current.id",
+ "data.aws_caller_identity.current",
+ "var.inventory_source_account_id",
+ "var.analytics_self_source_destination",
+ "data.aws_caller_identity.current.id",
+ "data.aws_caller_identity.current",
+ "var.analytics_source_account_id"
+ ]
+ },
+ "variable": {
+ "constant_value": "aws:SourceAccount"
+ }
+ },
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "constant_value": [
+ "bucket-owner-full-control"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-acl"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "s3.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "destinationInventoryAndAnalyticsPolicy"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_inventory_destination_policy",
+ "var.attach_analytics_destination_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.lb_log_delivery",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "lb_log_delivery",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:PutObject"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "StringEquals"
+ },
+ "values": {
+ "constant_value": [
+ "bucket-owner-full-control"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:x-amz-acl"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "delivery.logs.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSLogDeliveryWrite"
+ }
+ },
+ {
+ "actions": {
+ "constant_value": [
+ "s3:GetBucketAcl",
+ "s3:ListBucket"
+ ]
+ },
+ "effect": {
+ "constant_value": "Allow"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "delivery.logs.amazonaws.com"
+ ]
+ },
+ "type": {
+ "constant_value": "Service"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "AWSLogDeliveryAclCheck"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_lb_log_delivery_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_iam_policy_document.require_latest_tls",
+ "mode": "data",
+ "type": "aws_iam_policy_document",
+ "name": "require_latest_tls",
+ "provider_config_key": "s3-bucket:aws",
+ "expressions": {
+ "statement": [
+ {
+ "actions": {
+ "constant_value": [
+ "s3:*"
+ ]
+ },
+ "condition": [
+ {
+ "test": {
+ "constant_value": "NumericLessThan"
+ },
+ "values": {
+ "constant_value": [
+ "1.2"
+ ]
+ },
+ "variable": {
+ "constant_value": "s3:TlsVersion"
+ }
+ }
+ ],
+ "effect": {
+ "constant_value": "Deny"
+ },
+ "principals": [
+ {
+ "identifiers": {
+ "constant_value": [
+ "*"
+ ]
+ },
+ "type": {
+ "constant_value": "*"
+ }
+ }
+ ],
+ "resources": {
+ "references": [
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this",
+ "aws_s3_bucket.this[0].arn",
+ "aws_s3_bucket.this[0]",
+ "aws_s3_bucket.this"
+ ]
+ },
+ "sid": {
+ "constant_value": "denyOutdatedTLS"
+ }
+ }
+ ]
+ },
+ "schema_version": 0,
+ "count_expression": {
+ "references": [
+ "local.create_bucket",
+ "var.attach_require_latest_tls_policy"
+ ]
+ }
+ },
+ {
+ "address": "data.aws_partition.current",
+ "mode": "data",
+ "type": "aws_partition",
+ "name": "current",
+ "provider_config_key": "s3-bucket:aws",
+ "schema_version": 0
+ },
+ {
+ "address": "data.aws_region.current",
+ "mode": "data",
+ "type": "aws_region",
+ "name": "current",
+ "provider_config_key": "s3-bucket:aws",
+ "schema_version": 0
+ }
+ ],
+ "variables": {
+ "acceleration_status": {
+ "default": null,
+ "description": "(Optional) Sets the accelerate configuration of an existing bucket. Can be Enabled or Suspended."
+ },
+ "access_log_delivery_policy_source_accounts": {
+ "default": [],
+ "description": "(Optional) List of AWS Account IDs should be allowed to deliver access logs to this bucket."
+ },
+ "access_log_delivery_policy_source_buckets": {
+ "default": [],
+ "description": "(Optional) List of S3 bucket ARNs wich should be allowed to deliver access logs to this bucket."
+ },
+ "acl": {
+ "default": null,
+ "description": "(Optional) The canned ACL to apply. Conflicts with `grant`"
+ },
+ "allowed_kms_key_arn": {
+ "default": null,
+ "description": "The ARN of KMS key which should be allowed in PutObject"
+ },
+ "analytics_configuration": {
+ "default": {},
+ "description": "Map containing bucket analytics configuration."
+ },
+ "analytics_self_source_destination": {
+ "default": false,
+ "description": "Whether or not the analytics source bucket is also the destination bucket."
+ },
+ "analytics_source_account_id": {
+ "default": null,
+ "description": "The analytics source account id."
+ },
+ "analytics_source_bucket_arn": {
+ "default": null,
+ "description": "The analytics source bucket ARN."
+ },
+ "attach_access_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have S3 access log delivery policy attached"
+ },
+ "attach_analytics_destination_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket analytics destination policy attached."
+ },
+ "attach_deny_incorrect_encryption_headers": {
+ "default": false,
+ "description": "Controls if S3 bucket should deny incorrect encryption headers policy attached."
+ },
+ "attach_deny_incorrect_kms_key_sse": {
+ "default": false,
+ "description": "Controls if S3 bucket policy should deny usage of incorrect KMS key SSE."
+ },
+ "attach_deny_insecure_transport_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have deny non-SSL transport policy attached"
+ },
+ "attach_deny_unencrypted_object_uploads": {
+ "default": false,
+ "description": "Controls if S3 bucket should deny unencrypted object uploads policy attached."
+ },
+ "attach_elb_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have ELB log delivery policy attached"
+ },
+ "attach_inventory_destination_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket inventory destination policy attached."
+ },
+ "attach_lb_log_delivery_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have ALB/NLB log delivery policy attached"
+ },
+ "attach_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should have bucket policy attached (set to `true` to use value of `policy` as bucket policy)"
+ },
+ "attach_public_policy": {
+ "default": true,
+ "description": "Controls if a user defined public bucket policy will be attached (set to `false` to allow upstream to apply defaults to the bucket)"
+ },
+ "attach_require_latest_tls_policy": {
+ "default": false,
+ "description": "Controls if S3 bucket should require the latest version of TLS"
+ },
+ "block_public_acls": {
+ "default": true,
+ "description": "Whether Amazon S3 should block public ACLs for this bucket."
+ },
+ "block_public_policy": {
+ "default": true,
+ "description": "Whether Amazon S3 should block public bucket policies for this bucket."
+ },
+ "bucket": {
+ "default": null,
+ "description": "(Optional, Forces new resource) The name of the bucket. If omitted, Terraform will assign a random, unique name."
+ },
+ "bucket_prefix": {
+ "default": null,
+ "description": "(Optional, Forces new resource) Creates a unique bucket name beginning with the specified prefix. Conflicts with bucket."
+ },
+ "control_object_ownership": {
+ "default": false,
+ "description": "Whether to manage S3 Bucket Ownership Controls on this bucket."
+ },
+ "cors_rule": {
+ "default": [],
+ "description": "List of maps containing rules for Cross-Origin Resource Sharing."
+ },
+ "create_bucket": {
+ "default": true,
+ "description": "Controls if S3 bucket should be created"
+ },
+ "expected_bucket_owner": {
+ "default": null,
+ "description": "The account ID of the expected bucket owner"
+ },
+ "force_destroy": {
+ "default": false,
+ "description": "(Optional, Default:false ) A boolean that indicates all objects should be deleted from the bucket so that the bucket can be destroyed without error. These objects are not recoverable."
+ },
+ "grant": {
+ "default": [],
+ "description": "An ACL policy grant. Conflicts with `acl`"
+ },
+ "ignore_public_acls": {
+ "default": true,
+ "description": "Whether Amazon S3 should ignore public ACLs for this bucket."
+ },
+ "intelligent_tiering": {
+ "default": {},
+ "description": "Map containing intelligent tiering configuration."
+ },
+ "inventory_configuration": {
+ "default": {},
+ "description": "Map containing S3 inventory configuration."
+ },
+ "inventory_self_source_destination": {
+ "default": false,
+ "description": "Whether or not the inventory source bucket is also the destination bucket."
+ },
+ "inventory_source_account_id": {
+ "default": null,
+ "description": "The inventory source account id."
+ },
+ "inventory_source_bucket_arn": {
+ "default": null,
+ "description": "The inventory source bucket ARN."
+ },
+ "lifecycle_rule": {
+ "default": [],
+ "description": "List of maps containing configuration of object lifecycle management."
+ },
+ "logging": {
+ "default": {},
+ "description": "Map containing access bucket logging configuration."
+ },
+ "metric_configuration": {
+ "default": [],
+ "description": "Map containing bucket metric configuration."
+ },
+ "object_lock_configuration": {
+ "default": {},
+ "description": "Map containing S3 object locking configuration."
+ },
+ "object_lock_enabled": {
+ "default": false,
+ "description": "Whether S3 bucket should have an Object Lock configuration enabled."
+ },
+ "object_ownership": {
+ "default": "BucketOwnerEnforced",
+ "description": "Object ownership. Valid values: BucketOwnerEnforced, BucketOwnerPreferred or ObjectWriter. 'BucketOwnerEnforced': ACLs are disabled, and the bucket owner automatically owns and has full control over every object in the bucket. 'BucketOwnerPreferred': Objects uploaded to the bucket change ownership to the bucket owner if the objects are uploaded with the bucket-owner-full-control canned ACL. 'ObjectWriter': The uploading account will own the object if the object is uploaded with the bucket-owner-full-control canned ACL."
+ },
+ "owner": {
+ "default": {},
+ "description": "Bucket owner's display name and ID. Conflicts with `acl`"
+ },
+ "policy": {
+ "default": null,
+ "description": "(Optional) A valid bucket policy JSON document. Note that if the policy document is not specific enough (but still valid), Terraform may view the policy as constantly changing in a terraform plan. In this case, please make sure you use the verbose/specific version of the policy. For more information about building AWS IAM policy documents with Terraform, see the AWS IAM Policy Document Guide."
+ },
+ "putin_khuylo": {
+ "default": true,
+ "description": "Do you agree that Putin doesn't respect Ukrainian sovereignty and territorial integrity? More info: https://en.wikipedia.org/wiki/Putin_khuylo!"
+ },
+ "replication_configuration": {
+ "default": {},
+ "description": "Map containing cross-region replication configuration."
+ },
+ "request_payer": {
+ "default": null,
+ "description": "(Optional) Specifies who should bear the cost of Amazon S3 data transfer. Can be either BucketOwner or Requester. By default, the owner of the S3 bucket would incur the costs of any data transfer. See Requester Pays Buckets developer guide for more information."
+ },
+ "restrict_public_buckets": {
+ "default": true,
+ "description": "Whether Amazon S3 should restrict public bucket policies for this bucket."
+ },
+ "server_side_encryption_configuration": {
+ "default": {},
+ "description": "Map containing server-side encryption configuration."
+ },
+ "tags": {
+ "default": {},
+ "description": "(Optional) A mapping of tags to assign to the bucket."
+ },
+ "versioning": {
+ "default": {},
+ "description": "Map containing versioning configuration."
+ },
+ "website": {
+ "default": {},
+ "description": "Map containing static web-site hosting or redirect configuration."
+ }
+ }
+ },
+ "version_constraint": "4.0.1"
+ }
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/tests/terraform/parser/test_plan_parser.py b/tests/terraform/parser/test_plan_parser.py
--- a/tests/terraform/parser/test_plan_parser.py
+++ b/tests/terraform/parser/test_plan_parser.py
@@ -60,6 +60,14 @@ def test_provisioners(self):
resource_attributes = next(iter(resource_definition.values()))
self.assertTrue(resource_attributes['provisioner'])
+ def test_module_with_connected_resources(self):
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+ valid_plan_path = current_dir + "/resources/plan_module_with_connected_resources/tfplan.json"
+ tf_definition, _ = parse_tf_plan(valid_plan_path, {})
+ file_resource_definition = tf_definition['resource'][1]
+ resource_definition = next(iter(file_resource_definition.values()))
+ resource_attributes = next(iter(resource_definition.values()))
+ self.assertTrue(resource_attributes['references_'])
def test_large_file(mocker: MockerFixture):
# given
| False positives when using Terraform AWS modules
**Describe the issue**
Checks are failing when using Terraform-defined AWS modules even when module resolution is in place. This is creating a lot of unnecessary noise.
I am using the Terraform S3 module and I have explicitly defined a lifecycle configuration. However, the following checks are still failing in checkov:
`CKV2_AWS_61: "Ensure that an S3 bucket has a lifecycle configuration"`
`CKV2_AWS_6: "Ensure that S3 bucket has a Public Access block"`
Both of these checks should obviously pass. I experienced this issue with both the `terraform` and `terraform_plan` frameworks. Hence, I suspect a bug in the graph builder.
**Examples**
```
module "s3-bucket_example_complete" {
source = "terraform-aws-modules/s3-bucket/aws"
version = "3.0.0"
lifecycle_rule = [
{
id = "log1"
enabled = true
abort_incomplete_multipart_upload_days = 7
noncurrent_version_transition = [
{
days = 90
storage_class = "GLACIER"
}
]
noncurrent_version_expiration = {
days = 300
}
}
]
}
```
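For anyone trying to reproduce this programmatically, here is a minimal sketch that mirrors the `Runner`/`RunnerFilter` usage from checkov's own tests. The target folder and the assumption that the external module has already been downloaded (e.g. via `--download-external-modules`) are illustrative, not taken from the original report:
```python
from checkov.runner_filter import RunnerFilter
from checkov.terraform.runner import Runner

# directory containing the module block above; external module download is
# assumed to have happened already (e.g. via --download-external-modules)
report = Runner().run(
    root_folder=".",
    runner_filter=RunnerFilter(checks=["CKV2_AWS_61", "CKV2_AWS_6"]),
)
# per the report, both checks unexpectedly show up as failed here
print(report.get_summary())
```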
**Desktop (please complete the following information):**
- OS: macOS
- Checkov Version: 3.2.48
| 2024-04-04T20:35:18 | -1.0 |
|
bridgecrewio/checkov | 6,188 | bridgecrewio__checkov-6188 | [
"6187"
] | 7f56591897ec931d68c1b64dae1d5a8bfde84007 | diff --git a/checkov/terraform/checks/resource/azure/ACRDedicatedDataEndpointEnabled.py b/checkov/terraform/checks/resource/azure/ACRDedicatedDataEndpointEnabled.py
new file mode 100644
--- /dev/null
+++ b/checkov/terraform/checks/resource/azure/ACRDedicatedDataEndpointEnabled.py
@@ -0,0 +1,17 @@
+from checkov.common.models.enums import CheckCategories
+from checkov.terraform.checks.resource.base_resource_value_check import BaseResourceValueCheck
+
+
+class ACRDedicatedDataEndpointEnabled(BaseResourceValueCheck):
+ def __init__(self) -> None:
+ name = "Ensure dedicated data endpoints are enabled."
+ id = "CKV_AZURE_237"
+ supported_resources = ("azurerm_container_registry",)
+ categories = (CheckCategories.GENERAL_SECURITY,)
+ super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
+
+ def get_inspected_key(self) -> str:
+ return "data_endpoint_enabled"
+
+
+check = ACRDedicatedDataEndpointEnabled()
| diff --git a/tests/terraform/checks/resource/azure/example_ACRDedicatedDataEndpointEnabled/main.tf b/tests/terraform/checks/resource/azure/example_ACRDedicatedDataEndpointEnabled/main.tf
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/example_ACRDedicatedDataEndpointEnabled/main.tf
@@ -0,0 +1,26 @@
+
+resource "azurerm_container_registry" "pass" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.rg.name
+ location = azurerm_resource_group.rg.location
+ sku = "Premium"
+ anonymous_pull_enabled = false
+ data_endpoint_enabled = true
+}
+
+
+resource "azurerm_container_registry" "fail" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.rg.name
+ location = azurerm_resource_group.rg.location
+ sku = "Premium"
+}
+
+
+resource "azurerm_container_registry" "fail2" {
+ name = "containerRegistry1"
+ resource_group_name = azurerm_resource_group.rg.name
+ location = azurerm_resource_group.rg.location
+ sku = "Standard"
+ data_endpoint_enabled = false
+}
diff --git a/tests/terraform/checks/resource/azure/test_ACRDedicatedDataEndpointEnabled.py b/tests/terraform/checks/resource/azure/test_ACRDedicatedDataEndpointEnabled.py
new file mode 100644
--- /dev/null
+++ b/tests/terraform/checks/resource/azure/test_ACRDedicatedDataEndpointEnabled.py
@@ -0,0 +1,42 @@
+import os
+import unittest
+
+from checkov.runner_filter import RunnerFilter
+from checkov.terraform.runner import Runner
+from checkov.terraform.checks.resource.azure.ACRDedicatedDataEndpointEnabled import check
+
+
+class TestACRDedicatedDataEndpointEnabled(unittest.TestCase):
+
+ def test(self):
+ runner = Runner()
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+
+ test_files_dir = os.path.join(current_dir, "example_ACRDedicatedDataEndpointEnabled")
+ report = runner.run(root_folder=test_files_dir,
+ runner_filter=RunnerFilter(checks=[check.id]))
+ summary = report.get_summary()
+
+ passing_resources = {
+ 'azurerm_container_registry.pass',
+ }
+ failing_resources = {
+ 'azurerm_container_registry.fail',
+ 'azurerm_container_registry.fail2'
+ }
+ skipped_resources = {}
+
+ passed_check_resources = set([c.resource for c in report.passed_checks])
+ failed_check_resources = set([c.resource for c in report.failed_checks])
+
+ self.assertEqual(summary['passed'], len(passing_resources))
+ self.assertEqual(summary['failed'], len(failing_resources))
+ self.assertEqual(summary['skipped'], len(skipped_resources))
+ self.assertEqual(summary['parsing_errors'], 0)
+
+ self.assertEqual(passing_resources, passed_check_resources)
+ self.assertEqual(failing_resources, failed_check_resources)
+
+
+if __name__ == '__main__':
+ unittest.main()
\ No newline at end of file
| Azure Container Registry - Dedicated data endpoint should be enabled
**Describe the issue**
It seems that there is no Checkov check to ensure that dedicated data endpoints are enabled.
**Examples**
````hcl
resource "azurerm_container_registry" "acr" {
name = "containerRegistry1"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
sku = "Premium"
data_endpoint_enabled = true
}
````
**Version (please complete the following information):**
- Current
**Additional context**
Azure Container Registry introduces dedicated data endpoints. The feature enables tightly scoped client firewall rules to specific registries, minimizing data exfiltration concerns.
Dedicated data endpoints help retrieve layers from the Azure Container Registry service, with fully qualified domain names representing the registry domain.
Source: https://learn.microsoft.com/en-us/azure/container-registry/container-registry-dedicated-data-endpoints
| 2024-04-18T14:50:42 | -1.0 |
|
common-workflow-language/cwltool | 1,964 | common-workflow-language__cwltool-1964 | [
"1963"
] | 8ff1fd05a12a124d104a67b7fd8ac3382c3e5212 | diff --git a/cwltool/main.py b/cwltool/main.py
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -682,8 +682,8 @@ def formatTime(self, record: logging.LogRecord, datefmt: Optional[str] = None) -
def setup_provenance(
args: argparse.Namespace,
- argsl: List[str],
runtimeContext: RuntimeContext,
+ argsl: Optional[List[str]] = None,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
@@ -1049,10 +1049,8 @@ def main(
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
- if argsl is None:
- raise Exception("argsl cannot be None")
try:
- prov_log_stream, prov_log_handler = setup_provenance(args, argsl, runtimeContext)
+ prov_log_stream, prov_log_handler = setup_provenance(args, runtimeContext, argsl)
except ArgumentException:
return 1
| diff --git a/tests/test_main_parsed_args.py b/tests/test_main_parsed_args.py
new file mode 100644
--- /dev/null
+++ b/tests/test_main_parsed_args.py
@@ -0,0 +1,40 @@
+import io
+from pathlib import Path
+
+from cwltool.argparser import arg_parser
+from cwltool.main import main
+
+from .util import get_data
+
+
+def test_main_parsed_args(tmp_path: Path) -> None:
+ """Affirm that main can be called with parsed args only."""
+ stdout = io.StringIO()
+ stderr = io.StringIO()
+
+ unparsed_args = [get_data("tests/echo.cwl"), "--inp", "Hello"]
+ parsed_args = arg_parser().parse_args(unparsed_args)
+
+ try:
+ assert main(args=parsed_args, stdout=stdout, stderr=stderr) == 0
+ except SystemExit as err:
+ assert err.code == 0
+
+
+def test_main_parsed_args_provenance(tmp_path: Path) -> None:
+ """Affirm that main can be called with parsed args only, requesting provenance."""
+ stdout = io.StringIO()
+ stderr = io.StringIO()
+
+ prov_folder = tmp_path / "provenance" # will be created if necessary
+
+ unparsed_args = ["--provenance", str(prov_folder), get_data("tests/echo.cwl"), "--inp", "Hello"]
+ parsed_args = arg_parser().parse_args(unparsed_args)
+
+ try:
+ assert main(args=parsed_args, stdout=stdout, stderr=stderr) == 0
+ except SystemExit as err:
+ assert err.code == 0
+
+ manifest_file = prov_folder / "metadata" / "manifest.json"
+ assert manifest_file.is_file(), f"Can't find RO-Crate manifest {manifest_file}"
| Calling cwltool.main() with preparsed args and provenance fails unless unparsed args are also provided
## Expected Behavior
Calling `cwltool.main()` with preparsed args only, but including `--provenance`, should execute and create a RO-Crate, the same as when running as a script.
## Actual Behavior
The execution fails, raising `Exception("argsl cannot be None")`
## Workflow Code
Program `prov-no-argsl.py`:
```python
import sys
from cwltool.main import main as cwlmain
from cwltool.argparser import arg_parser
unparsed_args = ["--provenance", "ro-crate", "hello_world.cwl", "--message=Hello"]
parsed_args = arg_parser().parse_args(unparsed_args)
cwlmain(args=parsed_args)
```
Assuming [`hello_world.cwl`](https://github.com/common-workflow-language/user_guide/blob/main/src/_includes/cwl/hello_world.cwl) is in the working directory
```yaml
cwlVersion: v1.2
class: CommandLineTool
baseCommand: echo
inputs:
message:
type: string
default: Hello World
inputBinding:
position: 1
outputs: []
```
## Full Traceback
```pytb
INFO prov-no-argsl.py 3.1.20230719185429
Traceback (most recent call last):
File "/private/tmp/cwl-prov/prov-no-argsl.py", line 9, in <module>
cwlmain(args=parsed_args)
File "/opt/homebrew/lib/python3.11/site-packages/cwltool/main.py", line 1052, in main
raise Exception("argsl cannot be None")
Exception: argsl cannot be None
```
## Your Environment
* cwltool version:
/opt/homebrew/bin/cwltool 3.1.20230719185429
## Remarks
Note that if the unparsed arguments are also provided, then execution proceeds as expected
Program `prov-with-argsl.py`:
```python
import sys
from cwltool.argparser import arg_parser
from cwltool.main import main as cwlmain
unparsed_args = ["--provenance", "ro-crate", "hello_world.cwl", "--message=Hello"]
parsed_args = arg_parser().parse_args(unparsed_args)
cwlmain(args=parsed_args, argsl=unparsed_args)
```
```
INFO prov-with-argsl.py 3.1.20230719185429
INFO [cwltool] prov-with-argsl.py --provenance ro-crate hello_world.cwl --message=Hello
INFO Resolved 'hello_world.cwl' to 'file:///private/tmp/cwl-prov/hello_world.cwl'
INFO [job hello_world.cwl] /private/tmp/docker_tmpg0yk4mky$ echo \
Hello
Hello
INFO [job hello_world.cwl] completed success
/opt/homebrew/lib/python3.11/site-packages/rdflib/plugins/serializers/nt.py:40: UserWarning: NTSerializer always uses UTF-8 encoding. Given encoding was: None
warnings.warn(
{}INFO Final process status is success
INFO [provenance] Finalizing Research Object
INFO [provenance] Deleting existing /private/tmp/cwl-prov/ro-crate
INFO [provenance] Research Object saved to /private/tmp/cwl-prov/ro-crate
```
Note further that it is only when requesting provenance that this error occurs.
Program `no-prov.py`:
```python
import sys
from cwltool.main import main as cwlmain
from cwltool.argparser import arg_parser
unparsed_args = ["hello_world.cwl", "--message=Hello"]
parsed_args = arg_parser().parse_args(unparsed_args)
cwlmain(args=parsed_args)
```
```
INFO no-prov.py 3.1.20230719185429
INFO Resolved 'hello_world.cwl' to 'file:///private/tmp/cwl-prov/hello_world.cwl'
INFO [job hello_world.cwl] /private/tmp/docker_tmpt2b9sgm2$ echo \
Hello
Hello
INFO [job hello_world.cwl] completed success
{}INFO Final process status is success
```
| This blocks requesting provenance when using [Calrissian](https://github.com/Duke-GCB/calrissian), which calls `cwltool.main` directly with preparsed arguments and no unparsed arguments.
This issue has been mentioned on **Common Workflow Language Discourse**. There might be relevant details there:
https://cwl.discourse.group/t/cwltool-main-text-arguments-a-hard-requirement-for-provenance/862/2
Dear @davidjsherman, thank you for the issue and your interest in supporting CWLProv on Calrissian!
I think this is a typing error and I would accept a PR to fix it by removing the exception and adjusting the types to be `Optional`. | 2024-01-06T13:47:31 | -1.0 |
marshmallow-code/webargs | 541 | marshmallow-code__webargs-541 | [
"527"
] | e62f478ae39efa55f363b389f5c69b583bf420f8 | diff --git a/src/webargs/core.py b/src/webargs/core.py
--- a/src/webargs/core.py
+++ b/src/webargs/core.py
@@ -283,10 +283,9 @@ def parse(
error_status_code=error_status_code,
error_headers=error_headers,
)
- warnings.warn(
- "_on_validation_error hook did not raise an exception and flow "
- "of control returned to parse(). You may get unexpected results"
- )
+ raise ValueError(
+ "_on_validation_error hook did not raise an exception"
+ ) from error
return data
def get_default_request(self):
| diff --git a/tests/test_core.py b/tests/test_core.py
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -289,14 +289,19 @@ def test_value_error_raised_if_parse_called_with_invalid_location(parser, web_re
@mock.patch("webargs.core.Parser.handle_error")
def test_handle_error_called_when_parsing_raises_error(handle_error, web_request):
+ # handle_error must raise an error to be valid
+ handle_error.side_effect = ValidationError("parsing failed")
+
def always_fail(*args, **kwargs):
raise ValidationError("error occurred")
p = Parser()
assert handle_error.call_count == 0
- p.parse({"foo": fields.Field()}, web_request, validate=always_fail)
+ with pytest.raises(ValidationError):
+ p.parse({"foo": fields.Field()}, web_request, validate=always_fail)
assert handle_error.call_count == 1
- p.parse({"foo": fields.Field()}, web_request, validate=always_fail)
+ with pytest.raises(ValidationError):
+ p.parse({"foo": fields.Field()}, web_request, validate=always_fail)
assert handle_error.call_count == 2
@@ -360,6 +365,25 @@ def handle_error(error, req, schema, *, error_status_code, error_headers):
parser.parse(mock_schema, web_request)
+def test_custom_error_handler_must_reraise(web_request):
+ class CustomError(Exception):
+ pass
+
+ mock_schema = mock.Mock(spec=Schema)
+ mock_schema.strict = True
+ mock_schema.load.side_effect = ValidationError("parsing json failed")
+ parser = Parser()
+
+ @parser.error_handler
+ def handle_error(error, req, schema, *, error_status_code, error_headers):
+ pass
+
+ # because the handler above does not raise a new error, the parser should
+ # raise a ValueError -- indicating a programming error
+ with pytest.raises(ValueError):
+ parser.parse(mock_schema, web_request)
+
+
def test_custom_location_loader(web_request):
web_request.data = {"foo": 42}
diff --git a/tests/test_flaskparser.py b/tests/test_flaskparser.py
--- a/tests/test_flaskparser.py
+++ b/tests/test_flaskparser.py
@@ -1,4 +1,4 @@
-from werkzeug.exceptions import HTTPException
+from werkzeug.exceptions import HTTPException, BadRequest
import pytest
from flask import Flask
@@ -85,6 +85,9 @@ def test_parsing_unexpected_headers_when_raising(self, testapp):
@mock.patch("webargs.flaskparser.abort")
def test_abort_called_on_validation_error(mock_abort):
+ # error handling must raise an error to be valid
+ mock_abort.side_effect = BadRequest("foo")
+
app = Flask("testapp")
def validate(x):
@@ -97,7 +100,8 @@ def validate(x):
data=json.dumps({"value": 41}),
content_type="application/json",
):
- parser.parse(argmap)
+ with pytest.raises(HTTPException):
+ parser.parse(argmap)
mock_abort.assert_called()
abort_args, abort_kwargs = mock_abort.call_args
assert abort_args[0] == 422
| Failing to (re)raise an error when handling validation errors should raise a new error
Per #525, we're going to start warning if you set up an error handler which does not, itself, raise an error. The result of failing to raise in your handler is that parsing "falls through" and returns incorrect data (`None` today, but the behavior is not well-defined) to the caller.
In version 7.0 of webargs, we should change this to raise an error, because it's not correct usage of webargs.
I think the most developer-friendly thing for us to do in this case is to raise `ValueError` or something similar (i.e. not a `ValidationError`, which they might be catching and handling outside of the parsing context). That will make it immediately apparent to the developer that they've done something wrong.
We can't make any guesses about whether or not people are testing their error handlers well, so no matter what we do here, there's a risk that people's applications crash or fail in these scenarios.
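For illustration, a minimal sketch of a handler that satisfies this requirement by always raising. The Flask parser and the `MyAPIError` exception type are assumptions used only for the example; the handler signature matches the one exercised in the tests above:
```python
from webargs.flaskparser import FlaskParser

parser = FlaskParser()


class MyAPIError(Exception):
    """Hypothetical application-level error type."""


@parser.error_handler
def handle_error(error, req, schema, *, error_status_code, error_headers):
    # never return normally -- re-raise so parse() cannot fall through
    raise MyAPIError(error.messages) from error
```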
| 2020-09-08T18:42:44 | -1.0 |
|
marshmallow-code/webargs | 555 | marshmallow-code__webargs-555 | [
"253"
] | 60a4a27143b4844294eb80fa3e8e29653d8f5a5f | diff --git a/src/webargs/falconparser.py b/src/webargs/falconparser.py
--- a/src/webargs/falconparser.py
+++ b/src/webargs/falconparser.py
@@ -3,6 +3,8 @@
import falcon
from falcon.util.uri import parse_query_string
+import marshmallow as ma
+
from webargs import core
from webargs.multidictproxy import MultiDictProxy
@@ -69,7 +71,21 @@ def to_dict(self, *args, **kwargs):
class FalconParser(core.Parser):
- """Falcon request argument parser."""
+ """Falcon request argument parser.
+
+ Defaults to using the `media` location. See :py:meth:`~FalconParser.load_media` for
+ details on the media location."""
+
+ # by default, Falcon will use the 'media' location to load data
+ #
+ # this effectively looks the same as loading JSON data by default, but if
+ # you add a handler for a different media type to Falcon, webargs will
+ # automatically pick up on that capability
+ DEFAULT_LOCATION = "media"
+ DEFAULT_UNKNOWN_BY_LOCATION = dict(
+ media=ma.RAISE, **core.Parser.DEFAULT_UNKNOWN_BY_LOCATION
+ )
+ __location_map__ = dict(media="load_media", **core.Parser.__location_map__)
# Note on the use of MultiDictProxy throughout:
# Falcon parses query strings and form values into ordinary dicts, but with
@@ -95,6 +111,25 @@ def load_form(self, req, schema):
return form
return MultiDictProxy(form, schema)
+ def load_media(self, req, schema):
+ """Return data unpacked and parsed by one of Falcon's media handlers.
+ By default, Falcon only handles JSON payloads.
+
+ To configure additional media handlers, see the
+ `Falcon documentation on media types`__.
+
+ .. _FalconMedia: https://falcon.readthedocs.io/en/stable/api/media.html
+ __ FalconMedia_
+
+ .. note::
+
+ The request stream will be read and left at EOF.
+ """
+ # if there is no body, return missing instead of erroring
+ if req.content_length in (None, 0):
+ return core.missing
+ return req.media
+
def _raw_load_json(self, req):
"""Return a json payload from the request for the core parser's load_json
| diff --git a/src/webargs/testing.py b/src/webargs/testing.py
--- a/src/webargs/testing.py
+++ b/src/webargs/testing.py
@@ -62,9 +62,6 @@ def test_parse_json_or_form(self, testapp):
def test_parse_querystring_default(self, testapp):
assert testapp.get("/echo").json == {"name": "World"}
- def test_parse_json_default(self, testapp):
- assert testapp.post_json("/echo_json", {}).json == {"name": "World"}
-
def test_parse_json_with_charset(self, testapp):
res = testapp.post(
"/echo_json",
diff --git a/tests/apps/falcon_app.py b/tests/apps/falcon_app.py
--- a/tests/apps/falcon_app.py
+++ b/tests/apps/falcon_app.py
@@ -37,6 +37,12 @@ def on_post(self, req, resp):
resp.body = json.dumps(parsed)
+class EchoMedia:
+ def on_post(self, req, resp):
+ parsed = parser.parse(hello_args, req, location="media")
+ resp.body = json.dumps(parsed)
+
+
class EchoJSONOrForm:
def on_post(self, req, resp):
parsed = parser.parse(hello_args, req, location="json_or_form")
@@ -161,6 +167,7 @@ def create_app():
app.add_route("/echo", Echo())
app.add_route("/echo_form", EchoForm())
app.add_route("/echo_json", EchoJSON())
+ app.add_route("/echo_media", EchoMedia())
app.add_route("/echo_json_or_form", EchoJSONOrForm())
app.add_route("/echo_use_args", EchoUseArgs())
app.add_route("/echo_use_kwargs", EchoUseKwargs())
diff --git a/tests/test_falconparser.py b/tests/test_falconparser.py
--- a/tests/test_falconparser.py
+++ b/tests/test_falconparser.py
@@ -16,28 +16,47 @@ def test_parse_files(self, testapp):
def test_use_args_hook(self, testapp):
assert testapp.get("/echo_use_args_hook?name=Fred").json == {"name": "Fred"}
+ def test_parse_media(self, testapp):
+ assert testapp.post_json("/echo_media", {"name": "Fred"}).json == {
+ "name": "Fred"
+ }
+
+ def test_parse_media_missing(self, testapp):
+ assert testapp.post("/echo_media", "").json == {"name": "World"}
+
+ def test_parse_media_empty(self, testapp):
+ assert testapp.post_json("/echo_media", {}).json == {"name": "World"}
+
+ def test_parse_media_error_unexpected_int(self, testapp):
+ res = testapp.post_json("/echo_media", 1, expect_errors=True)
+ assert res.status_code == 422
+
# https://github.com/marshmallow-code/webargs/issues/427
- def test_parse_json_with_nonutf8_chars(self, testapp):
+ @pytest.mark.parametrize("path", ["/echo_json", "/echo_media"])
+ def test_parse_json_with_nonutf8_chars(self, testapp, path):
res = testapp.post(
- "/echo_json",
+ path,
b"\xfe",
headers={"Accept": "application/json", "Content-Type": "application/json"},
expect_errors=True,
)
assert res.status_code == 400
- assert res.json["errors"] == {"json": ["Invalid JSON body."]}
+ if path.endswith("json"):
+ assert res.json["errors"] == {"json": ["Invalid JSON body."]}
# https://github.com/sloria/webargs/issues/329
- def test_invalid_json(self, testapp):
+ @pytest.mark.parametrize("path", ["/echo_json", "/echo_media"])
+ def test_invalid_json(self, testapp, path):
res = testapp.post(
- "/echo_json",
+ path,
'{"foo": "bar", }',
headers={"Accept": "application/json", "Content-Type": "application/json"},
expect_errors=True,
)
assert res.status_code == 400
- assert res.json["errors"] == {"json": ["Invalid JSON body."]}
+ if path.endswith("json"):
+ assert res.json["errors"] == {"json": ["Invalid JSON body."]}
# Falcon converts headers to all-caps
def test_parsing_headers(self, testapp):
| FalconParser should ideally support falcon's native media decoding
Falcon has a native media handling mechanism which can decode an incoming request body based on the `Content-Type` header, adding the dictionary of resulting key-value pairs as the cached property `req.media`. I've written my own FalconParser subclass that (very naively) uses this, but it seems like something that might be worth supporting out of the box.
```python
def parse_json(self, req, name, field):
"""
Pull a JSON body value from the request.
uses falcon's native req.media
"""
json_data = self._cache.get("json_data")
if json_data is None:
self._cache["json_data"] = json_data = req.media
return core.get_value(json_data, name, field, allow_many_nested=True)
```
This could probably be improved upon; since the `media` property is already cached on the request object, we could just access `req.media` directly without caching on the parser. (Not sure if this impacts other things that might use that cache, though; I haven't dug deep enough to fully understand that implication.) Also, since `media` was added in 1.3, if webargs still wanted to support older versions of falcon we could add a check for it and fall back to the existing behavior.
Maybe something like:
```python
def parse_json(self, req, name, field):
"""Pull a JSON body value from the request.
.. note::
The request stream will be read and left at EOF.
"""
json_data = req.media if hasattr(req, 'media') else self._cache.get("json_data")
if json_data is None:
self._cache["json_data"] = json_data = parse_json_body(req)
return core.get_value(json_data, name, field, allow_many_nested=True)
```
| I'm open to this, though it might make Falcon's parsing inconsistent with the other parsers. But perhaps that's OK, since Falcon users may expect `req.media` to be used.
I'd certainly review/merge a PR.
I'm looking at this as something we can clean out of the backlog.
After 6.0, we are now supporting Falcon 2.x only, so I think it's quite easy.
I'd rather define `media` as a distinct location. So you could use a falcon parser with `location="media"` and it will do basically what was proposed above (modified suitably to fit with webargs v6).
We could even add a note for webargs 7.0 to upgrade to have `FalconParser.DEFAULT_LOCATION = "media"`, which would tweak the default. Everything remains consistent that way, and we don't muddle `json` with the `media` behavior. I don't know that I'd like to make that change in 6.x -- it feels too breaky for too specific of a niche for me.
If someone submitted a PR with that, I'd happily review, but I'll try to remember to get to it at some point soon.
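As a rough sketch of what that user-facing API could look like (the resource class and the argmap below are made up for illustration; the `location="media"` call matches what the test app in this change exercises):
```python
from webargs import fields
from webargs.falconparser import parser

hello_args = {"name": fields.Str(required=True)}


class HelloResource:
    def on_post(self, req, resp):
        # with req.media handling, any media handler configured on the Falcon
        # app (JSON by default) applies before webargs sees the data
        args = parser.parse(hello_args, req, location="media")
        resp.media = {"name": args["name"]}
```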
@lafrech, I haven't looked at this in a while, but I think maybe we should do it as part of 7.0. I'm adding it to the milestone and we can take it out if desired.
Falcon is the framework I'm probably least familiar with, so I'll want to read up on this feature again to make sure I understand what `req.media` does/is. | 2020-11-05T20:57:01 | -1.0 |
marshmallow-code/webargs | 557 | marshmallow-code__webargs-557 | [
"540"
] | ccd9737cd0e5abbd6bfacd6de107b1a3da6ee394 | diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,7 +3,7 @@
FRAMEWORKS = [
"Flask>=0.12.2",
- "Django>=1.11.16",
+ "Django>=2.2.0",
"bottle>=0.12.13",
"tornado>=4.5.2",
"pyramid>=1.9.1",
diff --git a/src/webargs/djangoparser.py b/src/webargs/djangoparser.py
--- a/src/webargs/djangoparser.py
+++ b/src/webargs/djangoparser.py
@@ -59,9 +59,10 @@ def load_cookies(self, req, schema):
return req.COOKIES
def load_headers(self, req, schema):
- raise NotImplementedError(
- f"Header parsing not supported by {self.__class__.__name__}"
- )
+ """Return headers from the request."""
+ # Django's HttpRequest.headers is a case-insensitive dict type, but it
+ # isn't a multidict, so this is not proxied
+ return req.headers
def load_files(self, req, schema):
"""Return files from the request as a MultiDictProxy."""
| diff --git a/tests/test_djangoparser.py b/tests/test_djangoparser.py
--- a/tests/test_djangoparser.py
+++ b/tests/test_djangoparser.py
@@ -14,10 +14,6 @@ def create_app(self):
def test_use_args_with_validation(self):
pass
- @pytest.mark.skip(reason="headers location not supported by DjangoParser")
- def test_parsing_headers(self, testapp):
- pass
-
def test_parsing_in_class_based_view(self, testapp):
assert testapp.get("/echo_cbv?name=Fred").json == {"name": "Fred"}
assert testapp.post_json("/echo_cbv", {"name": "Fred"}).json == {"name": "Fred"}
| Add support for headers to DjangoParser
```
NotImplementedError: Header parsing not supported by DjangoParser
```
| The decision not to support this was made 7 years ago (i.e. circa Django 1.5), and I think django didn't support this well.
I think we can add support today -- I'll try to find the time to do it and test properly.
For now, I think you could just go with the following snippet to get support
```python
from webargs.djangoparser import DjangoParser
class Parser(DjangoParser):
def load_headers(self, req, schema):
return req.META
djangoparser = Parser()
use_args = djangoparser.use_args
use_kwargs = djangoparser.use_kwargs
```
I just noticed that this was closed incorrectly.
I'd like to fix this in 7.0. Reopening and adding to the milestone. | 2020-11-05T21:42:03 | -1.0 |
marshmallow-code/webargs | 583 | marshmallow-code__webargs-583 | [
"234"
] | f953dff5c77b5eeb96046aef4a29fd9d097085c3 | diff --git a/src/webargs/core.py b/src/webargs/core.py
--- a/src/webargs/core.py
+++ b/src/webargs/core.py
@@ -322,7 +322,10 @@ def parse(
location_data = self._load_location_data(
schema=schema, req=req, location=location
)
- data = schema.load(location_data, **load_kwargs)
+ preprocessed_data = self.pre_load(
+ location_data, schema=schema, req=req, location=location
+ )
+ data = schema.load(preprocessed_data, **load_kwargs)
self._validate_arguments(data, validators)
except ma.exceptions.ValidationError as error:
self._on_validation_error(
@@ -523,6 +526,15 @@ def handle_error(error, req, schema, *, error_status_code, error_headers):
self.error_callback = func
return func
+ def pre_load(
+ self, location_data: Mapping, *, schema: ma.Schema, req: Request, location: str
+ ) -> Mapping:
+ """A method of the parser which can transform data after location
+ loading is done. By default it does nothing, but users can subclass
+ parsers and override this method.
+ """
+ return location_data
+
def _handle_invalid_json_error(
self,
error: typing.Union[json.JSONDecodeError, UnicodeDecodeError],
| diff --git a/tests/test_core.py b/tests/test_core.py
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -1,4 +1,5 @@
import datetime
+import typing
from unittest import mock
import pytest
@@ -37,6 +38,9 @@ class MockRequestParser(Parser):
def load_querystring(self, req, schema):
return self._makeproxy(req.query, schema)
+ def load_form(self, req, schema):
+ return MultiDictProxy(req.form, schema)
+
def load_json(self, req, schema):
return req.json
@@ -1224,3 +1228,84 @@ class CustomParser(Parser):
p = CustomParser()
ret = p.parse(argmap, web_request)
assert ret == {"value": "hello world"}
+
+
+def test_parser_pre_load(web_request):
+ class CustomParser(MockRequestParser):
+ # pre-load hook to strip whitespace from query params
+ def pre_load(self, data, *, schema, req, location):
+ if location == "query":
+ return {k: v.strip() for k, v in data.items()}
+ return data
+
+ parser = CustomParser()
+
+ # mock data for both query and json
+ web_request.query = web_request.json = {"value": " hello "}
+ argmap = {"value": fields.Str()}
+
+ # data gets through for 'json' just fine
+ ret = parser.parse(argmap, web_request)
+ assert ret == {"value": " hello "}
+
+ # but for 'query', the pre_load hook changes things
+ ret = parser.parse(argmap, web_request, location="query")
+ assert ret == {"value": "hello"}
+
+
+# this test is meant to be a run of the WhitspaceStrippingFlaskParser we give
+# in the docs/advanced.rst examples for how to use pre_load
+# this helps ensure that the example code is correct
+# rather than a FlaskParser, we're working with the mock parser, but it's
+# otherwise the same
+def test_whitespace_stripping_parser_example(web_request):
+ def _strip_whitespace(value):
+ if isinstance(value, str):
+ value = value.strip()
+ elif isinstance(value, typing.Mapping):
+ return {k: _strip_whitespace(value[k]) for k in value}
+ elif isinstance(value, (list, tuple)):
+ return type(value)(map(_strip_whitespace, value))
+ return value
+
+ class WhitspaceStrippingParser(MockRequestParser):
+ def pre_load(self, location_data, *, schema, req, location):
+ if location in ("query", "form"):
+ ret = _strip_whitespace(location_data)
+ return ret
+ return location_data
+
+ parser = WhitspaceStrippingParser()
+
+ # mock data for query, form, and json
+ web_request.form = web_request.query = web_request.json = {"value": " hello "}
+ argmap = {"value": fields.Str()}
+
+ # data gets through for 'json' just fine
+ ret = parser.parse(argmap, web_request)
+ assert ret == {"value": " hello "}
+
+ # but for 'query' and 'form', the pre_load hook changes things
+ for loc in ("query", "form"):
+ ret = parser.parse(argmap, web_request, location=loc)
+ assert ret == {"value": "hello"}
+
+ # check that it applies in the case where the field is a list type
+ # applied to an argument (logic for `tuple` is effectively the same)
+ web_request.form = web_request.query = web_request.json = {
+ "ids": [" 1", "3", " 4"],
+ "values": [" foo ", " bar"],
+ }
+ schema = Schema.from_dict(
+ {"ids": fields.List(fields.Int), "values": fields.List(fields.Str)}
+ )
+ for loc in ("query", "form"):
+ ret = parser.parse(schema, web_request, location=loc)
+ assert ret == {"ids": [1, 3, 4], "values": ["foo", "bar"]}
+
+ # json loading should also work even though the pre_load hook above
+ # doesn't strip whitespace from JSON data
+ # - values=[" foo ", ...] will have whitespace preserved
+ # - ids=[" 1", ...] will still parse okay because " 1" is valid for fields.Int
+ ret = parser.parse(schema, web_request, location="json")
+ assert ret == {"ids": [1, 3, 4], "values": [" foo ", " bar"]}
| Automatically trim leading/trailing whitespace from argument values
Does webargs provide any clean way to do this? I guess leading/trailing whitespace is almost never something you want (especially when you have required fields that must not be empty)...
| I don't think we'll be adding this into webargs core. It's simple enough to override `parse_arg` for this use case.
```python
from webargs.flaskparser import FlaskParser
class CustomParser(FlaskParser):
def parse_arg(self, name, field, req, locations=None):
ret = super().parse_arg(name, field, req, locations=locations)
if hasattr(ret, 'strip'):
return ret.strip()
return ret
parser = CustomParser()
use_args = parser.use_args
use_kwargs = parser.use_kwargs
```
This doesn't work anymore in the latest version, since `parse_arg` is gone. I could override `_load_location_data` and do it in there, but that's an internal API. Is there a better way for it?
```python
def strip_whitespace(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, dict):
return {k: strip_whitespace(v) for k, v in value.items()}
elif isinstance(value, (list, set)):
return type(value)(map(strip_whitespace, value))
return value
class StrippingFlaskParser(FlaskParser):
def _load_location_data(self, **kwargs):
data = super()._load_location_data(**kwargs)
return strip_whitespace(data)
```
@sloria is the workaround in my previous comment here still the way to go in v6/v7?
Actually, this no longer works due to the MultiDictProxy and it does not seem straightforward at all to get this to work without ugly hacks. Maybe it'd be a good time to revisit supporting this in the core?
```python
def _strip_whitespace(value):
if isinstance(value, str):
value = value.strip()
elif isinstance(value, MultiDict):
return type(value)((k, _strip_whitespace(v)) for k, vals in value.lists() for v in vals)
elif isinstance(value, dict):
        return {k: _strip_whitespace(v) for k, v in value.items()}
    elif isinstance(value, (list, set)):
        return type(value)(map(_strip_whitespace, value))
return value
class StrippingFlaskParser(FlaskParser):
def load_querystring(self, req, schema):
return MultiDictProxy(_strip_whitespace(req.args), schema)
def load_form(self, req, schema):
return MultiDictProxy(_strip_whitespace(req.form), schema)
def load_json(self, req, schema):
return _strip_whitespace(super().load_json(req, schema))
```
This seems to work, but it feels much more fragile than the old solution with `parse_arg`.
Reopening, but can't give this the attention it requires at this very moment. Perhaps @lafrech or @sirosen can give this some thought
My instinct when reading this is that stripping whitespace would be the job of the fields in use in your schemas.
Something like
```python
class StrippedString(fields.String):
def _deserialize(self, value, attr, data, **kwargs):
value = super()._deserialize(value, attr, data, **kwargs)
return value.strip()
```
That works for some simple cases, but obviously is not as easy to globally apply as the 5.x solution of changing the way that arg parsing works.
*If* you're only using dict-schemas, you can set a custom schema class on your parser, and then you should be able to do it like so:
```python
# modified slightly from @ThiefMaster's example, but untested
def _strip_whitespace(value):
if isinstance(value, str):
value = value.strip()
# you'll be getting a MultiDictProxy here potentially, but it should work
elif isinstance(value, Mapping):
        return {k: _strip_whitespace(value[k]) for k in value}
    elif isinstance(value, (list, set)):
        return type(value)(map(_strip_whitespace, value))
return value
class StrippingSchema(Schema):
@pre_load
def strip_whitespace(self, data, **kwargs):
return _strip_whitespace(data)
class StrippingFlaskParser(FlaskParser):
# or use `schema_class=...` when instantiating a parser
DEFAULT_SCHEMA_CLASS = StrippingSchema
```
I'm pretty sure that works if you look at [`MultiDictProxy.__getitem__`](https://github.com/marshmallow-code/webargs/blob/4cb3b19cb16a7717eeb0efb0a8a6000d5c63c5b5/src/webargs/multidictproxy.py#L29-L41).
But it only works for dict-schemas. For any cases where a full Schema class is used, it won't apply.
So I'm wondering what the right approach is here.
This "feels" to me like a pre-load hook, but applied on a per-parser basis, rather than a per-schema basis.
Should we allow users to define a "parser pre-load hook"?
I'm imagining something in the Parser like
```python
location_data = self._load_location_data(
schema=schema, req=req, location=location
)
if self.pre_load_hook: # <---- New!
location_data = self.pre_load_hook(location_data, schema=schema, req=req, location=location)
data = schema.load(location_data, **load_kwargs)
```
I'm still thinking about this, but sharing my thoughts thus far. I don't think this leads towards parser-post-load hooks or anything like that -- the problem is unique to pre-load because what's wanted is something which goes between `_load_location_data` and `schema.load()`, and you can't do it by just subclassing and wrapping `parse()`.
> My instinct when reading this is that stripping whitespace would be the job of the fields in use in your schemas.
While that would work, it requires being much more explicit all the time - so not amazing. And in almost all cases there's no point in having surrounding whitespace (the only case that comes to my mind where it may be useful is for a markdown field where someone wants to put an indented code block at the very beginning).
> But it only works for dict-schemas. For any cases where a full Schema class is used, it won't apply.
Hm, that may not be such a big problem since one could easily use a custom base schema and inherit all their schemas from that one. Drawback: AFAIK the execution order when having multiple `pre_load` hooks is undefined. So having such a hook in the base schema would not guarantee that it doesn't run before some other more specific `pre_load` hook.
I assume that solving this using custom fields is simply too laborious and too error prone for your use-case.
And I've run into exactly that issue with pre-load hooks in the past: having a base schema with `pre_load` or `post_load` is usually safe only if no child schema defines hooks because marshmallow doesn't guarantee hook order.
How would you feel about the possibility of this being a "parser pre-load hook"? I'm imagining some usage like
```python
@parser.pre_load
def strip_whitespace(data, schema, req, location):
    if location == "json":  # as an example of the possibilities this opens up, skip json
        return data
    return _strip_whitespace(data)
```
I'm more keen on this, plus documentation covering `strip_whitespace` as an example, than I am on something like adding `Parser.strip_whitespace = True` and making it do this. It's more flexible and doesn't bake as many details into webargs.
Maybe it needs a better/less confusing name than "pre_load", and maybe I'm over-engineering here. Hard for me to see! 😅
Looks like a good solution :) And it's more flexible than a hardcoded "strip whitespace" feature. | 2021-02-01T23:11:50 | -1.0 |
marshmallow-code/webargs | 584 | marshmallow-code__webargs-584 | [
"563"
] | d4fbbb7e70648af961ba0c5214812bc8cf3426f1 | diff --git a/src/webargs/core.py b/src/webargs/core.py
--- a/src/webargs/core.py
+++ b/src/webargs/core.py
@@ -8,8 +8,6 @@
from marshmallow import ValidationError
from marshmallow.utils import missing
-from webargs.fields import DelimitedList
-
logger = logging.getLogger(__name__)
@@ -34,6 +32,12 @@
# generic type var with no particular meaning
T = typing.TypeVar("T")
+# a set of fields which are known to satisfy the `is_multiple` criteria, but
+# which come from marshmallow and therefore don't know about webargs (and
+# do not set `is_multiple=True`)
+# TODO: `ma.fields.Tuple` should be added here in v8.0
+KNOWN_MULTI_FIELDS: typing.List[typing.Type] = [ma.fields.List]
+
# a value used as the default for arguments, so that when `None` is passed, it
# can be distinguished from the default value
@@ -59,7 +63,12 @@ def _callable_or_raise(obj: typing.Optional[T]) -> typing.Optional[T]:
def is_multiple(field: ma.fields.Field) -> bool:
"""Return whether or not `field` handles repeated/multi-value arguments."""
- return isinstance(field, ma.fields.List) and not isinstance(field, DelimitedList)
+ # fields which set `is_multiple = True/False` will have the value selected,
+ # otherwise, we check for explicit criteria
+ is_multiple_attr = getattr(field, "is_multiple", None)
+ if is_multiple_attr is not None:
+ return is_multiple_attr
+ return isinstance(field, tuple(KNOWN_MULTI_FIELDS))
def get_mimetype(content_type: str) -> str:
diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -55,6 +55,8 @@ class DelimitedFieldMixin:
"""
delimiter: str = ","
+ # delimited fields set is_multiple=False for webargs.core.is_multiple
+ is_multiple: bool = False
def _serialize(self, value, attr, obj, **kwargs):
# serializing will start with parent-class serialization, so that we correctly
| diff --git a/tests/test_core.py b/tests/test_core.py
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -1032,6 +1032,72 @@ def test_type_conversion_with_multiple_required(web_request, parser):
parser.parse(args, web_request)
+@pytest.mark.parametrize("input_dict", multidicts)
+@pytest.mark.parametrize(
+ "setting",
+ ["is_multiple_true", "is_multiple_false", "is_multiple_notset", "list_field"],
+)
+def test_is_multiple_detection(web_request, parser, input_dict, setting):
+ # define a custom List-like type which deserializes string lists
+ str_instance = fields.String()
+
+ # this custom class "multiplexes" in that it can be given a single value or
+ # list of values -- a single value is treated as a string, and a list of
+ # values is treated as a list of strings
+ class CustomMultiplexingField(fields.Field):
+ def _deserialize(self, value, attr, data, **kwargs):
+ if isinstance(value, str):
+ return str_instance.deserialize(value, **kwargs)
+ return [str_instance.deserialize(v, **kwargs) for v in value]
+
+ def _serialize(self, value, attr, data, **kwargs):
+ if isinstance(value, str):
+ return str_instance._serialize(value, **kwargs)
+ return [str_instance._serialize(v, **kwargs) for v in value]
+
+ class CustomMultipleField(CustomMultiplexingField):
+ is_multiple = True
+
+ class CustomNonMultipleField(CustomMultiplexingField):
+ is_multiple = False
+
+ # the request's query params are the input multidict
+ web_request.query = input_dict
+
+ # case 1: is_multiple=True
+ if setting == "is_multiple_true":
+ # the multidict should unpack to a list of strings
+ #
+ # order is not necessarily guaranteed by the multidict implementations, but
+ # both values must be present
+ args = {"foos": CustomMultipleField()}
+ result = parser.parse(args, web_request, location="query")
+ assert result["foos"] in (["a", "b"], ["b", "a"])
+ # case 2: is_multiple=False
+ elif setting == "is_multiple_false":
+ # the multidict should unpack to a string
+ #
+ # either value may be returned, depending on the multidict implementation,
+ # but not both
+ args = {"foos": CustomNonMultipleField()}
+ result = parser.parse(args, web_request, location="query")
+ assert result["foos"] in ("a", "b")
+ # case 3: is_multiple is not set
+ elif setting == "is_multiple_notset":
+ # this should be the same as is_multiple=False
+ args = {"foos": CustomMultiplexingField()}
+ result = parser.parse(args, web_request, location="query")
+ assert result["foos"] in ("a", "b")
+ # case 4: the field is a List (special case)
+ elif setting == "list_field":
+ # this should behave like the is_multiple=True case
+ args = {"foos": fields.List(fields.Str())}
+ result = parser.parse(args, web_request, location="query")
+ assert result["foos"] in (["a", "b"], ["b", "a"])
+ else:
+ raise NotImplementedError
+
+
def test_validation_errors_in_validator_are_passed_to_handle_error(parser, web_request):
def validate(value):
raise ValidationError("Something went wrong.")
| `is_multiple` vs custom fields
Right now I don't get multiple values in a custom field that does not inherit from the `List` field because `is_multiple` only checks for this.
And for some cases rewriting the field to be nested in an actual List field is not feasible, for example a `SQLAlchemyModelList`-like field where sending individual queries would be inefficient compared to one query with an SQL `IN` filter.
I propose adding a `MultiValueField` which would simply inherit from `Field` and do nothing itself, and having `is_multiple` check for that class instead of checking `isinstance(field, List)`. `List` would then of course inherit from that instead of `Field`.
Alternatively, check for something like `isinstance(field, List) or getattr(field, 'is_multiple', False)`.
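For illustration, a rough sketch of that alternative check (the `SQLAlchemyModelList` name is just a placeholder for such a custom field):
```python
from marshmallow import fields


def is_multiple(field):
    # List (and its subclasses) stay multi-valued; any other field can opt in
    return isinstance(field, fields.List) or getattr(field, "is_multiple", False)


class SQLAlchemyModelList(fields.Field):
    is_multiple = True  # receive all values from the multidict at once
```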
I can send a PR if there's interest in this.
| I admit that I might not be well enough educated about marshmallow-sqlalchemy (I've never used it) to be able to make a good judgement about that part of this issue. But clearly, `is_multiple` isn't very extensible right now, and that's a problem. (Also, I think it doesn't handle `Tuple`? So that's a bug.)
If we put `MultiValueField` into `marshmallow`, that might get confusing with `Nested(..., many=True)` as a possibility. I think `is_multiple` makes more sense as a characteristic of a field instance than a field class -- that makes it possible (at least in theory) to have a field whose instances vary with respect to this.
As a result, I think the `getattr` solution is the right one. `is_multiple` seems like a reasonable name for the attribute to me. @lafrech, do you maybe have a different opinion about this, or any thoughts about naming? | 2021-02-02T04:12:35 | -1.0 |
marshmallow-code/webargs | 594 | marshmallow-code__webargs-594 | [
"585"
] | 2f05e314163825b57018ac9682d7e6463dbfcd35 | diff --git a/src/webargs/core.py b/src/webargs/core.py
--- a/src/webargs/core.py
+++ b/src/webargs/core.py
@@ -142,9 +142,8 @@ class Parser:
DEFAULT_VALIDATION_STATUS: int = DEFAULT_VALIDATION_STATUS
#: Default error message for validation errors
DEFAULT_VALIDATION_MESSAGE: str = "Invalid value."
- # TODO: add ma.fields.Tuple in v8.0
#: field types which should always be treated as if they set `is_multiple=True`
- KNOWN_MULTI_FIELDS: typing.List[typing.Type] = [ma.fields.List]
+ KNOWN_MULTI_FIELDS: typing.List[typing.Type] = [ma.fields.List, ma.fields.Tuple]
#: Maps location => method name
__location_map__: typing.Dict[str, typing.Union[str, typing.Callable]] = {
diff --git a/src/webargs/multidictproxy.py b/src/webargs/multidictproxy.py
--- a/src/webargs/multidictproxy.py
+++ b/src/webargs/multidictproxy.py
@@ -18,7 +18,10 @@ def __init__(
self,
multidict,
schema: ma.Schema,
- known_multi_fields: typing.Tuple[typing.Type, ...] = (ma.fields.List,),
+ known_multi_fields: typing.Tuple[typing.Type, ...] = (
+ ma.fields.List,
+ ma.fields.Tuple,
+ ),
):
self.data = multidict
self.known_multi_fields = known_multi_fields
| diff --git a/tests/test_core.py b/tests/test_core.py
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -35,7 +35,7 @@ class MockRequestParser(Parser):
"""A minimal parser implementation that parses mock requests."""
def load_querystring(self, req, schema):
- return MultiDictProxy(req.query, schema)
+ return self._makeproxy(req.query, schema)
def load_json(self, req, schema):
return req.json
@@ -1040,6 +1040,7 @@ def test_type_conversion_with_multiple_required(web_request, parser):
"is_multiple_false",
"is_multiple_notset",
"list_field",
+ "tuple_field",
"added_to_known",
],
)
@@ -1103,13 +1104,20 @@ class CustomNonMultipleField(CustomMultiplexingField):
args = {"foos": fields.List(fields.Str())}
result = parser.parse(args, web_request, location="query")
assert result["foos"] in (["a", "b"], ["b", "a"])
+ # case 5: the field is a Tuple (special case)
+ elif setting == "tuple_field":
+ # this should behave like the is_multiple=True case and produce a tuple
+ args = {"foos": fields.Tuple((fields.Str, fields.Str))}
+ result = parser.parse(args, web_request, location="query")
+ assert result["foos"] in (("a", "b"), ("b", "a"))
+ # case 6: the field is custom, but added to the known fields of the parser
elif setting == "added_to_known":
# if it's included in the known multifields and is_multiple is not set, behave
# like is_multiple=True
parser.KNOWN_MULTI_FIELDS.append(CustomMultiplexingField)
args = {"foos": CustomMultiplexingField()}
result = parser.parse(args, web_request, location="query")
- assert result["foos"] in ("a", "b")
+ assert result["foos"] in (["a", "b"], ["b", "a"])
else:
raise NotImplementedError
| Add marshmallow.fields.Tuple to detected `is_multiple` fields
I noticed this while working on #584 . Currently, `is_multiple(ma.fields.List(...)) == True`, but `is_multiple(ma.fields.Tuple(...)) == False`.
We should add `Tuple` so that `is_multiple` returns true.
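For illustration, the inconsistency is easy to see with the helper itself (as it behaves before this change):
```python
import marshmallow as ma
from webargs.core import is_multiple

assert is_multiple(ma.fields.List(ma.fields.Str()))          # True, as expected
assert not is_multiple(ma.fields.Tuple((ma.fields.Str(),)))  # surprising: False
```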
It is possible that users have subclassed `fields.Tuple` or done something else which could make that a breaking change. I don't think that's very likely, but we are already queuing things up for 8.0 . I'm going to put this in the 8.0 milestone. We could probably do it sooner if there's a reason to do so.
I don't think using `Tuple` for a query param is a good idea. Supporting `...?foo=1&foo=2` but *not* `...?foo=1` or `...?foo=1&foo=2&foo=3` is unusual and therefore likely to be confusing. I'd advise webargs users not to do that. However! It's surprising that `Tuple` is not treated as `is_multiple=True`, and I'm just aiming for the least surprising behavior for webargs here.
| 2021-03-12T22:30:26 | -1.0 |
|
marshmallow-code/webargs | 682 | marshmallow-code__webargs-682 | [
"679"
] | ef8a34ae75ef200d7006ada35770aa170dae5902 | diff --git a/src/webargs/asyncparser.py b/src/webargs/asyncparser.py
--- a/src/webargs/asyncparser.py
+++ b/src/webargs/asyncparser.py
@@ -5,7 +5,6 @@
import functools
import inspect
import typing
-from collections.abc import Mapping
from marshmallow import Schema, ValidationError
import marshmallow as ma
@@ -150,8 +149,8 @@ def use_args(
request_obj = req
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
- if isinstance(argmap, Mapping):
- argmap = self.schema_class.from_dict(dict(argmap))()
+ if isinstance(argmap, dict):
+ argmap = self.schema_class.from_dict(argmap)()
def decorator(func: typing.Callable) -> typing.Callable:
req_ = request_obj
diff --git a/src/webargs/core.py b/src/webargs/core.py
--- a/src/webargs/core.py
+++ b/src/webargs/core.py
@@ -3,7 +3,6 @@
import functools
import typing
import logging
-from collections.abc import Mapping
import json
import marshmallow as ma
@@ -26,7 +25,7 @@
Request = typing.TypeVar("Request")
ArgMap = typing.Union[
ma.Schema,
- typing.Mapping[str, ma.fields.Field],
+ typing.Dict[str, typing.Union[ma.fields.Field, typing.Type[ma.fields.Field]]],
typing.Callable[[Request], ma.Schema],
]
ValidateArg = typing.Union[None, typing.Callable, typing.Iterable[typing.Callable]]
@@ -34,6 +33,11 @@
ErrorHandler = typing.Callable[..., typing.NoReturn]
# generic type var with no particular meaning
T = typing.TypeVar("T")
+# type var for callables, to make type-preserving decorators
+C = typing.TypeVar("C", bound=typing.Callable)
+# type var for a callable which is an error handler
+# used to ensure that the error_handler decorator is type preserving
+ErrorHandlerT = typing.TypeVar("ErrorHandlerT", bound=ErrorHandler)
# a value used as the default for arguments, so that when `None` is passed, it
@@ -257,8 +261,10 @@ def _get_schema(self, argmap: ArgMap, req: Request) -> ma.Schema:
schema = argmap()
elif callable(argmap):
schema = argmap(req)
+ elif isinstance(argmap, dict):
+ schema = self.schema_class.from_dict(argmap)()
else:
- schema = self.schema_class.from_dict(dict(argmap))()
+ raise TypeError(f"argmap was of unexpected type {type(argmap)}")
return schema
def parse(
@@ -417,8 +423,8 @@ def greet(args):
request_obj = req
# Optimization: If argmap is passed as a dictionary, we only need
# to generate a Schema once
- if isinstance(argmap, Mapping):
- argmap = self.schema_class.from_dict(dict(argmap))()
+ if isinstance(argmap, dict):
+ argmap = self.schema_class.from_dict(argmap)()
def decorator(func):
req_ = request_obj
@@ -468,7 +474,7 @@ def greet(name):
kwargs["as_kwargs"] = True
return self.use_args(*args, **kwargs)
- def location_loader(self, name: str):
+ def location_loader(self, name: str) -> typing.Callable[[C], C]:
"""Decorator that registers a function for loading a request location.
The wrapped function receives a schema and a request.
@@ -489,13 +495,13 @@ def load_data(request, schema):
:param str name: The name of the location to register.
"""
- def decorator(func):
+ def decorator(func: C) -> C:
self.__location_map__[name] = func
return func
return decorator
- def error_handler(self, func: ErrorHandler) -> ErrorHandler:
+ def error_handler(self, func: ErrorHandlerT) -> ErrorHandlerT:
"""Decorator that registers a custom error handling function. The
function should receive the raised error, request object,
`marshmallow.Schema` instance used to parse the request, error status code,
@@ -523,8 +529,13 @@ def handle_error(error, req, schema, *, error_status_code, error_headers):
return func
def pre_load(
- self, location_data: Mapping, *, schema: ma.Schema, req: Request, location: str
- ) -> Mapping:
+ self,
+ location_data: typing.Mapping,
+ *,
+ schema: ma.Schema,
+ req: Request,
+ location: str,
+ ) -> typing.Mapping:
"""A method of the parser which can transform data after location
loading is done. By default it does nothing, but users can subclass
parsers and override this method.
| diff --git a/tests/test_core.py b/tests/test_core.py
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -1,3 +1,4 @@
+import collections
import datetime
import typing
from unittest import mock
@@ -1321,3 +1322,15 @@ def pre_load(self, location_data, *, schema, req, location):
# - ids=[" 1", ...] will still parse okay because " 1" is valid for fields.Int
ret = parser.parse(schema, web_request, location="json")
assert ret == {"ids": [1, 3, 4], "values": [" foo ", " bar"]}
+
+
+def test_parse_rejects_non_dict_argmap_mapping(parser, web_request):
+ web_request.json = {"username": 42, "password": 42}
+ argmap = collections.UserDict(
+ {"username": fields.Field(), "password": fields.Field()}
+ )
+
+ # UserDict is dict-like in all meaningful ways, but not a subclass of `dict`
+ # it will therefore be rejected with a TypeError when used
+ with pytest.raises(TypeError):
+ parser.parse(argmap, web_request)
| The type of "argmap" allows for `Mapping[str, Field]`, but `Schema.from_dict` only supports `Dict[str, Field]`
I ran into this while looking at getting #663 merged.
We have arguments annotated as allowing a `Mapping`. The most likely usage for users is just a dict, and that is all that our examples show.
`Schema.from_dict` uses `dict.copy`. That's not part of the `Mapping` protocol in `collections.abc`.
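For illustration, a hypothetical mapping type can satisfy the annotation without providing `copy()` at all:
```python
from collections.abc import Mapping


class FieldMap(Mapping):
    # a perfectly valid Mapping, with no dict-style .copy()
    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)


assert not hasattr(FieldMap({}), "copy")  # so Schema.from_dict would choke on it
```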
I almost suggested that `Schema.from_dict` be modified to allow for a `Mapping` object. It's not particularly hard (switch `input_object.copy()` to `dict(input_object)`), but it seems wrong. There's no particular reason to support non-dict mappings for either `marshmallow` or `webargs`. I think our annotations in `webargs` are just inaccurate.
I think we should consider updating annotations in `webargs` from `Mapping` to `Dict`.
@lafrech, for now, I'm going to make an update to the #663 branch and add `dict(argmap)` calls to make the types align.
| > I think we should consider updating annotations in webargs from Mapping to Dict.
Sure. This would be a fix since the code always expected a dict. | 2022-01-05T17:32:36 | -1.0 |
marshmallow-code/webargs | 832 | marshmallow-code__webargs-832 | [
"823"
] | 44e2037a5607f3655f47d475272eab01d49aaaa0 | diff --git a/src/webargs/fields.py b/src/webargs/fields.py
--- a/src/webargs/fields.py
+++ b/src/webargs/fields.py
@@ -15,6 +15,8 @@
"""
from __future__ import annotations
+import typing
+
import marshmallow as ma
# Expose all fields from marshmallow.fields.
@@ -64,6 +66,8 @@ class DelimitedFieldMixin:
delimiter: str = ","
# delimited fields set is_multiple=False for webargs.core.is_multiple
is_multiple: bool = False
+ # NOTE: in 8.x this defaults to "" but in 9.x it will be 'missing'
+ empty_value: typing.Any = ""
def _serialize(self, value, attr, obj, **kwargs):
# serializing will start with parent-class serialization, so that we correctly
@@ -77,6 +81,8 @@ def _deserialize(self, value, attr, data, **kwargs):
if not isinstance(value, (str, bytes)):
raise self.make_error("invalid")
values = value.split(self.delimiter) if value else []
+ # convert empty strings to the empty value; typically "" and therefore a no-op
+ values = [v or self.empty_value for v in values]
return super()._deserialize(values, attr, data, **kwargs)
@@ -117,6 +123,12 @@ class DelimitedTuple(DelimitedFieldMixin, ma.fields.Tuple):
default_error_messages = {"invalid": "Not a valid delimited tuple."}
- def __init__(self, tuple_fields, *, delimiter: str | None = None, **kwargs):
+ def __init__(
+ self,
+ tuple_fields,
+ *,
+ delimiter: str | None = None,
+ **kwargs,
+ ):
self.delimiter = delimiter or self.delimiter
super().__init__(tuple_fields, **kwargs)
| diff --git a/tests/test_core.py b/tests/test_core.py
--- a/tests/test_core.py
+++ b/tests/test_core.py
@@ -11,6 +11,7 @@
INCLUDE,
RAISE,
Schema,
+ missing,
post_load,
pre_load,
validates_schema,
@@ -1106,6 +1107,47 @@ def test_delimited_tuple_passed_invalid_type(web_request, parser):
assert excinfo.value.messages == {"json": {"ids": ["Not a valid delimited tuple."]}}
+def test_delimited_list_custom_empty_value(web_request, parser):
+ class ZeroList(fields.DelimitedList):
+ empty_value = 0
+
+ web_request.json = {"ids": "1,,3"}
+ schema_cls = Schema.from_dict({"ids": ZeroList(fields.Int())})
+ schema = schema_cls()
+
+ parsed = parser.parse(schema, web_request)
+ assert parsed["ids"] == [1, 0, 3]
+
+
+def test_delimited_tuple_custom_empty_value(web_request, parser):
+ class ZeroTuple(fields.DelimitedTuple):
+ empty_value = 0
+
+ web_request.json = {"ids": "1,,3"}
+ schema_cls = Schema.from_dict(
+ {"ids": ZeroTuple((fields.Int, fields.Int, fields.Int))}
+ )
+ schema = schema_cls()
+
+ parsed = parser.parse(schema, web_request)
+ assert parsed["ids"] == (1, 0, 3)
+
+
+def test_delimited_list_using_missing_for_empty(web_request, parser):
+ # this is "future" because we plan to make this the default for webargs v9.0
+ class FutureList(fields.DelimitedList):
+ empty_value = missing
+
+ web_request.json = {"ids": "foo,,bar"}
+ schema_cls = Schema.from_dict(
+ {"ids": FutureList(fields.String(load_default="nil"))}
+ )
+ schema = schema_cls()
+
+ parsed = parser.parse(schema, web_request)
+ assert parsed["ids"] == ["foo", "nil", "bar"]
+
+
def test_missing_list_argument_not_in_parsed_result(web_request, parser):
# arg missing in request
web_request.json = {}
| Dealing with empty values in `DelimitedFieldMixin`
`DelimitedList(String())` deserializes "a,,c" as `["a", "", "c"]`.
I guess this meets user expectations.
My expectation with integers would be that
`DelimitedList(Integer(allow_none=True))` deserializes `"1,,3"` as `[1,None,3]`
but it errors.
The reason for this is that when the string is split, it is turned into `["1", "", "3"]`. This is why it works in the string case.
I'm not sure this was really intended. It may be a side effect of the `split` function that happens to do well with strings.
We could change that to replace empty values with `None`. But it would break the string use case, unless the user overloads `String` field to deserialize `None` as `""`.
Likewise, users may already overload `Integer` to deserialize `""` as `None` and no modification is required to `DelimitedFieldMixin`.
Just been caught by this and wondering out loud. Advice welcome.
In any case, there is an intrinsic limitation in the delimited string format: one can't distinguish empty string from missing value (as opposed to a JSON payload). It is not clear to me how OpenAPI (for instance) deals with the case of an empty element in an array (in a query argument).
| We could add another argument/instance variable just for the empty case. I don't know if I like it yet as an idea, but we'd get...
```python
DelimitedList(String(), empty="") # default
DelimitedList(Integer(), empty=0)
DelimitedList(Integer(allow_none=True), empty=None)
```
For the record, I think allowing for None has utility. I've seen APIs do things like
```
range=0,100
range=1, # no upper bound
```
Having written that, I'm starting to think that I like adding an argument for this. It's not much new API surface and it ought to work. Are there other ideas we should consider (e.g. a classvar)?
Indeed, adding an argument here makes total sense. Thanks.
Argument should be enough IMHO. Class var would allow someone to subclass to avoid the argument duplication but I don't think it is worth it.
I don't need this right now. But it is nice to keep this in the backlog so that anyone needing it can work on it. It is a pretty easy contribution.
It would be interesting to consider how this can be documented in OpenAPI via apispec.
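For reference, the merged change above went with a class attribute (`empty_value`) rather than a constructor argument; a usage sketch mirroring the new tests:
```python
from marshmallow import Schema
from webargs import fields


class ZeroList(fields.DelimitedList):
    # empty items in "1,,3" deserialize to 0 instead of ""
    empty_value = 0


schema = Schema.from_dict({"ids": ZeroList(fields.Int())})()
assert schema.load({"ids": "1,,3"}) == {"ids": [1, 0, 3]}
```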
| 2023-04-26T19:58:02 | -1.0 |
marshmallow-code/webargs | 943 | marshmallow-code__webargs-943 | [
"941"
] | 847293910d5e9059ab4a728e8342f86f0b16246f | diff --git a/src/webargs/__init__.py b/src/webargs/__init__.py
--- a/src/webargs/__init__.py
+++ b/src/webargs/__init__.py
@@ -14,8 +14,8 @@
__version__ = importlib.metadata.version("webargs")
__parsed_version__ = Version(__version__)
__version_info__: tuple[int, int, int] | tuple[int, int, int, str, int] = (
- __parsed_version__.release
-) # type: ignore[assignment]
+ __parsed_version__.release # type: ignore[assignment]
+)
if __parsed_version__.pre:
__version_info__ += __parsed_version__.pre # type: ignore[assignment]
__all__ = ("ValidationError", "fields", "missing", "validate")
| diff --git a/tests/test_tornadoparser.py b/tests/test_tornadoparser.py
--- a/tests/test_tornadoparser.py
+++ b/tests/test_tornadoparser.py
@@ -8,8 +8,8 @@
import tornado.httpserver
import tornado.httputil
import tornado.ioloop
+import tornado.testing
import tornado.web
-from tornado.testing import AsyncHTTPTestCase
from webargs import fields, missing
from webargs.core import json, parse_json
@@ -20,6 +20,22 @@
use_kwargs,
)
+
+class BaseAsyncTestCase(tornado.testing.AsyncHTTPTestCase):
+ # this isn't a real test case itself
+ __test__ = False
+
+ # Workaround for https://github.com/pytest-dev/pytest/issues/12263.
+ #
+ # this was suggested by one of the pytest maintainers while a patch
+ # for Tornado is pending
+ #
+ # we may need it even after the patch, since we want to support testing on
+ # older Tornado versions until we drop support for them
+ def runTest(self):
+ pass
+
+
name = "name"
value = "value"
@@ -460,7 +476,7 @@ def get(self, id, args):
)
-class TestApp(AsyncHTTPTestCase):
+class TestApp(BaseAsyncTestCase):
def get_app(self):
return echo_app
@@ -528,7 +544,7 @@ def post(self, args):
)
-class TestValidateApp(AsyncHTTPTestCase):
+class TestValidateApp(BaseAsyncTestCase):
def get_app(self):
return validate_app
| Testsuite fails under pytest==8.2.0 with `'AsyncHTTPTestCase' has no attribute 'runTest'`
We currently have some test failures in basically all python versions, starting ~2 days ago. At first glance, it looks like an issue with `tornado`'s `AsyncHTTPTestCase`, but `tornado` doesn't have a recent release.
Looking at what projects updated recently, I flagged `pytest` as a good candidate for investigation, and testing with `pytest==8.1.2` works fine. So something related to unittest TestCases changed in 8.2.0 in a way that breaks tornado tests.
For reference, here's one of the error traces:
```
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'AsyncHTTPTestCase' object has no attribute 'runTest'
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'TestApp' object has no attribute 'runTest'
____________________ ERROR collecting tests/test_tornadoparser.py ____________________
.tox/py311/lib/python3.11/site-packages/tornado/testing.py:180: in __init__
setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName)))
E AttributeError: 'TestValidateApp' object has no attribute 'runTest'
```
`runTest` looks like a dynamically created method in unittest, whose name gets passed on init to TestCase.
I started looking at changes to `pytest`'s unittest module ( https://github.com/pytest-dev/pytest/blame/main/src/_pytest/unittest.py ), but I'm out of time for digging into this right now.
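For reference, the workaround applied in the test patch above is a no-op `runTest` on a shared base class:
```python
import tornado.testing


class BaseAsyncTestCase(tornado.testing.AsyncHTTPTestCase):
    # not a test case itself
    __test__ = False

    # pytest 8.2 no longer provides a `runTest` member, but Tornado's
    # TestCase.__init__ still looks it up, so define a harmless no-op
    def runTest(self):
        pass
```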
| 2024-05-17T19:11:00 | -1.0 |
|
sql-machine-learning/elasticdl | 742 | sql-machine-learning__elasticdl-742 | [
"723"
] | c062cfe14459883d0fcc908fe175fefdf15b6151 | diff --git a/elasticdl/python/elasticdl/client/client.py b/elasticdl/python/elasticdl/client/client.py
--- a/elasticdl/python/elasticdl/client/client.py
+++ b/elasticdl/python/elasticdl/client/client.py
@@ -236,17 +236,16 @@ def _submit(image_name, model_file, job_name, args, argv):
job_name=job_name,
event_callback=None,
).create_master(
- job_name,
- image_name,
- _m_file_in_docker(model_file),
- args.master_resource_request,
- args.master_resource_limit,
- args.master_pod_priority,
- args.image_pull_policy,
- args.volume_name,
- args.mount_path,
- args.restart_policy,
- container_args,
+ job_name=job_name,
+ image_name=image_name,
+ resource_requests=args.master_resource_request,
+ resource_limits=args.master_resource_limit,
+ pod_priority=args.master_pod_priority,
+ image_pull_policy=args.image_pull_policy,
+ volume_name=args.volume_name,
+ mount_path=args.mount_path,
+ restart_policy=args.restart_policy,
+ args=container_args,
)
diff --git a/elasticdl/python/elasticdl/common/k8s_client.py b/elasticdl/python/elasticdl/common/k8s_client.py
--- a/elasticdl/python/elasticdl/common/k8s_client.py
+++ b/elasticdl/python/elasticdl/common/k8s_client.py
@@ -65,56 +65,43 @@ def get_master_pod_name(self):
def get_worker_pod_name(self, worker_id):
return "elasticdl-%s-worker-%s" % (self._job_name, str(worker_id))
- def _create_pod(
- self,
- pod_name,
- job_name,
- image_name,
- command,
- resource_requests,
- resource_limits,
- container_args,
- pod_priority,
- image_pull_policy,
- restart_policy,
- volume_name,
- mount_path,
- owner_pod,
- env,
- ):
+ def _create_pod(self, **kargs):
# Container
container = client.V1Container(
- name=pod_name,
- image=image_name,
- command=command,
+ name=kargs["pod_name"],
+ image=kargs["image_name"],
+ command=kargs["command"],
resources=client.V1ResourceRequirements(
- requests=resource_requests, limits=resource_limits
+ requests=kargs["resource_requests"],
+ limits=kargs["resource_limits"],
),
- args=container_args,
- image_pull_policy=image_pull_policy,
- env=env,
+ args=kargs["container_args"],
+ image_pull_policy=kargs["image_pull_policy"],
+ env=kargs["env"],
)
# Pod
spec = client.V1PodSpec(
containers=[container],
- restart_policy=restart_policy,
- priority_class_name=pod_priority,
+ restart_policy=kargs["restart_policy"],
+ priority_class_name=kargs["pod_priority"],
)
# Mount data path
- if all([volume_name, mount_path]):
+ if all([kargs["volume_name"], kargs["mount_path"]]):
volume = client.V1Volume(
- name=volume_name,
+ name=kargs["volume_name"],
persistent_volume_claim=pvcVolumeSource(
claim_name="fileserver-claim", read_only=False
),
)
spec.volumes = [volume]
container.volume_mounts = [
- client.V1VolumeMount(name=volume_name, mount_path=mount_path)
+ client.V1VolumeMount(
+ name=kargs["volume_name"], mount_path=kargs["mount_path"]
+ )
]
- elif any([volume_name, mount_path]):
+ elif any([kargs["volume_name"], kargs["mount_path"]]):
raise ValueError(
"Not both of the parameters volume_name and "
"mount_path are provided."
@@ -126,19 +113,22 @@ def _create_pod(
api_version="v1",
block_owner_deletion=True,
kind="Pod",
- name=owner_pod[0].metadata.name,
- uid=owner_pod[0].metadata.uid,
+ name=kargs["owner_pod"][0].metadata.name,
+ uid=kargs["owner_pod"][0].metadata.uid,
)
]
- if owner_pod
+ if kargs["owner_pod"]
else None
)
pod = client.V1Pod(
spec=spec,
metadata=client.V1ObjectMeta(
- name=pod_name,
- labels={"app": "elasticdl", ELASTICDL_JOB_KEY: job_name},
+ name=kargs["pod_name"],
+ labels={
+ "app": "elasticdl",
+ ELASTICDL_JOB_KEY: kargs["job_name"],
+ },
# TODO: Add tests for this once we've done refactoring on
# k8s client code and the constant strings
owner_references=owner_ref,
@@ -147,20 +137,7 @@ def _create_pod(
)
return pod
- def create_master(
- self,
- job_name,
- image_name,
- model_file,
- master_resource_requests,
- master_resource_limits,
- master_pod_priority,
- image_pull_policy,
- volume_name,
- mount_path,
- restart_policy,
- args,
- ):
+ def create_master(self, **kargs):
env = [
V1EnvVar(
name="MY_POD_IP",
@@ -170,38 +147,26 @@ def create_master(
)
]
pod = self._create_pod(
- "elasticdl-" + job_name + "-master",
- job_name,
- image_name,
- ["python"],
- parse_resource(master_resource_requests),
- parse_resource(master_resource_limits),
- args,
- master_pod_priority,
- image_pull_policy,
- restart_policy,
- volume_name,
- mount_path,
- None,
- env,
+ pod_name="elasticdl-" + kargs["job_name"] + "-master",
+ job_name=kargs["job_name"],
+ image_name=kargs["image_name"],
+ command=["python"],
+ resource_requests=parse_resource(kargs["resource_requests"]),
+ resource_limits=parse_resource(kargs["resource_limits"]),
+ container_args=kargs["args"],
+ pod_priority=kargs["pod_priority"],
+ image_pull_policy=kargs["image_pull_policy"],
+ restart_policy=kargs["restart_policy"],
+ volume_name=kargs["volume_name"],
+ mount_path=kargs["mount_path"],
+ owner_pod=None,
+ env=env,
)
resp = self._v1.create_namespaced_pod(self._ns, pod)
self._logger.info("Master launched. status='%s'" % str(resp.status))
- def create_worker(
- self,
- worker_id,
- resource_requests,
- resource_limits,
- priority=None,
- mount_path=None,
- volume_name=None,
- image_pull_policy=None,
- command=None,
- args=None,
- restart_policy="Never",
- ):
- self._logger.info("Creating worker: " + str(worker_id))
+ def create_worker(self, **kargs):
+ self._logger.info("Creating worker: " + str(kargs["worker_id"]))
# Find that master pod that will be used as the owner reference
# for this worker pod.
pods = self._v1.list_namespaced_pod(
@@ -214,20 +179,20 @@ def create_worker(
if (pod.metadata.name == self.get_master_pod_name())
]
pod = self._create_pod(
- self.get_worker_pod_name(worker_id),
- self._job_name,
- self._image,
- command,
- resource_requests,
- resource_limits,
- args,
- priority,
- image_pull_policy,
- restart_policy,
- volume_name,
- mount_path,
- master_pod,
- None,
+ pod_name=self.get_worker_pod_name(kargs["worker_id"]),
+ job_name=self._job_name,
+ image_name=self._image,
+ command=kargs["command"],
+ resource_requests=kargs["resource_requests"],
+ resource_limits=kargs["resource_limits"],
+ container_args=kargs["args"],
+ pod_priority=kargs["priority"],
+ image_pull_policy=kargs["image_pull_policy"],
+ restart_policy=kargs["restart_policy"],
+ volume_name=kargs["volume_name"],
+ mount_path=kargs["mount_path"],
+ owner_pod=master_pod,
+ env=None,
)
return self._v1.create_namespaced_pod(self._ns, pod)
diff --git a/elasticdl/python/elasticdl/master/k8s_worker_manager.py b/elasticdl/python/elasticdl/master/k8s_worker_manager.py
--- a/elasticdl/python/elasticdl/master/k8s_worker_manager.py
+++ b/elasticdl/python/elasticdl/master/k8s_worker_manager.py
@@ -64,13 +64,13 @@ def _start_worker(self, worker_id):
self._logger.info("Starting worker: %d" % worker_id)
with self._lock:
pod = self._k8s_client.create_worker(
- worker_id,
- self._resource_requests,
- self._resource_limits,
- self._pod_priority,
- self._mount_path,
- self._volume_name,
- self._image_pull_policy,
+ worker_id=worker_id,
+ resource_requests=self._resource_requests,
+ resource_limits=self._resource_limits,
+ priority=self._pod_priority,
+ mount_path=self._mount_path,
+ volume_name=self._volume_name,
+ image_pull_policy=self._image_pull_policy,
command=self._command,
args=self._args + ["--worker_id", str(worker_id)],
restart_policy=self._restart_policy,
| diff --git a/elasticdl/python/tests/k8s_client_test.py b/elasticdl/python/tests/k8s_client_test.py
--- a/elasticdl/python/tests/k8s_client_test.py
+++ b/elasticdl/python/tests/k8s_client_test.py
@@ -38,7 +38,16 @@ def test_client(self):
resource = {"cpu": "100m", "memory": "64M"}
for i in range(3):
_ = c.create_worker(
- "worker-%d" % i, resource, resource, command=["echo"]
+ worker_id="worker-%d" % i,
+ resource_requests=resource,
+ resource_limits=resource,
+ command=["echo"],
+ priority=None,
+ args=None,
+ mount_path=None,
+ volume_name=None,
+ image_pull_policy="Never",
+ restart_policy="Never"
)
time.sleep(5)
| Package parameters in functions of pod creation
The signatures of pod-creation functions like `create_master` below are too complex; all the parameters should be packaged together to make the code cleaner.
```python
def create_master(
self,
job_name,
image_name,
model_file,
master_resource_requests,
master_resource_limits,
master_pod_priority,
image_pull_policy,
volume_name,
mount_path,
restart_policy,
args,
):
...
```
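For reference, the patch above takes the keyword-argument route rather than a parameter class; a minimal standalone sketch of that calling style (the helper body and values are illustrative):
```python
def create_master(**kwargs):
    # callers must name every pod parameter; the helper looks them up by key
    required = ("job_name", "image_name", "resource_requests", "restart_policy")
    missing = [key for key in required if key not in kwargs]
    if missing:
        raise ValueError("missing pod parameters: " + ", ".join(missing))
    return "elasticdl-%s-master" % kwargs["job_name"]


print(create_master(
    job_name="test-job",
    image_name="elasticdl:dev",
    resource_requests="cpu=1,memory=1024M",
    restart_policy="Never",
))
```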
| 2019-06-21T09:13:08 | -1.0 |
|
sql-machine-learning/elasticdl | 761 | sql-machine-learning__elasticdl-761 | [
"711"
] | 18381d011e291466c7d2edb50dcbe1a8ad8e89ab | diff --git a/elasticdl/python/elasticdl/common/k8s_utils.py b/elasticdl/python/elasticdl/common/k8s_utils.py
--- a/elasticdl/python/elasticdl/common/k8s_utils.py
+++ b/elasticdl/python/elasticdl/common/k8s_utils.py
@@ -2,6 +2,10 @@
_ALLOWED_RESOURCE_TYPES = ["memory", "disk", "ephemeral-storage", "cpu", "gpu"]
+# Any domain name is (syntactically) valid if it's a dot-separated list of
+# identifiers, each no longer than 63 characters, and made up of letters,
+# digits and dashes (no underscores).
+_GPU_VENDOR_REGEX_STR = r"^[a-zA-Z\d-]{,63}(\.[a-zA-Z\d-]{,63})*/gpu$"
def _is_numeric(n):
@@ -57,7 +61,14 @@ def parse_resource(resource_str):
_valid_mem_spec(v)
elif k == "cpu":
_valid_cpu_spec(v)
- elif k == "gpu":
+ elif "gpu" in k:
+ if k == "gpu":
+ k = "nvidia.com/gpu"
+ elif not re.compile(_GPU_VENDOR_REGEX_STR).match(k):
+ raise ValueError(
+ "gpu resource name does not have a valid vendor name: %s"
+ % k
+ )
_valid_gpu_spec(v)
else:
raise ValueError(
| diff --git a/elasticdl/python/tests/k8s_utils_test.py b/elasticdl/python/tests/k8s_utils_test.py
--- a/elasticdl/python/tests/k8s_utils_test.py
+++ b/elasticdl/python/tests/k8s_utils_test.py
@@ -11,7 +11,7 @@ def test_parse_resource(self):
"cpu": "250m",
"memory": "32Mi",
"disk": "64Mi",
- "gpu": "1",
+ "nvidia.com/gpu": "1",
"ephemeral-storage": "32Mi",
},
parse_resource(
@@ -20,17 +20,32 @@ def test_parse_resource(self):
)
# When cpu is non-numeric, parse_resource works as expected
self.assertEqual(
- {"cpu": "250m", "memory": "32Mi", "disk": "64Mi", "gpu": "1"},
+ {
+ "cpu": "250m",
+ "memory": "32Mi",
+ "disk": "64Mi",
+ "nvidia.com/gpu": "1",
+ },
parse_resource("cpu=250m,memory=32Mi,disk=64Mi,gpu=1"),
)
# When cpu is integer, parse_resource works as expected
self.assertEqual(
- {"cpu": "1", "memory": "32Mi", "disk": "64Mi", "gpu": "1"},
+ {
+ "cpu": "1",
+ "memory": "32Mi",
+ "disk": "64Mi",
+ "nvidia.com/gpu": "1",
+ },
parse_resource("cpu=1,memory=32Mi,disk=64Mi,gpu=1"),
)
# When cpu is float, parse_resource works as expected
self.assertEqual(
- {"cpu": "0.1", "memory": "32Mi", "disk": "64Mi", "gpu": "1"},
+ {
+ "cpu": "0.1",
+ "memory": "32Mi",
+ "disk": "64Mi",
+ "nvidia.com/gpu": "1",
+ },
parse_resource("cpu=0.1,memory=32Mi,disk=64Mi,gpu=1"),
)
# When cpu is non-numeric, raise an error
@@ -54,6 +69,45 @@ def test_parse_resource(self):
parse_resource,
"cpu=2,memory=32Mi,disk=64Mi,gpu=0.1",
)
+ # When gpu resource name has a valid vendor name,
+ # parse_resource works as expected
+ self.assertEqual(
+ {
+ "cpu": "0.1",
+ "memory": "32Mi",
+ "disk": "64Mi",
+ "amd.com/gpu": "1",
+ },
+ parse_resource("cpu=0.1,memory=32Mi,disk=64Mi,amd.com/gpu=1"),
+ )
+ # When gpu resource name does not have a valid vendor name,
+ # raise an error
+ self.assertRaisesRegex(
+ ValueError,
+ "gpu resource name does not have a valid vendor name: blah-gpu",
+ parse_resource,
+ "cpu=2,memory=32Mi,disk=64Mi,blah-gpu=1",
+ )
+ # When gpu resource name does not have a valid vendor name,
+ # raise an error
+ self.assertRaisesRegex(
+ ValueError,
+ "gpu resource name does not have a valid vendor name: @#/gpu",
+ parse_resource,
+ "cpu=2,memory=32Mi,disk=64Mi,@#/gpu=1",
+ )
+ self.assertRaisesRegex(
+ ValueError,
+ "gpu resource name does not have a valid vendor name: a_d.com/gpu",
+ parse_resource,
+ "cpu=2,memory=32Mi,disk=64Mi,a_d.com/gpu=1",
+ )
+ self.assertRaisesRegex(
+ ValueError,
+ "gpu resource name does not have a valid vendor name: *",
+ parse_resource,
+ "cpu=2,memory=32Mi,disk=64Mi," + "a" * 64 + "/gpu=1",
+ )
# When memory does not contain expected regex, raise an error
self.assertRaisesRegex(
ValueError,
| Provide default gpu resource name and validation
Currently users can only pass "gpu" as part of the resource name. However, k8s requires it to be either `"nvidia.com/gpu"` or `"amd.com/gpu"` if the AMD plugin is enabled. There are other vendors as well, but a pattern to use for validation would be `"<vendor>.com/gpu"`.
We should consider adding `"nvidia.com/gpu"` as the default and validating user-provided gpu resource names against the pattern `"<vendor>.com/gpu"`.
Reference: https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/
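A minimal sketch of that default-plus-validation, reusing the vendor-name regex from the patch above (the helper name is illustrative):
```python
import re

# dot-separated labels of letters, digits and dashes (each <= 63 chars), then "/gpu"
_GPU_VENDOR_RE = re.compile(r"^[a-zA-Z\d-]{,63}(\.[a-zA-Z\d-]{,63})*/gpu$")


def normalize_gpu_resource_name(name):
    if name == "gpu":
        return "nvidia.com/gpu"  # default vendor
    if not _GPU_VENDOR_RE.match(name):
        raise ValueError("gpu resource name does not have a valid vendor name: %s" % name)
    return name


assert normalize_gpu_resource_name("gpu") == "nvidia.com/gpu"
assert normalize_gpu_resource_name("amd.com/gpu") == "amd.com/gpu"
```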
| 2019-06-24T14:12:03 | -1.0 |
|
aio-libs/aiohttp | 2,775 | aio-libs__aiohttp-2775 | [
"2773"
] | 5d5e9fd78034cabddc6b392fb454367f5d91f4bf | diff --git a/aiohttp/web_protocol.py b/aiohttp/web_protocol.py
--- a/aiohttp/web_protocol.py
+++ b/aiohttp/web_protocol.py
@@ -250,8 +250,11 @@ def data_received(self, data):
self._request_count += 1
self._messages.append((msg, payload))
- if self._waiter is not None:
- self._waiter.set_result(None)
+ waiter = self._waiter
+ if waiter is not None:
+ if not waiter.done():
+ # don't set result twice
+ waiter.set_result(None)
self._upgraded = upgraded
if upgraded and tail:
| diff --git a/tests/test_web_protocol.py b/tests/test_web_protocol.py
--- a/tests/test_web_protocol.py
+++ b/tests/test_web_protocol.py
@@ -797,3 +797,23 @@ def test__process_keepalive_force_close(loop, srv):
with mock.patch.object(loop, "call_at") as call_at_patched:
srv._process_keepalive()
assert not call_at_patched.called
+
+
+def test_two_data_received_without_waking_up_start_task(srv, loop):
+ # make a chance to srv.start() method start waiting for srv._waiter
+ loop.run_until_complete(asyncio.sleep(0.01))
+ assert srv._waiter is not None
+
+ srv.data_received(
+ b'GET / HTTP/1.1\r\n'
+ b'Host: ex.com\r\n'
+ b'Content-Length: 1\r\n\r\n'
+ b'a')
+ srv.data_received(
+ b'GET / HTTP/1.1\r\n'
+ b'Host: ex.com\r\n'
+ b'Content-Length: 1\r\n\r\n'
+ b'b')
+
+ assert len(srv._messages) == 2
+ assert srv._waiter.done()
| "InvalidStateError: invalid state" when data is chunked
I am using a simple aiohttp (3.0.2) server for a Telegram bot:
```
API_TOKEN = 'XXXXxxXXXXxxxxxx-XXXXXXX_xxxxXXXXXx'
bot = telebot.TeleBot(API_TOKEN)
app = web.Application()
async def handle(request):
if request.match_info.get('token') == bot.token:
request_body_dict = await request.json()
update = telebot.types.Update.de_json(request_body_dict)
bot.process_new_updates([update])
return web.Response()
else:
return web.Response(status=403)
app.router.add_post('/{token}/', handle)
bot.remove_webhook()
bot.set_webhook(url=WEBHOOK_URL_BASE+WEBHOOK_URL_PATH,
certificate=open(WEBHOOK_SSL_CERT, 'r'))
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
web.run_app(
app,
host=WEBHOOK_LISTEN,
port=WEBHOOK_PORT,
ssl_context=context)
```
When a message is sent from the Telegram server, I receive this error:
> ERROR:asyncio:Exception in callback _SelectorSocketTransport._read_ready()
> handle: <Handle _SelectorSocketTransport._read_ready()>
> Traceback (most recent call last):
> File "/usr/lib64/python3.6/asyncio/events.py", line 127, in _run
> self._callback(*self._args)
> File "/usr/lib64/python3.6/asyncio/selector_events.py", line 731, in _read_ready
> self._protocol.data_received(data)
> File "/usr/lib64/python3.6/asyncio/sslproto.py", line 516, in data_received
> self._app_protocol.data_received(chunk)
> File "/home/telegram/bwb_env/lib64/python3.6/site-packages/aiohttp/web_protocol.py", line 252, in data_received
> self._waiter.set_result(None)
> asyncio.base_futures.InvalidStateError: invalid state
I receive the response completely, but after a number of responses there are long delays and eventually denial of service.
When I use curl to make a test request, I do not receive this error:
`curl -X POST -H 'Content-Type: application/json' -H 'Connection: keep-alive' -H 'User-Agent:' -H 'Accept-Encoding: gzip, deflate' -H 'Accept:' -H 'Host: 111.111.111.111' -k -d '{"update_id":285112997,
"message":{"message_id":802,"from":{"id":111111111,"is_bot":false,"first_name":"X","last_name":"X","username":"XXXXXXXXXXX","language_code":"uk-UA"},"chat":{"id":111111111,"first_name":"X","last_name":"X","username":"XXXXXXXXXXX","type":"private"},"date":1519540489,"text":"\u0440 \u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440"}}' https://111.111.111.111:8443/222222222:XXXXxxXXXXxxxxxx-XXXXXXX_xxxxXXXXXx/`
I am writing server for example
```import ssl
from config import *
import asyncio
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(WEBHOOK_SSL_CERT, WEBHOOK_SSL_PRIV)
class Serv(asyncio.Protocol):
def connection_made(self, transport):
self.transport = transport
self.peername = transport.get_extra_info("peername")
print("connection_made: {}".format(self.peername))
def data_received(self, data):
txid = data.decode()
print("data_received: {}".format(txid))
def eof_received(self):
print('eof_received')
def connection_lost(self, ex):
print("connection_lost: {}".format(self.peername))
loop = asyncio.get_event_loop()
coro = loop.create_server(Serv, host='0.0.0.0', port=8443, ssl=context)
asyncio.ensure_future(coro)
loop.run_forever()
```
This request from curl:
connection_made: ('121.121.121.121', 60939)
data_received: POST /222222222:XXXXxxXXXXxxxxxx-XXXXXXX_xxxxXXXXXx/ HTTP/1.1
Host: 111.111.111.111
Content-Type: application/json
Connection: keep-alive
Accept-Encoding: gzip, deflate
Content-Length: 362
{"update_id":285112997,
"message":{"message_id":802,"from":{"id":111111111,"is_bot":false,"first_name":"X","last_name":"X","username":"XXXXXXXXXXX","language_code":"uk-UA"},"chat":{"id":111111111,"first_name":"X","last_name":"X","username":"XXXXXXXXXXX","type":"private"},"date":1519540489,"text":"\u0440 \u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440"}}
connection_lost: ('121.121.121.121', 60939)
This from Telegram server:
connection_made: ('149.154.167.231', 33538)
data_received: POST /222222222:XXXXxxXXXXxxxxxx-XXXXXXX_xxxxXXXXXx/ HTTP/1.1
Host: 111.111.111.111
Content-Type: application/json
Content-Length: 362
Connection: keep-alive
Accept-Encoding: gzip, deflate
data_received: {"update_id":285113004,
"message":{"message_id":822,"from":{"id":111111111,"is_bot":false,"first_name":"X","last_name":"X","username":"XXXXXXXXXXX","language_code":"uk-UA"},"chat":{"id":111111111,"first_name":"X","last_name":"X","username":"XXXXXXXXXXX","type":"private"},"date":1519544321,"text":"\u0440 \u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440\u0440"}}
As seen from the Telegram output above, there are two calls to `data_received`.
aiohttp==2.0.7 does not have this error.
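For context, the underlying asyncio error is simply a future whose result gets set twice, which is what two back-to-back `data_received` calls can do to the waiter here; a minimal reproduction:
```python
import asyncio

loop = asyncio.new_event_loop()
waiter = loop.create_future()
waiter.set_result(None)
waiter.set_result(None)  # raises InvalidStateError: invalid state
```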
| Thanks for the report.
I think we should add a check for `self._waiter.done()` before `self._waiter.set_result()` call | 2018-02-27T13:07:06 | -1.0 |
aio-libs/aiohttp | 2,794 | aio-libs__aiohttp-2794 | [
"2789"
] | ca22766c79f820a97c19c7be78393b89a61b51cd | diff --git a/demos/chat/aiohttpdemo_chat/views.py b/demos/chat/aiohttpdemo_chat/views.py
--- a/demos/chat/aiohttpdemo_chat/views.py
+++ b/demos/chat/aiohttpdemo_chat/views.py
@@ -12,8 +12,8 @@
async def index(request):
resp = web.WebSocketResponse()
- ok, protocol = resp.can_start(request)
- if not ok:
+ is_ws = resp.can_prepare(request)
+ if not is_ws:
return aiohttp_jinja2.render_template('index.html', request, {})
await resp.prepare(request)
| diff --git a/tests/autobahn/server.py b/tests/autobahn/server.py
--- a/tests/autobahn/server.py
+++ b/tests/autobahn/server.py
@@ -8,8 +8,8 @@
async def wshandler(request):
ws = web.WebSocketResponse(autoclose=False)
- ok, protocol = ws.can_start(request)
- if not ok:
+ is_ws = ws.can_prepare(request)
+ if not is_ws:
return web.HTTPBadRequest()
await ws.prepare(request)
| Chat demo not working in aiohttp 3
## Long story short
I want to learn websockets from example code. There's a lack of simple, runnable example code on the net demonstrating websockets in Python.
## Expected behaviour
Server runs and page loads
## Actual behaviour
The server runs but the page fails to load with the following error:
```
~/work/aiohttp/demos/chat$ ~/venv3/bin/python aiohttpdemo_chat/main.py
DEBUG:asyncio:Using selector: EpollSelector
======== Running on http://0.0.0.0:8080 ========
(Press CTRL+C to quit)
ERROR:aiohttp.server:Error handling request
Traceback (most recent call last):
File "/home/robin/venv3/lib/python3.6/site-packages/aiohttp/web_protocol.py", line 381, in start
resp = await self._request_handler(request)
File "/home/robin/venv3/lib/python3.6/site-packages/aiohttp/web_app.py", line 310, in _handle
resp = await handler(request)
File "/home/robin/venv3/lib/python3.6/site-packages/aiohttpdemo_chat-0.0.1-py3.6.egg/aiohttpdemo_chat/views.py", line 15, in index
ok, protocol = resp.can_start(request)
AttributeError: 'WebSocketResponse' object has no attribute 'can_start'
```
## Steps to reproduce
Just run `aiohttpdemo_chat/main.py`
## Your environment
archlinux, aiohttp 3.0.5 server?
| The poll demo also does not work
This should probably be on https://github.com/aio-libs/aiohttp-demos.
The proper solution here is to add unit tests to that app and remember to run them (as we already do for other related projects) before major deploys.
https://github.com/aio-libs/aiohttp-demos gives the same error | 2018-03-04T19:24:35 | -1.0 |
aio-libs/aiohttp | 2,812 | aio-libs__aiohttp-2812 | [
"2805"
] | 6c31dc9182f7abe587fdd72caee06fdd58b4feec | diff --git a/aiohttp/web_fileresponse.py b/aiohttp/web_fileresponse.py
--- a/aiohttp/web_fileresponse.py
+++ b/aiohttp/web_fileresponse.py
@@ -193,9 +193,9 @@ async def prepare(self, request):
# If a range request has been made, convert start, end slice notation
# into file pointer offset and count
if start is not None or end is not None:
- if start is None and end < 0: # return tail of file
- start = file_size + end
- count = -end
+ if start < 0 and end is None: # return tail of file
+ start = file_size + start
+ count = file_size - start
else:
count = (end or file_size) - start
diff --git a/aiohttp/web_request.py b/aiohttp/web_request.py
--- a/aiohttp/web_request.py
+++ b/aiohttp/web_request.py
@@ -439,7 +439,8 @@ def http_range(self, *, _RANGE=hdrs.RANGE):
if start is None and end is not None:
# end with no start is to return tail of content
- end = -end
+ start = -end
+ end = None
if start is not None and end is not None:
# end is inclusive in range header, exclusive for slice
@@ -450,6 +451,7 @@ def http_range(self, *, _RANGE=hdrs.RANGE):
if start is end is None: # No valid range supplied
raise ValueError('No start or end of range specified')
+
return slice(start, end, 1)
@property
| diff --git a/tests/test_web_request.py b/tests/test_web_request.py
--- a/tests/test_web_request.py
+++ b/tests/test_web_request.py
@@ -110,6 +110,58 @@ def test_content_length():
assert 123 == req.content_length
+def test_range_to_slice_head():
+ def bytes_gen(size):
+ for i in range(size):
+ yield i % 256
+ payload = bytearray(bytes_gen(10000))
+ req = make_mocked_request(
+ 'GET', '/',
+ headers=CIMultiDict([('RANGE', 'bytes=0-499')]),
+ payload=payload)
+ assert isinstance(req.http_range, slice)
+ assert req.content[req.http_range] == payload[:500]
+
+
+def test_range_to_slice_mid():
+ def bytes_gen(size):
+ for i in range(size):
+ yield i % 256
+ payload = bytearray(bytes_gen(10000))
+ req = make_mocked_request(
+ 'GET', '/',
+ headers=CIMultiDict([('RANGE', 'bytes=500-999')]),
+ payload=payload)
+ assert isinstance(req.http_range, slice)
+ assert req.content[req.http_range] == payload[500:1000]
+
+
+def test_range_to_slice_tail_start():
+ def bytes_gen(size):
+ for i in range(size):
+ yield i % 256
+ payload = bytearray(bytes_gen(10000))
+ req = make_mocked_request(
+ 'GET', '/',
+ headers=CIMultiDict([('RANGE', 'bytes=9500-')]),
+ payload=payload)
+ assert isinstance(req.http_range, slice)
+ assert req.content[req.http_range] == payload[-500:]
+
+
+def test_range_to_slice_tail_stop():
+ def bytes_gen(size):
+ for i in range(size):
+ yield i % 256
+ payload = bytearray(bytes_gen(10000))
+ req = make_mocked_request(
+ 'GET', '/',
+ headers=CIMultiDict([('RANGE', 'bytes=-500')]),
+ payload=payload)
+ assert isinstance(req.http_range, slice)
+ assert req.content[req.http_range] == payload[-500:]
+
+
def test_non_keepalive_on_http10():
req = make_mocked_request('GET', '/', version=HttpVersion(1, 0))
assert not req.keep_alive
| BaseRequest.http_range returns the wrong slice when "start" is missing
## Long story short
<!-- Please describe your problem and why the fix is important. -->
When streaming range based requests from file, the method [http_range](
https://docs.aiohttp.org/en/stable/web_reference.html#aiohttp.web.BaseRequest.http_range) comes in quite handy; unfortunately, negative indexes don't work as expected.
The offending line 442 is in [web_request.py](https://github.com/aio-libs/aiohttp/blame/20928e1d6ba4887b9137eacd8ad7dda2dbbb09e6/aiohttp/web_request.py#L442) it should be "start = -end" instead of "end = -end"
Other valid ranges are described in https://tools.ietf.org/html/rfc7233#section-2.1
I don't know if it's allowed to just return a 416 on the more exotic ones though:)
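A quick illustration of the intended slice semantics using plain Python slicing (this mirrors the tests added above):
```python
payload = bytes(range(256)) * 40  # 10240 bytes of dummy data

# "Range: bytes=-500" means the last 500 bytes of the resource
assert payload[slice(-500, None, 1)] == payload[-500:]

# the buggy mapping, slice(None, -500, 1), selects everything *except* the tail
assert payload[slice(None, -500, 1)] == payload[:-500]
```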
## Expected behaviour
[Request Header]
Range: bytes=-5678
[BaseRequest.http_range]
slice(start=-5678, stop=None, step=1)
<!-- What is the behaviour you expect? -->
## Actual behaviour
[Request Header]
Range: bytes=-5678
[BaseRequest.http_range]
slice(start=None, stop=-5678, step=1)
<!-- What's actually happening? -->
## Steps to reproduce
<!-- Please describe steps to reproduce the issue.
If you have a script that does that please include it here within
markdown code markup -->
run an aiohttp server and request a resource using the range header.
(do I have to post the whole setup, code included?)
## Your environment
<!-- Describe the environment you have that lead to your issue.
This includes aiohttp version, OS, proxy server and other bits that
are related to your case.
IMPORTANT: aiohttp is both server framework and client library.
For getting rid of confusing please put 'server', 'client' or 'both'
word here.
-->
Running aiohttp 1.2.0 on debian 9 (stretch) ... I know, I know :)
| Would you make a pull request?
While what @Fahrradkette reported would be a nice and clean pull request to fix (and if he doesn't have the time/need I can take a stab at that) the problem is deeper than what is described in this ticket.
According to the RFC https://tools.ietf.org/html/rfc7233 section 2.1 the following are all valid range requests (when concerning bytes):
bytes=0-499
bytes=500-999
bytes=-500
bytes=9500-
The above can all be easily supported and are as you expect. However the following are also valid:
bytes=500-600,601-999
bytes=500-700,601-999
The response would then be a multipart with those byte ranges. However, currently only a single slice can be returned from the above-mentioned function :)
What are your ideas on supporting the entire Range grammar which would likely be much more complex versus just fixing the first part of it.
My opinion is to either support all of Range or none.
I doubt that multi-range requests are widespread. Let's keep the single range for simplicity.
Multiranges require a new property anyway, we cannot do it with existing property without breaking backward compatibility.
Raising an exception in the case of a multirange is pretty fine.
In that case if @Fahrradkette wants to create a pull request they can, otherwise I can. | 2018-03-07T19:17:54 | -1.0 |
aio-libs/aiohttp | 2,819 | aio-libs__aiohttp-2819 | [
"2206"
] | 7f7b12af3b684396738e52762d64b7eb58e7930c | diff --git a/aiohttp/client_reqrep.py b/aiohttp/client_reqrep.py
--- a/aiohttp/client_reqrep.py
+++ b/aiohttp/client_reqrep.py
@@ -2,6 +2,7 @@
import codecs
import io
import json
+import re
import sys
import traceback
import warnings
@@ -39,6 +40,9 @@
__all__ = ('ClientRequest', 'ClientResponse', 'RequestInfo', 'Fingerprint')
+json_re = re.compile('^application/(?:[\w.+-]+?\+)?json')
+
+
@attr.s(frozen=True, slots=True)
class ContentDisposition:
type = attr.ib(type=str)
@@ -131,6 +135,12 @@ def _merge_ssl_params(ssl, verify_ssl, ssl_context, fingerprint):
ConnectionKey = namedtuple('ConnectionKey', ['host', 'port', 'ssl'])
+def _is_expected_content_type(response_content_type, expected_content_type):
+ if expected_content_type == 'application/json':
+ return json_re.match(response_content_type)
+ return expected_content_type in response_content_type
+
+
class ClientRequest:
GET_METHODS = {
hdrs.METH_GET,
@@ -859,7 +869,7 @@ async def json(self, *, encoding=None, loads=json.loads,
if content_type:
ctype = self.headers.get(hdrs.CONTENT_TYPE, '').lower()
- if content_type not in ctype:
+ if not _is_expected_content_type(ctype, content_type):
raise ContentTypeError(
self.request_info,
self.history,
| diff --git a/tests/test_client_response.py b/tests/test_client_response.py
--- a/tests/test_client_response.py
+++ b/tests/test_client_response.py
@@ -345,6 +345,45 @@ def side_effect(*args, **kwargs):
assert response._connection is None
+async def test_json_extended_content_type(loop, session):
+ response = ClientResponse('get', URL('http://def-cl-resp.org'))
+ response._post_init(loop, session)
+
+ def side_effect(*args, **kwargs):
+ fut = loop.create_future()
+ fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
+ return fut
+
+ response.headers = {
+ 'Content-Type':
+ 'application/this.is-1_content+subtype+json;charset=cp1251'}
+ content = response.content = mock.Mock()
+ content.read.side_effect = side_effect
+
+ res = await response.json()
+ assert res == {'тест': 'пройден'}
+ assert response._connection is None
+
+
+async def test_json_custom_content_type(loop, session):
+ response = ClientResponse('get', URL('http://def-cl-resp.org'))
+ response._post_init(loop, session)
+
+ def side_effect(*args, **kwargs):
+ fut = loop.create_future()
+ fut.set_result('{"тест": "пройден"}'.encode('cp1251'))
+ return fut
+
+ response.headers = {
+ 'Content-Type': 'custom/type;charset=cp1251'}
+ content = response.content = mock.Mock()
+ content.read.side_effect = side_effect
+
+ res = await response.json(content_type='custom/type')
+ assert res == {'тест': 'пройден'}
+ assert response._connection is None
+
+
async def test_json_custom_loader(loop, session):
response = ClientResponse('get', URL('http://def-cl-resp.org'))
response._post_init(loop, session)
| json request for certain media types fail
## Long story short
The `resp.json()` call reports a bug if the return type is something like `application/XXX+json` (as opposed to `application/json`). However, `application/XXX+json` is a perfectly fine media type, "extending" the _semantics_ (not the syntax) of the core json syntax, ie, should be parsed as bona fide json...
## Your environment
<!-- Describe the environment you have that lead to your issue.
This includes project version, OS, proxy server and other bits that
are related to your case. -->
OS X 10.11.6, Python 3.6.2
| You can pass a custom content_type parameter to the ClientResponse.json method, although I agree that this isn't perhaps the most sensible API. I just switched some code from using Requests to using this library and while Requests could decode the JSON without a problem, aiohttp reported an error which will trip some people up.
Related to #2174
Extending the list of accepted json content types is a fairly easy change.
Please propose a Pull Request. `application/XXX+json` is not acceptable but I'm pretty sure you could grab a list of widespread json types from the `requests` library.
use `content_type=None` for disabling the check
@asvetlov, unfortunately I missed the [reply on Sep 11](https://github.com/aio-libs/aiohttp/issues/2206#issuecomment-328630051); probably my fault.
However, I am not sure I agree with your statement that "`application/XXX+json` is not acceptable". In my view, it is perfectly acceptable (and future proof) if the code simply ignores the `XXX+` part, whatever `XXX` is. Looking at [RFC 6839](https://tools.ietf.org/html/rfc6839#section-2):
> At the same time, using the suffix allows receivers of the media types to do generic processing of the underlying representation in cases where
>
>* they do not need to perform special handling of the particular semantics of the exact media type, and
>
>* there is no special knowledge needed by such a generic processor in order to parse that underlying representation other than what would be needed to parse any example of that underlying representation.
>
I.e., there is no reason for aiohttp to maintain whatever specific list. This is valid for any conversion step, not only for JSON.
---
Yes, I agree that [using `content_type=None` can be done](https://github.com/aio-libs/aiohttp/issues/2206#issuecomment-366452898), but that is clearly just a hack...
What to do?
On the one hand we want to prevent processing non-JSON data in the `.json()` method.
On the other we don't want to be too strict.
Maybe you can pass a custom content type when fetching a data from your server?
Or create a custom json reading function on top of `resp.read()`?
@asvetlov,
The fundamental fact is that data coming with `XXX+json` **is** JSON. From the point of view of aiohttp that all that matters. A properly registered `XXX+json` media type means that the content _is_ JSON but it may have some additional syntactic or structural constraint _within the framework of JSON_. This may mean imposing a particular JSON Schema, or some more complicated specification on top of what JSON means, or may be no extra syntactic restriction whatsoever.
To take a specific example. `application/ld+json` is a bona fide media type defined through the structural syntax; the content is defined via the [JSON-LD Recommendation](https://www.w3.org/TR/json-ld/). That specification adds some additional semantics to individual terms, requires some specific structure to the JSON data, but the essential point is that a JSON-LD content is a perfectly fine JSON content. (This is probably one of the most complex examples. Others may be a few paragraphs only.)
In my view, this means that the handling of the `XXX+json` (or `XXX+xml`, etc.) is simple, as far as aiohttp goes: strip the `XXX+` string altogether (the `+` character is not allowed elsewhere in a media type name), and handle the content as `json` (or `xml`). You are done. All other processing is in the application domain.
(You will find an abundance of such media types on the [media type registry](https://www.iana.org/assignments/media-types/media-types.xhtml).)
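For reference, the check that the patch above in this PR settles on boils down to a regular expression; shown here as a standalone sketch rather than aiohttp's internal code:
```python
import re

# application/json plus any structured-syntax suffix type: ld+json, hal+json, ...
json_re = re.compile(r'^application/(?:[\w.+-]+?\+)?json')

def is_json_content_type(content_type):
    return bool(json_re.match(content_type.lower()))

assert is_json_content_type('application/json; charset=utf-8')
assert is_json_content_type('application/ld+json')
assert not is_json_content_type('text/html')
```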
Would you say that any `application/xxx+json` is a valid JSON type?
Or is parsing all JSON types registered in the media registry better?
I'd say both statements are true.
By sending `application/xxx+json` the client explicitly tells the server to expect a valid json payload, and vice versa.
@asvetlov : parsing the json types from the media registry is of course nicer and better but... it is not that obvious. Would aiohttp check that registry any time the "read" function is called? Would it set up some sort of local cache for the registry with a finite lifetime? The registry is a changing target after all...
I believe just saying that "any `application/xxx+json` is a valid JSON type" is good enough.
Reopen the issue.
@iherman would you provide a Pull Request with the first option (accepting `application/xxx+json`)?
@asvetlov : I have no idea when I could do that; I have never looked into the internals of the aiohttp code, i.e., it would take me quite a while to familiarize myself with it to produce a proper PR. I can try at some point unless somebody beats me to it, but it may take a while :-(
@asvetlov, I have looked at the code and I _think_ I have found the few changes that are required. However, I simply will not have the time to make a proper pull request, ie, running all the necessary tests on the code overall. It would require a major setup on my machine, understanding how the test harness works, etc. Sorry, but that will not work.
I will look at the code again tomorrow just to be sure, and I could then forward you (tomorrow or on Wednesday) the file where I have made some changes: `client_reqrep.py`. Would that work? | 2018-03-09T16:53:33 | -1.0 |
aio-libs/aiohttp | 2,824 | aio-libs__aiohttp-2824 | [
"2795"
] | 9108698af4890439cc3831e2c3a4dd2dfc4d27af | diff --git a/aiohttp/web_urldispatcher.py b/aiohttp/web_urldispatcher.py
--- a/aiohttp/web_urldispatcher.py
+++ b/aiohttp/web_urldispatcher.py
@@ -29,16 +29,23 @@
__all__ = ('UrlDispatcher', 'UrlMappingMatchInfo',
'AbstractResource', 'Resource', 'PlainResource', 'DynamicResource',
'AbstractRoute', 'ResourceRoute',
- 'StaticResource', 'View', 'RouteDef', 'RouteTableDef',
- 'head', 'get', 'post', 'patch', 'put', 'delete', 'route', 'view')
+ 'StaticResource', 'View', 'RouteDef', 'StaticDef', 'RouteTableDef',
+ 'head', 'get', 'post', 'patch', 'put', 'delete', 'route', 'view',
+ 'static')
HTTP_METHOD_RE = re.compile(r"^[0-9A-Za-z!#\$%&'\*\+\-\.\^_`\|~]+$")
ROUTE_RE = re.compile(r'(\{[_a-zA-Z][^{}]*(?:\{[^{}]*\}[^{}]*)*\})')
PATH_SEP = re.escape('/')
+class AbstractRouteDef(abc.ABC):
+ @abc.abstractmethod
+ def register(self, router):
+ pass # pragma: no cover
+
+
@attr.s(frozen=True, repr=False, slots=True)
-class RouteDef:
+class RouteDef(AbstractRouteDef):
method = attr.ib(type=str)
path = attr.ib(type=str)
handler = attr.ib()
@@ -61,6 +68,24 @@ def register(self, router):
**self.kwargs)
+@attr.s(frozen=True, repr=False, slots=True)
+class StaticDef(AbstractRouteDef):
+ prefix = attr.ib(type=str)
+ path = attr.ib(type=str)
+ kwargs = attr.ib()
+
+ def __repr__(self):
+ info = []
+ for name, value in sorted(self.kwargs.items()):
+ info.append(", {}={!r}".format(name, value))
+ return ("<StaticDef {prefix} -> {path}"
+ "{info}>".format(prefix=self.prefix, path=self.path,
+ info=''.join(info)))
+
+ def register(self, router):
+ router.add_static(self.prefix, self.path, **self.kwargs)
+
+
class AbstractResource(Sized, Iterable):
def __init__(self, *, name=None):
@@ -995,6 +1020,10 @@ def view(path, handler, **kwargs):
return route(hdrs.METH_ANY, path, handler, **kwargs)
+def static(prefix, path, **kwargs):
+ return StaticDef(prefix, path, kwargs)
+
+
class RouteTableDef(Sequence):
"""Route definition table"""
def __init__(self):
@@ -1041,3 +1070,6 @@ def delete(self, path, **kwargs):
def view(self, path, **kwargs):
return self.route(hdrs.METH_ANY, path, **kwargs)
+
+ def static(self, prefix, path, **kwargs):
+ self._items.append(StaticDef(prefix, path, kwargs))
| diff --git a/tests/test_route_def.py b/tests/test_route_def.py
--- a/tests/test_route_def.py
+++ b/tests/test_route_def.py
@@ -1,4 +1,7 @@
+import pathlib
+
import pytest
+from yarl import URL
from aiohttp import web
from aiohttp.web_urldispatcher import UrlDispatcher
@@ -103,6 +106,19 @@ async def handler(request):
assert str(route.url_for()) == '/'
+def test_static(router):
+ folder = pathlib.Path(__file__).parent
+ router.add_routes([web.static('/prefix', folder)])
+ assert len(router.resources()) == 1 # 2 routes: for HEAD and GET
+
+ resource = list(router.resources())[0]
+ info = resource.get_info()
+ assert info['prefix'] == '/prefix'
+ assert info['directory'] == folder
+ url = resource.url_for(filename='sample.key')
+ assert url == URL('/prefix/sample.key')
+
+
def test_head_deco(router):
routes = web.RouteTableDef()
@@ -256,6 +272,15 @@ async def handler(request):
assert repr(rd) == "<RouteDef GET /path -> 'handler', extra='info'>"
+def test_repr_static_def():
+ routes = web.RouteTableDef()
+
+ routes.static('/prefix', '/path', name='name')
+
+ rd = routes[0]
+ assert repr(rd) == "<StaticDef /prefix -> /path, name='name'>"
+
+
def test_repr_route_table_def():
routes = web.RouteTableDef()
| Add web.static and web.RouteTableDef.static
As aliases for UrlDispatcher.add_static.
The idea is to add static file resources in a declarative way: `app.add_routes([web.static(...)])` and `table.static(...)`.
Part of #2766
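A short usage sketch of the proposed declarative API (the directory paths are placeholders), matching the `web.static()` and `RouteTableDef.static()` helpers added in the patch above:
```python
from aiohttp import web

routes = web.RouteTableDef()

@routes.get('/')
async def index(request):
    return web.Response(text='hello')

# declarative static resources instead of app.router.add_static(...)
routes.static('/static', './static')

app = web.Application()
app.add_routes(routes)
app.add_routes([web.static('/assets', './assets')])
```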
| 2018-03-12T14:41:07 | -1.0 |
|
aio-libs/aiohttp | 2,828 | aio-libs__aiohttp-2828 | [
"2827"
] | 9108698af4890439cc3831e2c3a4dd2dfc4d27af | diff --git a/aiohttp/client.py b/aiohttp/client.py
--- a/aiohttp/client.py
+++ b/aiohttp/client.py
@@ -326,11 +326,14 @@ async def _request(self, method, url, *,
tcp_nodelay(conn.transport, True)
tcp_cork(conn.transport, False)
try:
- resp = await req.send(conn)
try:
- await resp.start(conn, read_until_eof)
+ resp = await req.send(conn)
+ try:
+ await resp.start(conn, read_until_eof)
+ except BaseException:
+ resp.close()
+ raise
except BaseException:
- resp.close()
conn.close()
raise
except ClientError:
| diff --git a/tests/test_client_session.py b/tests/test_client_session.py
--- a/tests/test_client_session.py
+++ b/tests/test_client_session.py
@@ -371,6 +371,7 @@ async def create_connection(req, traces=None):
# return self.transport, self.protocol
return mock.Mock()
session._connector._create_connection = create_connection
+ session._connector._release = mock.Mock()
with pytest.raises(aiohttp.ClientOSError) as ctx:
await session.request('get', 'http://example.com')
@@ -379,6 +380,43 @@ async def create_connection(req, traces=None):
assert e.strerror == err.strerror
+async def test_close_conn_on_error(create_session):
+ class UnexpectedException(BaseException):
+ pass
+
+ err = UnexpectedException("permission error")
+ req = mock.Mock()
+ req_factory = mock.Mock(return_value=req)
+ req.send = mock.Mock(side_effect=err)
+ session = create_session(request_class=req_factory)
+
+ connections = []
+ original_connect = session._connector.connect
+
+ async def connect(req, traces=None):
+ conn = await original_connect(req, traces=traces)
+ connections.append(conn)
+ return conn
+
+ async def create_connection(req, traces=None):
+ # return self.transport, self.protocol
+ conn = mock.Mock()
+ return conn
+
+ session._connector.connect = connect
+ session._connector._create_connection = create_connection
+ session._connector._release = mock.Mock()
+
+ with pytest.raises(UnexpectedException):
+ async with session.request('get', 'http://example.com') as resp:
+ await resp.text()
+
+ # normally called during garbage collection. triggers an exception
+ # if the connection wasn't already closed
+ for c in connections:
+ c.__del__()
+
+
async def test_cookie_jar_usage(loop, aiohttp_client):
req_url = None
| Exception handling doesn't close connection
## Long story short
With certain malformed urls, the connection is not properly closed. This causes an "Unclosed connection".
## Expected behaviour
Connection cleaned up when there is an exception writing headers.
## Actual behaviour
Connection not properly cleaned up.
## Steps to reproduce
Here are two examples, one mocked and one using a live site. Both should produce an "Unclosed connection" error.
requires pytest and [aresponses](https://github.com/circleup/aresponses)
```python
@pytest.mark.asyncio
async def test_invalid_location(aresponses):
# real example: 'http://www.skinhealthcanada.com/index.cfm?pagepath=Brands&id=66172'
url = 'http://badwebsite.circleup.com/'
bad_url = 'http://badwebsite.circleup.com/in\udcaedex.html'
aresponses.add('badwebsite.circleup.com', aresponses.ANY, 'GET', aresponses.Response(status=302, headers={'Location': bad_url}))
aresponses.add('badwebsite.circleup.com', aresponses.ANY, 'GET', aresponses.Response())
async with aiohttp.ClientSession() as session:
try:
response = await session.get(bad_url)
except Exception as e:
print(repr(e))
async with aiohttp.ClientSession() as session:
response = await session.get(bad_url)
@pytest.mark.asyncio
async def test_invalid_location_live():
url = 'http://www.skinhealthcanada.com/index.cfm?pagepath=Brands&id=66172'
bad_url = 'http://www.skinhealthcanada.com/index.cfm?pagepath=Brands/Environ\udcae&id=66220'
async with aiohttp.ClientSession() as session:
try:
response = await session.get(bad_url)
except Exception as e:
print(repr(e))
async with aiohttp.ClientSession() as session:
response = await session.get(bad_url)
```
## Your environment
client
Python 3.6.4
Ubuntu 17.10
virtualenv
aiohttp==3.0.6
## possible solution
change aiohttp/client.py lines 328-343 to
```python
try:
try:
resp = req.send(conn)
except BaseException:
conn.close()
raise
try:
await resp.start(conn, read_until_eof)
except BaseException:
resp.close()
conn.close()
raise
except ClientError:
raise
except OSError as exc:
raise ClientOSError(*exc.args) from exc
```
| 2018-03-13T04:26:55 | -1.0 |
|
aio-libs/aiohttp | 2,858 | aio-libs__aiohttp-2858 | [
"2841"
] | 4f8ea944bb049bc9af02a17e91716154db88afea | diff --git a/aiohttp/connector.py b/aiohttp/connector.py
--- a/aiohttp/connector.py
+++ b/aiohttp/connector.py
@@ -728,12 +728,12 @@ async def _create_connection(self, req, traces=None):
if req.proxy:
_, proto = await self._create_proxy_connection(
req,
- traces=None
+ traces=traces
)
else:
_, proto = await self._create_direct_connection(
req,
- traces=None
+ traces=traces
)
return proto
| diff --git a/tests/test_web_functional.py b/tests/test_web_functional.py
--- a/tests/test_web_functional.py
+++ b/tests/test_web_functional.py
@@ -2,6 +2,7 @@
import io
import json
import pathlib
+import socket
import zlib
from unittest import mock
@@ -1638,10 +1639,14 @@ async def handler(request):
assert resp.status == 200
-async def test_request_tracing(aiohttp_client):
+async def test_request_tracing(aiohttp_server):
on_request_start = mock.Mock(side_effect=asyncio.coroutine(mock.Mock()))
on_request_end = mock.Mock(side_effect=asyncio.coroutine(mock.Mock()))
+ on_dns_resolvehost_start = mock.Mock(
+ side_effect=asyncio.coroutine(mock.Mock()))
+ on_dns_resolvehost_end = mock.Mock(
+ side_effect=asyncio.coroutine(mock.Mock()))
on_request_redirect = mock.Mock(side_effect=asyncio.coroutine(mock.Mock()))
on_connection_create_start = mock.Mock(
side_effect=asyncio.coroutine(mock.Mock()))
@@ -1663,20 +1668,50 @@ async def redirected(request):
on_connection_create_start)
trace_config.on_connection_create_end.append(
on_connection_create_end)
+ trace_config.on_dns_resolvehost_start.append(
+ on_dns_resolvehost_start)
+ trace_config.on_dns_resolvehost_end.append(
+ on_dns_resolvehost_end)
app = web.Application()
app.router.add_get('/redirector', redirector)
app.router.add_get('/redirected', redirected)
+ server = await aiohttp_server(app)
+
+ class FakeResolver:
+ _LOCAL_HOST = {0: '127.0.0.1',
+ socket.AF_INET: '127.0.0.1'}
+
+ def __init__(self, fakes):
+ """fakes -- dns -> port dict"""
+ self._fakes = fakes
+ self._resolver = aiohttp.DefaultResolver()
+
+ async def resolve(self, host, port=0, family=socket.AF_INET):
+ fake_port = self._fakes.get(host)
+ if fake_port is not None:
+ return [{'hostname': host,
+ 'host': self._LOCAL_HOST[family], 'port': fake_port,
+ 'family': socket.AF_INET, 'proto': 0,
+ 'flags': socket.AI_NUMERICHOST}]
+ else:
+ return await self._resolver.resolve(host, port, family)
- client = await aiohttp_client(app, trace_configs=[trace_config])
+ resolver = FakeResolver({'example.com': server.port})
+ connector = aiohttp.TCPConnector(resolver=resolver)
+ client = aiohttp.ClientSession(connector=connector,
+ trace_configs=[trace_config])
- await client.get('/redirector', data="foo")
+ await client.get('http://example.com/redirector', data="foo")
assert on_request_start.called
assert on_request_end.called
+ assert on_dns_resolvehost_start.called
+ assert on_dns_resolvehost_end.called
assert on_request_redirect.called
assert on_connection_create_start.called
assert on_connection_create_end.called
+ await client.close()
async def test_return_http_exception_deprecated(aiohttp_client):
| Client DNS start/end events not fired
## Long story short
Unable to track DNS timing via `TraceConfig` when using the client with `TCPConnector`, since `None` instead of the `traces` arg is passed to the direct and proxy connection methods:
```
async def _create_connection(self, req, traces=None):
"""Create connection.
Has same keyword arguments as BaseEventLoop.create_connection.
"""
if req.proxy:
_, proto = await self._create_proxy_connection(
req,
traces=None
)
else:
_, proto = await self._create_direct_connection(
req,
traces=None
)
return proto
```
## Expected behaviour
working DNS tracing with TCP
## Actual behaviour
dns events missed
## Your environment
aiohttp: 3.0.9
OS X
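For reference, this is roughly the client code that should fire those signals once the fix is in; a minimal sketch (the URL is a placeholder):
```python
import asyncio
import aiohttp

async def on_dns_start(session, ctx, params):
    print('dns resolve start:', params.host)

async def on_dns_end(session, ctx, params):
    print('dns resolve end:', params.host)

async def main():
    trace_config = aiohttp.TraceConfig()
    trace_config.on_dns_resolvehost_start.append(on_dns_start)
    trace_config.on_dns_resolvehost_end.append(on_dns_end)
    async with aiohttp.ClientSession(trace_configs=[trace_config]) as session:
        async with session.get('http://example.com/') as resp:
            await resp.read()

asyncio.get_event_loop().run_until_complete(main())
```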
| That's sad but the fix looks easy.
Would you provide a patch?
yes
Shame on me: the integration test that wires up all of the tracing does not cover this signal [1] ... if it's not a big deal, it would be nice if we could add these signals to that test. That would help avoid the same mistake in the future.
[1] https://github.com/aio-libs/aiohttp/blob/master/tests/test_web_functional.py#L1641
Thanks for pointing out the test. I actually wrote a different one, and it seems it was in the wrong place. Will do it there.
Well, this just checks the happy path at an upper level; if you have others at the function level please commit them as well! | 2018-03-20T12:15:43 | -1.0 |
aio-libs/aiohttp | 2,943 | aio-libs__aiohttp-2943 | [
"2631"
] | af0994fedd471f73f0a23e18a99a8635f8d83f87 | diff --git a/aiohttp/client.py b/aiohttp/client.py
--- a/aiohttp/client.py
+++ b/aiohttp/client.py
@@ -18,7 +18,8 @@
from . import hdrs, http, payload
from .client_exceptions import * # noqa
from .client_exceptions import (ClientError, ClientOSError, InvalidURL,
- ServerTimeoutError, WSServerHandshakeError)
+ ServerTimeoutError, TooManyRedirects,
+ WSServerHandshakeError)
from .client_reqrep import * # noqa
from .client_reqrep import ClientRequest, ClientResponse, _merge_ssl_params
from .client_ws import ClientWebSocketResponse
@@ -360,7 +361,8 @@ async def _request(self, method, url, *,
history.append(resp)
if max_redirects and redirects >= max_redirects:
resp.close()
- break
+ raise TooManyRedirects(
+ history[0].request_info, tuple(history))
else:
resp.release()
diff --git a/aiohttp/client_exceptions.py b/aiohttp/client_exceptions.py
--- a/aiohttp/client_exceptions.py
+++ b/aiohttp/client_exceptions.py
@@ -93,6 +93,10 @@ class ClientHttpProxyError(ClientResponseError):
"""
+class TooManyRedirects(ClientResponseError):
+ """Client was redirected too many times."""
+
+
class ClientConnectionError(ClientError):
"""Base class for client socket errors."""
| diff --git a/tests/test_client_functional.py b/tests/test_client_functional.py
--- a/tests/test_client_functional.py
+++ b/tests/test_client_functional.py
@@ -16,6 +16,7 @@
import aiohttp
from aiohttp import Fingerprint, ServerFingerprintMismatch, hdrs, web
from aiohttp.abc import AbstractResolver
+from aiohttp.client_exceptions import TooManyRedirects
from aiohttp.test_utils import unused_port
@@ -914,10 +915,11 @@ async def redirect(request):
app.router.add_get(r'/redirect/{count:\d+}', redirect)
client = await aiohttp_client(app)
- resp = await client.get('/redirect/5', max_redirects=2)
- assert 302 == resp.status
- assert 2 == len(resp.history)
- resp.close()
+ with pytest.raises(TooManyRedirects) as ctx:
+ await client.get('/redirect/5', max_redirects=2)
+ assert 2 == len(ctx.value.history)
+ assert ctx.value.request_info.url.path == '/redirect/5'
+ assert ctx.value.request_info.method == 'GET'
async def test_HTTP_200_GET_WITH_PARAMS(aiohttp_client):
| There should be an exception in a case of "too many redirects"
## Long story short
When loading `http://relax-tip.de/` it redirects to `http://deepskin.de/`, but `http://deepskin.de/` redirects back to `http://relax-tip.de/`.
aiohttp has a built-in limit `max_redirects=10`, but when this limit is reached users get the last redirect response with no way to figure out that it is a redirect cycle; also, aiohttp follows redirects automatically, so getting a `3xx` redirect response back is not expected.
## Expected behaviour
I have an idea: when the redirect limit is reached, raise `TooManyRedirects` (requests does it)
## Actual behaviour
After 10 redirects, the last redirect response is returned as if it were a successful response
## Steps to reproduce
```python
import aiohttp
import asyncio
async def main():
async with aiohttp.ClientSession() as session:
async with session.get('http://relax-tip.de/') as response:
import ipdb; ipdb.set_trace()
response
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
```
## Your environment
client
If this idea makes sense, I'll do a pull request to implement this feature
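Assuming the feature lands as proposed (and as the patch above implements), and that the new exception is exported as `aiohttp.TooManyRedirects`, calling code could look roughly like this:
```python
import asyncio
import aiohttp

async def main():
    async with aiohttp.ClientSession() as session:
        try:
            async with session.get('http://relax-tip.de/') as response:
                print(response.status)
        except aiohttp.TooManyRedirects as exc:
            # exc.history holds the redirect responses that were followed
            print('redirect loop:', [str(r.url) for r in exc.history])

asyncio.get_event_loop().run_until_complete(main())
```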
| Google chrome gets
```
This page isn’t working
relax-tip.de redirected you too many times.
Try clearing your cookies.
ERR_TOO_MANY_REDIRECTS
```
I think the same behavior in aiohttp makes sense.
Hi, i did some reading:
@hellysmile says that it should throw "TooManyRedirects" but according to wikipedia:
"The user has sent too many requests in a given amount of time. Intended for use with rate-limiting schemes."
I see a problem with the end of that paragraph, because we are not doing rate-limiting here, no?
I think this kind of exception would rather be expected with 421 Misdirected Request:
https://en.wikipedia.org/wiki/List_of_HTTP_status_codes#421
Maybe even to throw:
"404 Not Found
The requested resource could not be found but may be available in the future. Subsequent requests by the client are permissible."
Because in the end the "client/agent" did not "find" the resource, right?
This is a problem of recursion: https://en.wikipedia.org/wiki/Recursion
We basically started an infinite loop; we did stop it with the request recurrence number limit though, but it is still recursion, so following some Python patterns this custom exception would be named something like:
RequestRedirectRecursionError
Or by looking into the aiohttp code:
ClientRedirectRecursionError to be more consistent.
But how to handle it is another story :), since in @hellysmile example in "Steps to reproduce" client managed to get response on the LAST recurrence can we say we got the resource?
I think not, and that is why i would treat this exception as "404 Not Found".
Draft:
class ClientRedirectRecursionError(ClientError):
""" Recursion loop found in the request redirect cycle. """
Not sure how to handle it yet, though.
In my mind `404 Not Found` means exactly a server response with 404 status code.
But in case of too many redirects (it not necessary recursive redirections, maybe the chain is just longer than `max_redirects` parameter in aiohttp) status code for last response is 3XX. That's why a new `TooManyRedirects` exception should be added.
Perhaps `TooManyRedirects` should be derived from `aiohttp.ClientResponseError`
Agreed on 404, I went wrong in that direction.
So basically we talk here about two exceptions not one:
1.
ClientRedirectRecursionError(ClientResponseError):
""" Recursion loop found in the request redirect cycle. """
What if max_redirects were set to a big number here, let's say > 2000: would you get a RecursionError
as in the Python interpreter?
2.
ClientRedirectionTimeout(ClientResponseError):
""" Client timed out waiting for the response. """
or
ClientRedirectionMaximumReached(ClientResponseError):
""" Client has been redirected to many times without response. """
This exception would cover the case of a small max_redirects; that is a kind of timeout, and in this "timeout" the client did not manage to "find" the response.
`requests` counts redirects against a limit (30 by default) and raises `requests.TooManyRedirects`.
I think the same behavior is good for aiohttp too (except that the limit is 10 for historical reasons; let's not touch it now).
Too high `max_redirects` is bad because a cost of HTTP request is much higher than cost of Python recursive call.
Redirection timeout is not what I want to have: the timeout depends on network speed and it interferes with the timeout for a single request.
"Too high max_redirects is bad because a cost of HTTP request is much higher than cost of Python recursive call."
Yup, somebody could put max_redirects to big number by accident, for testing purposes...so what would/should happen then?
How about limiting the number it can be set to, and if somebody sets max_redirects over that number then raise ClientMaximumRedirectError("Too high max_redirects is bad because...") or at least issue a warning.
And in the case of reaching max_redirects:
ClientRedirectMaximumReached(ClientResponseError):
""" Client has been redirected to many times without response. """
No, it is overengineering. If somebody wants to shoot themselves in the foot, we do nothing.
Otherwise for consistency we should add such rings and bells (sane checks) to every parameter in every aiohttp call. | 2018-04-17T11:31:53 | -1.0 |
aio-libs/aiohttp | 2,944 | aio-libs__aiohttp-2944 | [
"2942"
] | 544716cf93b3856e485c8e993de60d9911c002cb | diff --git a/aiohttp/web_fileresponse.py b/aiohttp/web_fileresponse.py
--- a/aiohttp/web_fileresponse.py
+++ b/aiohttp/web_fileresponse.py
@@ -107,7 +107,8 @@ async def _sendfile_system(self, request, fobj, count):
transport = request.transport
if (transport.get_extra_info("sslcontext") or
- transport.get_extra_info("socket") is None):
+ transport.get_extra_info("socket") is None or
+ self.compression):
writer = await self._sendfile_fallback(request, fobj, count)
else:
writer = SendfileStreamWriter(
@@ -131,7 +132,7 @@ async def _sendfile_fallback(self, request, fobj, count):
# fobj is transferred in chunks controlled by the
# constructor's chunk_size argument.
- writer = (await super().prepare(request))
+ writer = await super().prepare(request)
chunk_size = self._chunk_size
diff --git a/aiohttp/web_response.py b/aiohttp/web_response.py
--- a/aiohttp/web_response.py
+++ b/aiohttp/web_response.py
@@ -43,7 +43,7 @@ def __init__(self, *, status=200, reason=None, headers=None):
self._keep_alive = None
self._chunked = False
self._compression = False
- self._compression_force = False
+ self._compression_force = None
self._cookies = SimpleCookie()
self._req = None
| diff --git a/tests/test_web_sendfile_functional.py b/tests/test_web_sendfile_functional.py
--- a/tests/test_web_sendfile_functional.py
+++ b/tests/test_web_sendfile_functional.py
@@ -1,6 +1,7 @@
import asyncio
import os
import pathlib
+import zlib
import pytest
@@ -729,3 +730,25 @@ async def handler(request):
resp = await client.get('/', headers={'If-Range': lastmod})
assert 200 == resp.status
resp.close()
+
+
+async def test_static_file_compression(aiohttp_client, sender):
+ filepath = pathlib.Path(__file__).parent / 'data.unknown_mime_type'
+
+ async def handler(request):
+ ret = sender(filepath)
+ ret.enable_compression()
+ return ret
+
+ app = web.Application()
+ app.router.add_get('/', handler)
+ client = await aiohttp_client(app, auto_decompress=False)
+
+ resp = await client.get('/')
+ assert resp.status == 200
+ zcomp = zlib.compressobj(wbits=-zlib.MAX_WBITS)
+ expected_body = zcomp.compress(b'file content\n') + zcomp.flush()
+ assert expected_body == await resp.read()
+ assert 'application/octet-stream' == resp.headers['Content-Type']
+ assert resp.headers.get('Content-Encoding') == 'deflate'
+ await resp.release()
| enable_compression doesn't work with FileResponse in sendfile mode
## Long story short
response.enable_compression doesn't work on Heroku.
## Expected behaviour
enable_compression on a response should work on Heroku.
## Actual behaviour
While this worked locally just fine, Heroku didn't like it when I enabled compression on a FileResponse:
Apr 17 11:58:19 my-app-staging heroku/router: http_error="Bad chunk" at=error code=H17 desc="Poorly formatted HTTP response" method=GET path="/" host=my-app-staging.herokuapp.com request_id=0bc47133-af19-4999-bc0f-179054b7388a fwd="xx.xx.xx.xx" dyno=web.1 connect=0ms service=19ms status=503 bytes= protocol=https
Apr 17 11:58:19 my-app-staging heroku/router: http_error="Bad chunk" at=error code=H17 desc="Poorly formatted HTTP response" method=GET path="/" host=my-app-staging.herokuapp.com request_id=9c1b4583-61d5-4867-bf1a-75de194cf71f fwd="xx.xx.xx.xx" dyno=web.1 connect=0ms service=5ms status=503 bytes= protocol=https
Apr 17 11:58:23 my-app-staging heroku/router: http_error="Bad chunk" at=error code=H17 desc="Poorly formatted HTTP response" method=GET path="/" host=my-app-staging.herokuapp.com request_id=a2f80aee-0ba0-4117-9139-123a79fd2f1c fwd="xx.xx.xx.xx" dyno=web.1 connect=0ms service=3ms status=503 bytes= protocol=https
## Steps to reproduce
```
resp = web.FileResponse('./dist/index.html', headers={'Cache-Control': 'no-cache'})
resp.enable_compression()
return resp
```
## Your environment
aiohttp==2.3.6
Using aiohttp directly on Heroku (no gunicorn/etc).
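For completeness, the workaround mentioned in the discussion below is the `AIOHTTP_NOSENDFILE` environment variable; it is typically read when aiohttp's file-response module is imported, so it has to be set early. A hedged sketch:
```python
import os

# must be set before aiohttp's file response machinery is imported
os.environ['AIOHTTP_NOSENDFILE'] = '1'

from aiohttp import web

async def index(request):
    resp = web.FileResponse('./dist/index.html',
                            headers={'Cache-Control': 'no-cache'})
    resp.enable_compression()
    return resp

app = web.Application()
app.router.add_get('/', index)
web.run_app(app)
```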
| Try to set `AIOHTTP_NOSENDFILE=1` environment variable.
Works now, thanks. Is there a way to set this programmatically?
Not yet.
The problem is: aiohttp uses `sendfile` syscall even for compressed files.
The fast path should be disabled for non-identity content encoding.
The patch for `FileResponse` is welcome (the required change is relatively simple).
The issue is waiting for a champion.
I'll take this one. | 2018-04-17T14:25:12 | -1.0 |
koxudaxi/datamodel-code-generator | 1,248 | koxudaxi__datamodel-code-generator-1248 | [
"1220"
] | e10f1bcce5f0135458a96e4d0e3d4e6ab7e54c3d | diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -2,6 +2,7 @@
import contextlib
import os
+import sys
from datetime import datetime, timezone
from enum import Enum
from pathlib import Path
@@ -308,6 +309,10 @@ def generate(
if is_openapi(input_text_) # type: ignore
else InputFileType.JsonSchema
)
+ print(
+ f'The input file type was determined to be: {input_file_type.value}',
+ file=sys.stderr,
+ )
except: # noqa
raise Error('Invalid file format')
| diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -588,7 +588,7 @@ def test_main_no_file(capsys: CaptureFixture) -> None:
assert (
captured.out == (EXPECTED_MAIN_PATH / 'main_no_file' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_extra_template_data_config(capsys: CaptureFixture) -> None:
@@ -614,7 +614,7 @@ def test_main_extra_template_data_config(capsys: CaptureFixture) -> None:
EXPECTED_MAIN_PATH / 'main_extra_template_data_config' / 'output.py'
).read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:
@@ -641,7 +641,7 @@ def test_main_custom_template_dir_old_style(capsys: CaptureFixture) -> None:
captured.out
== (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
@@ -668,7 +668,7 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
captured.out
== (EXPECTED_MAIN_PATH / 'main_custom_template_dir' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
@freeze_time('2019-07-26')
diff --git a/tests/test_main_kr.py b/tests/test_main_kr.py
--- a/tests/test_main_kr.py
+++ b/tests/test_main_kr.py
@@ -146,7 +146,7 @@ def test_main_no_file(capsys: CaptureFixture) -> None:
== (EXPECTED_MAIN_KR_PATH / 'main_no_file' / 'output.py').read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
@@ -175,7 +175,7 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
EXPECTED_MAIN_KR_PATH / 'main_custom_template_dir' / 'output.py'
).read_text()
)
- assert not captured.err
+ assert captured.err == 'The input file type was determined to be: openapi\n'
@freeze_time('2019-07-26')
| (🎁) Log input file type when `--input-file-type` is `auto`
I was left a little confused when my json file was silently detected as jsonschema instead of json.
| I agree. the CLI should show the detail of the error. | 2023-04-15T04:09:13 | -1.0 |
koxudaxi/datamodel-code-generator | 1,249 | koxudaxi__datamodel-code-generator-1249 | [
"1221"
] | 083691c6fea8fabc5000466c40c16298c7a4b463 | diff --git a/datamodel_code_generator/__init__.py b/datamodel_code_generator/__init__.py
--- a/datamodel_code_generator/__init__.py
+++ b/datamodel_code_generator/__init__.py
@@ -168,6 +168,37 @@ def is_openapi(text: str) -> bool:
return 'openapi' in load_yaml(text)
+JSON_SCHEMA_URLS: Tuple[str, ...] = (
+ 'http://json-schema.org/',
+ 'https://json-schema.org/',
+)
+
+
+def is_schema(text: str) -> bool:
+ data = load_yaml(text)
+ if not isinstance(data, dict):
+ return False
+ schema = data.get('$schema')
+ if isinstance(schema, str) and any(
+ schema.startswith(u) for u in JSON_SCHEMA_URLS
+ ): # pragma: no cover
+ return True
+ if isinstance(data.get('type'), str):
+ return True
+ if any(
+ isinstance(data.get(o), list)
+ for o in (
+ 'allOf',
+ 'anyOf',
+ 'oneOf',
+ )
+ ):
+ return True
+ if isinstance(data.get('properties'), dict):
+ return True
+ return False
+
+
class InputFileType(Enum):
Auto = 'auto'
OpenAPI = 'openapi'
@@ -304,11 +335,8 @@ def generate(
if isinstance(input_, Path)
else input_text
)
- input_file_type = (
- InputFileType.OpenAPI
- if is_openapi(input_text_) # type: ignore
- else InputFileType.JsonSchema
- )
+ assert isinstance(input_text_, str)
+ input_file_type = infer_input_type(input_text_)
print(
inferred_message.format(input_file_type.value),
file=sys.stderr,
@@ -483,6 +511,14 @@ def get_header_and_first_line(csv_file: IO[str]) -> Dict[str, Any]:
file.close()
+def infer_input_type(text: str) -> InputFileType:
+ if is_openapi(text):
+ return InputFileType.OpenAPI
+ elif is_schema(text):
+ return InputFileType.JsonSchema
+ return InputFileType.Json
+
+
inferred_message = (
'The input file type was determined to be: {}\nThis can be specificied explicitly with the '
'`--input-file-type` option.'
| diff --git a/tests/data/jsonschema/items_boolean.json b/tests/data/jsonschema/items_boolean.json
--- a/tests/data/jsonschema/items_boolean.json
+++ b/tests/data/jsonschema/items_boolean.json
@@ -1,5 +1,5 @@
{
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"example": {
diff --git a/tests/data/jsonschema/root_id.json b/tests/data/jsonschema/root_id.json
--- a/tests/data/jsonschema/root_id.json
+++ b/tests/data/jsonschema/root_id.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/jsonschema/root_id_absolute_url.json b/tests/data/jsonschema/root_id_absolute_url.json
--- a/tests/data/jsonschema/root_id_absolute_url.json
+++ b/tests/data/jsonschema/root_id_absolute_url.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/jsonschema/root_id_ref.json b/tests/data/jsonschema/root_id_ref.json
--- a/tests/data/jsonschema/root_id_ref.json
+++ b/tests/data/jsonschema/root_id_ref.json
@@ -1,5 +1,5 @@
{
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "root_id.json#/definitions/Person"
diff --git a/tests/data/jsonschema/root_id_self_ref.json b/tests/data/jsonschema/root_id_self_ref.json
--- a/tests/data/jsonschema/root_id_self_ref.json
+++ b/tests/data/jsonschema/root_id_self_ref.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id_self_ref.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/data/openapi/complex_reference.json b/tests/data/openapi/complex_reference.json
--- a/tests/data/openapi/complex_reference.json
+++ b/tests/data/openapi/complex_reference.json
@@ -1,4 +1,5 @@
{
+ "openapi": "3.0.0",
"components": {
"schemas": {
"A": {
diff --git a/tests/data/openapi/datetime.yaml b/tests/data/openapi/datetime.yaml
--- a/tests/data/openapi/datetime.yaml
+++ b/tests/data/openapi/datetime.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
InventoryItem:
diff --git a/tests/data/openapi/definitions.yaml b/tests/data/openapi/definitions.yaml
--- a/tests/data/openapi/definitions.yaml
+++ b/tests/data/openapi/definitions.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
schemas:
Problem:
properties:
diff --git a/tests/data/openapi/discriminator.yaml b/tests/data/openapi/discriminator.yaml
--- a/tests/data/openapi/discriminator.yaml
+++ b/tests/data/openapi/discriminator.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
ObjectBase:
diff --git a/tests/data/openapi/override_required_all_of.yaml b/tests/data/openapi/override_required_all_of.yaml
--- a/tests/data/openapi/override_required_all_of.yaml
+++ b/tests/data/openapi/override_required_all_of.yaml
@@ -1,3 +1,4 @@
+openapi: "3.0.0"
components:
schemas:
ObjectBase:
diff --git a/tests/data/openapi/x_enum_varnames.yaml b/tests/data/openapi/x_enum_varnames.yaml
--- a/tests/data/openapi/x_enum_varnames.yaml
+++ b/tests/data/openapi/x_enum_varnames.yaml
@@ -1,4 +1,4 @@
-openapi: 3.0
+openapi: "3.0.0"
components:
schemas:
string:
diff --git a/tests/root_id.json b/tests/root_id.json
--- a/tests/root_id.json
+++ b/tests/root_id.json
@@ -1,6 +1,6 @@
{
"$id": "https://example.com/root_id.json",
- "$schema ": "http://json-schema.org/draft-07/schema#",
+ "$schema": "http://json-schema.org/draft-07/schema#",
"definitions": {
"Person": {
"$ref": "person.json"
diff --git a/tests/test_infer_input_type.py b/tests/test_infer_input_type.py
new file mode 100644
--- /dev/null
+++ b/tests/test_infer_input_type.py
@@ -0,0 +1,46 @@
+from pathlib import Path
+
+from datamodel_code_generator import InputFileType, infer_input_type
+
+DATA_PATH: Path = Path(__file__).parent / 'data'
+
+
+def test_infer_input_type():
+ def assert_infer_input_type(file: Path, raw_data_type: InputFileType) -> None:
+ __tracebackhide__ = True
+ if file.is_dir():
+ return
+ if file.suffix not in ('.yaml', '.json'):
+ return
+ result = infer_input_type(file.read_text())
+ assert result == raw_data_type, f'{file} was the wrong type!'
+
+ for file in (DATA_PATH / 'json').rglob('*'):
+ if str(file).endswith('broken.json'):
+ continue
+ assert_infer_input_type(file, InputFileType.Json)
+ for file in (DATA_PATH / 'jsonschema').rglob('*'):
+ if str(file).endswith(('external_child.json', 'external_child.yaml')):
+ continue
+ if 'reference_same_hierarchy_directory' in str(file):
+ continue
+ assert_infer_input_type(file, InputFileType.JsonSchema)
+ for file in (DATA_PATH / 'openapi').rglob('*'):
+ if str(file).endswith(
+ (
+ 'aliases.json',
+ 'extra_data.json',
+ 'invalid.yaml',
+ 'list.json',
+ 'empty_data.json',
+ 'root_model.yaml',
+ 'json_pointer.yaml',
+ 'const.json',
+ )
+ ):
+ continue
+
+ if str(file).endswith('not.json'):
+ assert_infer_input_type(file, InputFileType.Json)
+ continue
+ assert_infer_input_type(file, InputFileType.OpenAPI)
| (🎁) Can we use heuristics to automatically detect the input type of json files?
It would be convenient if json files could be automatically detected as either schema or data files.
input:
```json
{
"a": 1
}
```
```
👉 datamodel_code_generator --input test.json
# generated by datamodel-codegen:
# filename: test.json
# timestamp: 2023-04-15T04:05:21+00:00
from __future__ import annotations
from typing import Any
from pydantic import BaseModel
class Model(BaseModel):
__root__: Any
```
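For illustration, the heuristic added in the patch above (`is_schema` / `infer_input_type`) roughly checks for a few schema-only keywords; a simplified, JSON-only sketch:
```python
import json

SCHEMA_LIST_KEYS = ('allOf', 'anyOf', 'oneOf')

def looks_like_json_schema(text):
    data = json.loads(text)
    if not isinstance(data, dict):
        return False
    schema = data.get('$schema')
    if isinstance(schema, str) and 'json-schema.org' in schema:
        return True
    if isinstance(data.get('type'), str):
        return True
    if any(isinstance(data.get(key), list) for key in SCHEMA_LIST_KEYS):
        return True
    return isinstance(data.get('properties'), dict)

assert looks_like_json_schema('{"type": "object", "properties": {"a": {"type": "integer"}}}')
assert not looks_like_json_schema('{"a": 1}')  # plain data -> treated as JSON input
```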
| @KotlinIsland
Do you have an example?
The CLI tries to parse json file as JSON Schema.
https://github.com/koxudaxi/datamodel-code-generator/blob/b48cb94edc76851eca0ff637bf81831330ea5808/datamodel_code_generator/__init__.py#L301-L310
So here we already check whether the file is OpenAPI, but could we do an additional check to see if the file is not a schema at all, but just a normal json file?
@KotlinIsland
How can we recognize whether a file is JSON Schema or JSON data?
If we apply a strict rule, the current behavior will be broken.
I think some use `auto` to convert rough JSON Schema :thinking:
Yes, I'm not certain if it's possible to determine if a file is schema or data. | 2023-04-15T07:50:00 | -1.0 |
koxudaxi/datamodel-code-generator | 1,432 | koxudaxi__datamodel-code-generator-1432 | [
"1419"
] | fab9f575bfec9975f598a02d71fe1fb6b7d87376 | diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -868,7 +868,9 @@ def __collapse_root_models(
data_type.remove_reference()
root_type_model.reference.children = [
- c for c in root_type_model.reference.children if c.parent
+ c
+ for c in root_type_model.reference.children
+ if getattr(c, 'parent', None)
]
if not root_type_model.reference.children:
| diff --git a/tests/data/expected/main/main_collapse_root_models_with_references_to_flat_types/output.py b/tests/data/expected/main/main_collapse_root_models_with_references_to_flat_types/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_collapse_root_models_with_references_to_flat_types/output.py
@@ -0,0 +1,13 @@
+# generated by datamodel-codegen:
+# filename: flat_type.jsonschema
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class FooModel(BaseModel):
+ foo: Optional[str] = None
diff --git a/tests/data/openapi/flat_type.jsonschema b/tests/data/openapi/flat_type.jsonschema
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/flat_type.jsonschema
@@ -0,0 +1,16 @@
+{
+ "title": "Foo",
+ "$schema": "http://json-schema.org/schema#",
+ "description": "",
+ "type": "object",
+ "properties": {
+ "foo": {
+ "$ref": "#/definitions/foo"
+ }
+ },
+ "definitions": {
+ "foo": {
+ "type": "string"
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -4339,6 +4339,31 @@ def test_main_collapse_root_models_field_constraints():
)
+@freeze_time('2019-07-26')
+def test_main_collapse_root_models_with_references_to_flat_types():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'flat_type.jsonschema'),
+ '--output',
+ str(output_file),
+ '--collapse-root-models',
+ ]
+ )
+
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_collapse_root_models_with_references_to_flat_types'
+ / 'output.py'
+ ).read_text()
+ )
+
+
@freeze_time('2019-07-26')
def test_main_openapi_max_items_enum():
with TemporaryDirectory() as output_dir:
| AttributeError raised when using --collapse-root-models flag
**Describe the bug**
When using the `--collapse-root-models` flag, this error is raised:
```shell
Traceback (most recent call last):
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/__main__.py", line 767, in main
generate(
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/__init__.py", line 431, in generate
results = parser.parse()
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/parser/base.py", line 1083, in parse
self.__collapse_root_models(models, unused_models)
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/parser/base.py", line 856, in __collapse_root_models
root_type_model.reference.children = [
File ".venv/lib/python3.10/site-packages/datamodel_code_generator/parser/base.py", line 857, in <listcomp>
c for c in root_type_model.reference.children if c.parent
AttributeError: 'BaseModel' object has no attribute 'parent'
```
**To Reproduce**
Input schema: https://github.com/CycloneDX/specification/blob/1.5/schema/bom-1.5.schema.json
pyproject.toml config:
```toml
[tool.datamodel-codegen]
base-class = ".base.BaseModel"
collapse-root-models = true
disable-warnings = true
enum-field-as-literal = "one"
field-include-all-keys = true
input-file-type = "jsonschema"
reuse-model = true
set-default-enum-member = true
snake-case-field = true
target-python-version = "3.7"
use-annotated = true
use-default-kwarg = true
use-double-quotes = true
use-schema-description = true
use-standard-collections = true
use-subclass-enum = true
use-title-as-name = true
wrap-string-literal = true
```
Base class code:
```python
from pydantic import BaseModel as _BaseModel
from pydantic import Extra
class BaseModel(_BaseModel):
class Config:
allow_population_by_field_name = True
arbitrary_types_allowed = True
extra = Extra.allow
use_enum_values = True
def __hash__(self) -> int:
return hash(repr(self))
```
**Expected behavior**
Collapse root models without raising an exception
**Version:**
- OS: MacOS 13.4.1
- Python version: 3.10
- datamodel-code-generator version: 0.21.1
| Seems to work (at least for my use case; I don't know if it breaks others) if that line is changed to
```python
c for c in root_type_model.reference.children if getattr(c, "parent", None)
``` | 2023-07-18T09:48:04 | -1.0 |
koxudaxi/datamodel-code-generator | 1,448 | koxudaxi__datamodel-code-generator-1448 | [
"1435",
"1435"
] | e8d600886f894c100d6c4c0277b91a67f89cae48 | diff --git a/datamodel_code_generator/__main__.py b/datamodel_code_generator/__main__.py
--- a/datamodel_code_generator/__main__.py
+++ b/datamodel_code_generator/__main__.py
@@ -36,8 +36,10 @@
import toml
from pydantic import BaseModel
+if TYPE_CHECKING:
+ from typing_extensions import Self
+
from datamodel_code_generator import (
- DEFAULT_BASE_CLASS,
DataModelType,
Error,
InputFileType,
@@ -470,9 +472,6 @@ def get(self, item: str) -> Any:
def __getitem__(self, item: str) -> Any:
return self.get(item)
- def __setitem__(self, key: str, value: Any) -> None:
- setattr(self, key, value)
-
if TYPE_CHECKING:
@classmethod
@@ -580,23 +579,21 @@ def validate_each_item(each_item: Any) -> Tuple[str, str]:
return [validate_each_item(each_item) for each_item in value]
return value # pragma: no cover
- @model_validator(mode='after')
- def validate_root(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- values = cls._validate_use_annotated(values)
- return cls._validate_base_class(values)
-
- @classmethod
- def _validate_use_annotated(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- if values.get('use_annotated'):
- values['field_constraints'] = True
- return values
+ if PYDANTIC_V2:
- @classmethod
- def _validate_base_class(cls, values: Dict[str, Any]) -> Dict[str, Any]:
- if 'base_class' not in values and 'output_model_type' in values:
- if values['output_model_type'] != DataModelType.PydanticBaseModel.value:
- values['base_class'] = ''
- return values
+ @model_validator(mode='after') # type: ignore
+ def validate_root(self: Self) -> Self:
+ if self.use_annotated:
+ self.field_constraints = True
+ return self
+
+ else:
+
+ @model_validator(mode='after')
+ def validate_root(cls, values: Any) -> Any:
+ if values.get('use_annotated'):
+ values['field_constraints'] = True
+ return values
input: Optional[Union[Path, str]] = None
input_file_type: InputFileType = InputFileType.Auto
@@ -605,7 +602,7 @@ def _validate_base_class(cls, values: Dict[str, Any]) -> Dict[str, Any]:
debug: bool = False
disable_warnings: bool = False
target_python_version: PythonVersion = PythonVersion.PY_37
- base_class: str = DEFAULT_BASE_CLASS
+ base_class: str = ''
custom_template_dir: Optional[Path] = None
extra_template_data: Optional[TextIOBase] = None
validation: bool = False
@@ -666,9 +663,11 @@ def merge_args(self, args: Namespace) -> None:
for f in self.get_fields()
if getattr(args, f) is not None
}
- set_args = self._validate_use_annotated(set_args)
- set_args = self._validate_base_class(set_args)
- parsed_args = self.parse_obj(set_args)
+
+ if set_args.get('use_annotated'):
+ set_args['field_constraints'] = True
+
+ parsed_args = Config.parse_obj(set_args)
for field_name in set_args:
setattr(self, field_name, getattr(parsed_args, field_name))
| diff --git a/tests/data/expected/main/main_openapi_custom_id_pydantic_v2/output.py b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2/output.py
@@ -0,0 +1,18 @@
+# generated by datamodel-codegen:
+# filename: custom_id.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+from uuid import UUID
+
+from pydantic import BaseModel, Field, RootModel
+
+
+class CustomId(RootModel):
+ root: UUID = Field(..., description='My custom ID')
+
+
+class Model(BaseModel):
+ custom_id: Optional[CustomId] = None
diff --git a/tests/data/expected/main/main_openapi_custom_id_pydantic_v2_custom_base/output.py b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2_custom_base/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_openapi_custom_id_pydantic_v2_custom_base/output.py
@@ -0,0 +1,20 @@
+# generated by datamodel-codegen:
+# filename: custom_id.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+from uuid import UUID
+
+from pydantic import Field, RootModel
+
+from custom_base import Base
+
+
+class CustomId(RootModel):
+ root: UUID = Field(..., description='My custom ID')
+
+
+class Model(Base):
+ custom_id: Optional[CustomId] = None
diff --git a/tests/data/openapi/custom_id.yaml b/tests/data/openapi/custom_id.yaml
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/custom_id.yaml
@@ -0,0 +1,12 @@
+openapi: 3.0.0
+components:
+ schemas:
+ CustomId:
+ description: My custom ID
+ type: string
+ format: uuid
+ Model:
+ type: object
+ properties:
+ custom_id:
+ $ref: "#/components/schemas/CustomId"
\ No newline at end of file
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5527,3 +5527,57 @@ def test_main_pydantic_v2():
output_file.read_text()
== (EXPECTED_MAIN_PATH / 'main_pydantic_v2' / 'output.py').read_text()
)
+
+
+@freeze_time('2019-07-26')
+def test_main_openapi_custom_id_pydantic_v2():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'custom_id.yaml'),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH / 'main_openapi_custom_id_pydantic_v2' / 'output.py'
+ ).read_text()
+ )
+
+
+@pytest.mark.skipif(
+ not isort.__version__.startswith('4.'),
+ reason="isort 5.x don't sort pydantic modules",
+)
+@freeze_time('2019-07-26')
+def test_main_openapi_custom_id_pydantic_v2_custom_base():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'custom_id.yaml'),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ '--base-class',
+ 'custom_base.Base',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_openapi_custom_id_pydantic_v2_custom_base'
+ / 'output.py'
+ ).read_text()
+ )
| Wrong parent class for pydantic V2 root models
**Describe the bug**
Generator uses `pydantic.BaseModel` as parent class for root model instead of `pydantic.RootModel`
Example schema (`custom_id.yaml`):
```yaml
openapi: 3.0.0
components:
schemas:
CustomId:
description: My custom ID
type: string
format: uuid
```
Used commandline:
```
$ datamodel-codegen --input custom_id.yaml --output-model-type pydantic_v2.BaseModel --output model.py
```
Contents of `model.py`:
```python
from __future__ import annotations
from uuid import UUID
from pydantic import BaseModel, Field
class CustomId(BaseModel):
root: UUID = Field(..., description='My custom ID')
```
**Expected behavior**
```python
from __future__ import annotations
from uuid import UUID
from pydantic import RootModel, Field
class CustomId(RootModel):
root: UUID = Field(..., description='My custom ID')
```
**Version:**
- OS: [e.g. iOS]
- Python version: 3.10.8
- datamodel-code-generator version: 0.21.1
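For context, in pydantic v2 a custom root type has to derive from `pydantic.RootModel`, which is why the expected output above subclasses it; a minimal sketch (the sample UUID is arbitrary):
```python
from uuid import UUID
from pydantic import RootModel

class CustomId(RootModel[UUID]):
    pass

print(CustomId('c9bf9e57-1685-4c89-bafb-ff5af830be8a').root)
```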
| @andreyLetenkovWefox
Thank you for creating the issue.
I have tested your schema. But I can't reproduce the issue.
Do you use a custom template for root model?
> @andreyLetenkovWefox Thank you for creating the issue. I have tested your schema. But I can't reproduce the issue. Do you use a custom template for root model?
Sorry, I thought the problem could be reproduced via a CLI generator call, but it really only appears when using the python package:
```python
from pathlib import Path
from datamodel_code_generator import DataModelType, generate
generate(
input_=Path("custom_id.yaml"),
output=Path("model.py"),
output_model_type=DataModelType.PydanticV2BaseModel,
)
```
| 2023-07-24T20:40:51 | -1.0 |
koxudaxi/datamodel-code-generator | 1,477 | koxudaxi__datamodel-code-generator-1477 | [
"1435"
] | 3cbc02cfe5424b71674be8602cb7e89a6f9b5c9a | diff --git a/datamodel_code_generator/model/pydantic_v2/root_model.py b/datamodel_code_generator/model/pydantic_v2/root_model.py
--- a/datamodel_code_generator/model/pydantic_v2/root_model.py
+++ b/datamodel_code_generator/model/pydantic_v2/root_model.py
@@ -1,6 +1,6 @@
from __future__ import annotations
-from typing import ClassVar
+from typing import Any, ClassVar
from datamodel_code_generator.model.pydantic_v2.base_model import BaseModel
@@ -8,3 +8,14 @@
class RootModel(BaseModel):
TEMPLATE_FILE_PATH: ClassVar[str] = 'pydantic_v2/RootModel.jinja2'
BASE_CLASS: ClassVar[str] = 'pydantic.RootModel'
+
+ def __init__(
+ self,
+ **kwargs: Any,
+ ) -> None:
+ # Remove custom_base_class for Pydantic V2 models; behaviour is different from Pydantic V1 as it will not
+ # be treated as a root model. custom_base_class cannot both implement BaseModel and RootModel!
+ if 'custom_base_class' in kwargs:
+ kwargs.pop('custom_base_class')
+
+ super().__init__(**kwargs)
| diff --git a/tests/model/pydantic_v2/__init__.py b/tests/model/pydantic_v2/__init__.py
new file mode 100644
diff --git a/tests/model/pydantic_v2/test_root_model.py b/tests/model/pydantic_v2/test_root_model.py
new file mode 100644
--- /dev/null
+++ b/tests/model/pydantic_v2/test_root_model.py
@@ -0,0 +1,67 @@
+from datamodel_code_generator.model import DataModelFieldBase
+from datamodel_code_generator.model.pydantic_v2.root_model import RootModel
+from datamodel_code_generator.reference import Reference
+from datamodel_code_generator.types import DataType
+
+
+def test_root_model():
+ root_model = RootModel(
+ fields=[
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ )
+ ],
+ reference=Reference(name='TestRootModel', path='test_root_model'),
+ )
+
+ assert root_model.name == 'TestRootModel'
+ assert root_model.fields == [
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ parent=root_model,
+ )
+ ]
+
+ assert root_model.base_class == 'RootModel'
+ assert root_model.custom_base_class is None
+ assert root_model.render() == (
+ 'class TestRootModel(RootModel):\n' " root: Optional[str] = 'abc'"
+ )
+
+
+def test_root_model_custom_base_class():
+ root_model = RootModel(
+ custom_base_class='test.Test',
+ fields=[
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ )
+ ],
+ reference=Reference(name='TestRootModel', path='test_root_model'),
+ )
+
+ assert root_model.name == 'TestRootModel'
+ assert root_model.fields == [
+ DataModelFieldBase(
+ name='a',
+ data_type=DataType(type='str'),
+ default='abc',
+ required=False,
+ parent=root_model,
+ )
+ ]
+
+ assert root_model.base_class == 'RootModel'
+ assert root_model.custom_base_class is None
+ assert root_model.render() == (
+ 'class TestRootModel(RootModel):\n' " root: Optional[str] = 'abc'"
+ )
| Wrong parent class for pydantic V2 root models
**Describe the bug**
Generator uses `pydantic.BaseModel` as parent class for root model instead of `pydantic.RootModel`
Example schema (`custom_id.yaml`):
```yaml
openapi: 3.0.0
components:
schemas:
CustomId:
description: My custom ID
type: string
format: uuid
```
Used commandline:
```
$ datamodel-codegen --input custom_id.yaml --output-model-type pydantic_v2.BaseModel --output model.py
```
Contents of `model.py`:
```python
from __future__ import annotations
from uuid import UUID
from pydantic import BaseModel, Field
class CustomId(BaseModel):
root: UUID = Field(..., description='My custom ID')
```
**Expected behavior**
```python
from __future__ import annotations
from uuid import UUID
from pydantic import RootModel, Field
class CustomId(RootModel):
root: UUID = Field(..., description='My custom ID')
```
**Version:**
- OS: [e.g. iOS]
- Python version: 3.10.8
- datamodel-code-generator version: 0.21.1
| @andreyLetenkovWefox
Thank you for creating the issue.
I have tested your schema. But I can't reproduce the issue.
Do you use a custom template for root model?
> @andreyLetenkovWefox Thank you for creating the issue. I have tested your schema. But I can't reproduce the issue. Do you use a custom template for root model?
Sorry, I thought the problem could be reproduced via a CLI generator call, but it really only appears when using the python package:
```python
from pathlib import Path
from datamodel_code_generator import DataModelType, generate
generate(
input_=Path("custom_id.yaml"),
output=Path("model.py"),
output_model_type=DataModelType.PydanticV2BaseModel,
)
```
We hit this issue in a slightly different setting: if `output_data_model = "pydantic_v2.BaseModel"` is read from `pyproject.toml` and not passed on the command line, root models have the wrong base class. However, if `--output_data_model "pydantic_v2.BaseModel"` is passed on the command line, the root model base class is correct.
I drafted a fix in #1448 but it still needs quite some work (mostly because it only works with Pydantic v2 at the moment, and relies on using a `Config` instance as input to `generate`, so it won't fix @andreyLetenkovWefox 's example in https://github.com/koxudaxi/datamodel-code-generator/issues/1435#issuecomment-1646081300).
@koxudaxi would you like to see further work in the direction of #1448? or do you have a simpler idea for the fix? It seems that the handling of `base_class` could be simplified in the parser stack, but this looks quite intrusive to me.
> TL;DR: The issue comes from `base_model` incorrectly overriding `RootModel`.
I was also running into this issue, but not in the same way as @andreyLetenkovWefox. Apparently, if you set the `base_class` parameter and use Pydantic V2, it will incorrectly set the Base Model for Root Models to your custom Base Class.
See the following example:
```openapi
openapi: 3.0.0
components:
schemas:
CustomId:
description: My custom ID
type: string
format: uuid
```
```bash
datamodel-codegen --input custom_id.yaml --base-class "utils.dto_class.BaseDtoModel" --output-model-type pydantic_v2.BaseModel
```
This will output:
``` python
from __future__ import annotations
from uuid import UUID
from pydantic import Field
from utils.dto_class import BaseDtoModel
class CustomId(BaseDtoModel):
root: UUID = Field(..., description='My custom ID')
```
This, for Pydantic V2, is wrong as it will be interpreted as a normal model with a literal `root` field (and not a Root Model). In Pydantic V1 this was not an issue because it was being populated through the `__root__` field which uses `BaseModel` as the upper class (just like a normal model).
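To illustrate the difference described above, a minimal, hypothetical sketch (not from the thread; `PlainWithRootField` and the UUID value are invented, only the `CustomId` name comes from the report) of how Pydantic V2 treats the two shapes:
```python
from uuid import UUID

from pydantic import BaseModel, RootModel


class PlainWithRootField(BaseModel):
    root: UUID  # an ordinary field that just happens to be named "root"


class CustomId(RootModel[UUID]):
    pass


uid = "123e4567-e89b-12d3-a456-426614174000"
print(CustomId(uid).model_dump_json())                 # '"123e4567-..."'  (bare value)
print(PlainWithRootField(root=uid).model_dump_json())  # '{"root":"123e4567-..."}'
```
Only the `RootModel` subclass validates and serializes the bare value, so a custom `base_class` that derives from `BaseModel` alone silently changes the wire format of every generated root model.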
But here arises another question: **Should `base_class` actually override `RootModel` as well?** In Pydantic V1 this made sense, but for Pydantic V2 probably RootModel should never be overwritten.
IMO, `RootModel` should not be overwritten even with a custom `base_class` as it implements several methods which are not available in normal models. However, a developer may want to define their custom `RootModel` as well, so not sure what's the correct procedure for this... | 2023-08-06T16:31:38 | -1.0 |
koxudaxi/datamodel-code-generator | 1,606 | koxudaxi__datamodel-code-generator-1606 | [
"1605"
] | 62c34cdb7a90238b0c960c68bb7f65788ec3d857 | diff --git a/datamodel_code_generator/model/msgspec.py b/datamodel_code_generator/model/msgspec.py
--- a/datamodel_code_generator/model/msgspec.py
+++ b/datamodel_code_generator/model/msgspec.py
@@ -1,6 +1,8 @@
from pathlib import Path
from typing import Any, ClassVar, DefaultDict, Dict, List, Optional, Set, Tuple
+from pydantic import Field
+
from datamodel_code_generator.imports import Import
from datamodel_code_generator.model import DataModel, DataModelFieldBase
from datamodel_code_generator.model.base import UNDEFINED
@@ -10,10 +12,12 @@
IMPORT_MSGSPEC_META,
IMPORT_MSGSPEC_STRUCT,
)
-from datamodel_code_generator.model.pydantic.base_model import Constraints
+from datamodel_code_generator.model.pydantic.base_model import (
+ Constraints as _Constraints,
+)
from datamodel_code_generator.model.rootmodel import RootModel
from datamodel_code_generator.reference import Reference
-from datamodel_code_generator.types import chain_as_tuple
+from datamodel_code_generator.types import chain_as_tuple, get_optional_type
def _has_field_assignment(field: DataModelFieldBase) -> bool:
@@ -71,6 +75,12 @@ def imports(self) -> Tuple[Import, ...]:
return chain_as_tuple(super().imports, extra_imports)
+class Constraints(_Constraints):
+ # To override existing pattern alias
+ regex: Optional[str] = Field(None, alias='regex')
+ pattern: Optional[str] = Field(None, alias='pattern')
+
+
class DataModelField(DataModelFieldBase):
_FIELD_KEYS: ClassVar[Set[str]] = {
'default',
@@ -88,7 +98,7 @@ class DataModelField(DataModelFieldBase):
# 'max_items', # not supported by msgspec
'min_length',
'max_length',
- 'regex',
+ 'pattern',
# 'unique_items', # not supported by msgspec
}
_PARSE_METHOD = 'convert'
@@ -197,6 +207,10 @@ def annotated(self) -> Optional[str]:
meta = f'Meta({", ".join(meta_arguments)})'
+ if not self.required:
+ type_hint = self.data_type.type_hint
+ annotated_type = f'Annotated[{type_hint}, {meta}]'
+ return get_optional_type(annotated_type, self.data_type.use_union_operator)
return f'Annotated[{self.type_hint}, {meta}]'
def _get_default_as_struct_model(self) -> Optional[str]:
| diff --git a/tests/data/expected/main/main_msgspec_struct/output.py b/tests/data/expected/main/main_msgspec_struct/output.py
--- a/tests/data/expected/main/main_msgspec_struct/output.py
+++ b/tests/data/expected/main/main_msgspec_struct/output.py
@@ -40,17 +40,17 @@ class Error(Struct):
class Api(Struct):
- apiKey: Annotated[
- Optional[str], Meta(description='To be used as a dataset parameter value')
+ apiKey: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
] = None
- apiVersionNumber: Annotated[
- Optional[str], Meta(description='To be used as a version parameter value')
+ apiVersionNumber: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
] = None
- apiUrl: Annotated[
- Optional[str], Meta(description="The URL describing the dataset's fields")
+ apiUrl: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
] = None
- apiDocumentationUrl: Annotated[
- Optional[str], Meta(description='A URL to the API console for each API')
+ apiDocumentationUrl: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
] = None
diff --git a/tests/data/expected/main/main_pattern_msgspec/output.py b/tests/data/expected/main/main_pattern_msgspec/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_pattern_msgspec/output.py
@@ -0,0 +1,25 @@
+# generated by datamodel-codegen:
+# filename: pattern.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Annotated, Optional
+
+from msgspec import Meta, Struct
+
+
+class Info(Struct):
+ hostName: Optional[str] = None
+ arn: Optional[
+ Annotated[
+ str,
+ Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$'),
+ ]
+ ] = None
+ tel: Optional[
+ Annotated[str, Meta(pattern='^(\\([0-9]{3}\\))?[0-9]{3}-[0-9]{4}$')]
+ ] = None
+ comment: Optional[
+ Annotated[str, Meta(pattern='[^\\b\\f\\n\\r\\t\\\\a+.?\'"|()]+$')]
+ ] = None
diff --git a/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py b/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py
--- a/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py
+++ b/tests/data/expected/main/main_use_annotated_with_msgspec_meta_constraints/output.py
@@ -12,7 +12,7 @@
class Pet(Struct):
id: Annotated[int, Meta(ge=0, le=9223372036854775807)]
name: Annotated[str, Meta(max_length=256)]
- tag: Annotated[Optional[str], Meta(max_length=64)] = None
+ tag: Optional[Annotated[str, Meta(max_length=64)]] = None
Pets = List[Pet]
@@ -31,13 +31,13 @@ class User(Struct):
id: Annotated[int, Meta(ge=0)]
name: Annotated[str, Meta(max_length=256)]
uid: UID
- tag: Annotated[Optional[str], Meta(max_length=64)] = None
+ tag: Optional[Annotated[str, Meta(max_length=64)]] = None
phones: Optional[List[Phone]] = None
fax: Optional[List[FaxItem]] = None
- height: Annotated[Optional[Union[int, float]], Meta(ge=1.0, le=300.0)] = None
- weight: Annotated[Optional[Union[float, int]], Meta(ge=1.0, le=1000.0)] = None
- age: Annotated[Optional[int], Meta(gt=0, le=200)] = None
- rating: Annotated[Optional[float], Meta(gt=0.0, le=5.0)] = None
+ height: Optional[Annotated[Union[int, float], Meta(ge=1.0, le=300.0)]] = None
+ weight: Optional[Annotated[Union[float, int], Meta(ge=1.0, le=1000.0)]] = None
+ age: Optional[Annotated[int, Meta(gt=0, le=200)]] = None
+ rating: Optional[Annotated[float, Meta(gt=0.0, le=5.0)]] = None
Users = List[User]
@@ -55,17 +55,17 @@ class Error(Struct):
class Api(Struct):
- apiKey: Annotated[
- Optional[str], Meta(description='To be used as a dataset parameter value')
+ apiKey: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
] = None
- apiVersionNumber: Annotated[
- Optional[str], Meta(description='To be used as a version parameter value')
+ apiVersionNumber: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
] = None
- apiUrl: Annotated[
- Optional[str], Meta(description="The URL describing the dataset's fields")
+ apiUrl: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
] = None
- apiDocumentationUrl: Annotated[
- Optional[str], Meta(description='A URL to the API console for each API')
+ apiDocumentationUrl: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
] = None
diff --git a/tests/data/expected/main/main_with_aliases_msgspec/output.py b/tests/data/expected/main/main_with_aliases_msgspec/output.py
--- a/tests/data/expected/main/main_with_aliases_msgspec/output.py
+++ b/tests/data/expected/main/main_with_aliases_msgspec/output.py
@@ -39,17 +39,17 @@ class Error(Struct):
class Api(Struct):
- apiKey: Annotated[
- Optional[str], Meta(description='To be used as a dataset parameter value')
+ apiKey: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
] = None
- apiVersionNumber: Annotated[
- Optional[str], Meta(description='To be used as a version parameter value')
+ apiVersionNumber: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
] = None
- apiUrl: Annotated[
- Optional[str], Meta(description="The URL describing the dataset's fields")
+ apiUrl: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
] = None
- apiDocumentationUrl: Annotated[
- Optional[str], Meta(description='A URL to the API console for each API')
+ apiDocumentationUrl: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
] = None
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -2688,6 +2688,10 @@ def test_main_openapi_nullable_strict_nullable():
'pydantic_v2.BaseModel',
'main_pattern_pydantic_v2',
),
+ (
+ 'msgspec.Struct',
+ 'main_pattern_msgspec',
+ ),
],
)
@freeze_time('2019-07-26')
@@ -2702,6 +2706,8 @@ def test_main_openapi_pattern(output_model, expected_output):
str(output_file),
'--input-file-type',
'openapi',
+ '--target-python',
+ '3.9',
'--output-model-type',
output_model,
]
| msgspec output uses regex instead of pattern for the Meta constraint
Strings with a pattern constraint are output as `Meta(regex='...')`; however, they should be output as `Meta(pattern='...')`.
**To Reproduce**
Example schema:
```yaml
openapi: "3.0.0"
info:
version: 1.0.0
title: Swagger Petstore
license:
name: MIT
servers:
- url: http://petstore.swagger.io/v1
components:
schemas:
info:
type: object
properties:
hostName:
type: string
format: hostname
arn:
type: string
pattern: '(^arn:([^:]*):([^:]*):([^:]*):(|\*|[\d]{12}):(.+)$)|^\*$'
```
Used commandline:
```bash
$ datamodel-codegen --input pattern.yaml --output output_file.py --input-file-type 'openapi' \
--target-python '3.9' \
--output-model-type msgspec.Struct
```
**Expected behavior**
The regex argument to the Meta annotation should be `pattern`
```python
class Info(Struct):
hostName: Optional[str] = None
arn: Optional[
Annotated[
str,
Meta(pattern='(^arn:([^:]*):([^:]*):([^:]*):(|\\*|[\\d]{12}):(.+)$)|^\\*$'),
]
] = None
```
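As a quick sanity check (not part of the original report; the `arn` field and its pattern come from the schema above, the `Arn` alias and sample payloads are illustrative): `msgspec.Meta` takes the keyword `pattern`, and the constraint is enforced when decoding.
```python
from typing import Annotated, Optional

import msgspec
from msgspec import Meta, Struct

# Same pattern as in the schema above, attached via Meta(pattern=...)
Arn = Annotated[
    str, Meta(pattern=r"(^arn:([^:]*):([^:]*):([^:]*):(|\*|[\d]{12}):(.+)$)|^\*$")
]


class Info(Struct):
    arn: Optional[Arn] = None


msgspec.json.decode(b'{"arn": "*"}', type=Info)           # passes the constraint
msgspec.json.decode(b'{"arn": "not-an-arn"}', type=Info)  # raises msgspec.ValidationError
```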
**Version:**
- OS: macOS
- Python version: 3.11.4
- datamodel-code-generator version: 0.22
| 2023-10-06T10:15:35 | -1.0 |
|
koxudaxi/datamodel-code-generator | 1,678 | koxudaxi__datamodel-code-generator-1678 | [
"1677"
] | 8ca47052e88b07f8677ddd07d9aa35d0917c2380 | diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -1027,6 +1027,7 @@ def parse_item(
)
elif item.allOf:
all_of_path = get_special_path('allOf', path)
+ all_of_path = [self.model_resolver.resolve_ref(all_of_path)]
return self.parse_all_of(
self.model_resolver.add(
all_of_path, name, singular_name=singular_name, class_name=True
| diff --git a/tests/data/expected/main/main_all_of_ref_self/output.py b/tests/data/expected/main/main_all_of_ref_self/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_all_of_ref_self/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: all_of_ref_self.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel
+
+
+class Version(BaseModel):
+ __root__: None
+
+
+class Model(BaseModel):
+ version: Optional[Version] = None
diff --git a/tests/data/jsonschema/all_of_ref_self.json b/tests/data/jsonschema/all_of_ref_self.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/all_of_ref_self.json
@@ -0,0 +1,17 @@
+{
+ "$schema": "https://json-schema.org/draft/2020-12/schema",
+ "$id": "https://example.com/all_of_ref_self.json",
+ "type": "object",
+ "properties": {
+ "version": {
+ "allOf": [
+ { "$ref": "#/$defs/version" }
+ ]
+ }
+ },
+ "$defs": {
+ "version": {
+ "type": "null"
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -5957,3 +5957,24 @@ def test_main_dataclass_default():
EXPECTED_MAIN_PATH / 'main_dataclass_field_default' / 'output.py'
).read_text()
)
+
+
+@freeze_time('2019-07-26')
+def test_main_all_of_ref_self():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'all_of_ref_self.json'),
+ '--output',
+ str(output_file),
+ '--input-file-type',
+ 'jsonschema',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (EXPECTED_MAIN_PATH / 'main_all_of_ref_self' / 'output.py').read_text()
+ )
| json schema files making use of `allOf` with a reference and a `$id` throw a `KeyError`
**Describe the bug**
json schema files making use of `allOf` with a reference and a `$id` throw a `KeyError`.
My understanding of the issue is that the `references` dictionary contains
non-resolved paths as keys for `allOf` items, and therefore the reference can't
be deleted when we later look it up by its resolved path.
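(A hypothetical illustration, not the real resolver: the mismatch described above boils down to a dict whose entry is stored under one key form and deleted under another. The keys below are made up, shaped like the ones in the tracebacks further down.)
```python
# Toy stand-in for ModelResolver.references: the entry is registered under one path form...
references = {
    "https://example.com/tmp.json#/properties/version": "reference",
}
# ...but delete() resolves the allOf "special" path to a different form:
key_used_for_delete = (
    "https://example.com/tmp.json/version#-datamodel-code-generator-#-allOf-#-special-#"
)
del references[key_used_for_delete]  # KeyError, as in the tracebacks below
```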
**To Reproduce**
Example schema:
```json
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"$id": "https://example.com/all_of_ref_self.json",
"type": "object",
"properties": {
"version": {
"allOf": [
{ "$ref": "#/$defs/version" }
]
}
},
"$defs": {
"version": {
"type": "null"
}
}
}
```
Used commandline:
```
$ datamodel-codegen --input-file-type jsonschema --input tmp.json
Traceback (most recent call last):
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/__main__.py", line 386, in main
generate(
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/__init__.py", line 446, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1136, in parse
self.parse_raw()
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1603, in parse_raw
self._parse_file(self.raw_obj, obj_name, path_parts)
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1687, in _parse_file
self.parse_obj(obj_name, root_obj, path_parts or ['#'])
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1555, in parse_obj
self.parse_object(name, obj, path)
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 912, in parse_object
fields = self.parse_object_fields(obj, path, get_module_name(class_name, None))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 867, in parse_object_fields
field_type = self.parse_item(modular_name, field, [*path, field_name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1030, in parse_item
return self.parse_all_of(
^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 782, in parse_all_of
return self._parse_object_common_part(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 677, in _parse_object_common_part
self.model_resolver.delete(path)
File "/home/pim/Shadow/dev/virtu/vmdesc/env/lib/python3.11/site-packages/datamodel_code_generator/reference.py", line 643, in delete
del self.references[self.resolve_ref(path)]
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'https://abc.com/tmp.json/version#-datamodel-code-generator-#-allOf-#-special-#'
```
**Expected behavior**
I would expect this schema to be parsed successfully (no exceptions).
**Version:**
- OS: Linux (Archlinux, 6.5.7-arch1-1)
- Python version: 3.11.5
- datamodel-code-generator version: 0.17.2 to 0.23.0
**Additional context**
This behaviour seemed to appear between 0.17.1 (works) and 0.17.2 (doesn't work). In particular, with commit https://github.com/koxudaxi/datamodel-code-generator/commit/0187b4aa7955adfc5b42090274e146546a132715
This pattern seems to be parsed correctly by other validators, e.g. https://www.jsonschemavalidator.net/s/FwyNnGC3
Real world jsonschemas also have this pattern, and show the same issue, for example with [stripe/stripe-apps](https://github.com/stripe/stripe-apps/blob/954a21bc807671e72a126ac04d72e2ac88063f5d/schema/stripe-app.schema.json):
```
$ cd $(mktemp -d)
$ python -m venv env
$ . env/bin/activate
$ pip install datamodel-code-generator
$ curl https://raw.githubusercontent.com/stripe/stripe-apps/main/schema/stripe-app.schema.json -o stripe-app.schema.json
$ datamodel-codegen --input stripe-app.schema.json
The input file type was determined to be: jsonschema
This can be specificied explicitly with the `--input-file-type` option.
Traceback (most recent call last):
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/__main__.py", line 388, in main
generate(
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/__init__.py", line 435, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1135, in parse
self.parse_raw()
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1603, in parse_raw
self._parse_file(self.raw_obj, obj_name, path_parts)
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1687, in _parse_file
self.parse_obj(obj_name, root_obj, path_parts or ['#'])
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1555, in parse_obj
self.parse_object(name, obj, path)
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 912, in parse_object
fields = self.parse_object_fields(obj, path, get_module_name(class_name, None))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 867, in parse_object_fields
field_type = self.parse_item(modular_name, field, [*path, field_name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1024, in parse_item
data_types=self.parse_one_of(
^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 641, in parse_one_of
data_types = self.parse_list_item(name, obj.oneOf, path, obj)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1075, in parse_list_item
return [
^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1076, in <listcomp>
self.parse_item(
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1041, in parse_item
return self.parse_object(
^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 912, in parse_object
fields = self.parse_object_fields(obj, path, get_module_name(class_name, None))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 867, in parse_object_fields
field_type = self.parse_item(modular_name, field, [*path, field_name])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 1030, in parse_item
return self.parse_all_of(
^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 782, in parse_all_of
return self._parse_object_common_part(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/parser/jsonschema.py", line 677, in _parse_object_common_part
self.model_resolver.delete(path)
File "/tmp/tmp.8CqYazcKUp/env/lib/python3.11/site-packages/datamodel_code_generator/reference.py", line 643, in delete
del self.references[self.resolve_ref(path)]
~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^^^^^
KeyError: 'https://stripe.com/stripe-app.schema.json/post_install_action#-datamodel-code-generator-#-oneOf-#-special-#/1#-datamodel-code-generator-#-object-#-special-#/type#-datamodel-code-generator-#-allOf-#-special-#'
```
| 2023-11-09T22:21:12 | -1.0 |
|
koxudaxi/datamodel-code-generator | 1,767 | koxudaxi__datamodel-code-generator-1767 | [
"1759"
] | 38bf2b9e62bfb8176ad3c49514461437f954c851 | diff --git a/datamodel_code_generator/model/pydantic_v2/base_model.py b/datamodel_code_generator/model/pydantic_v2/base_model.py
--- a/datamodel_code_generator/model/pydantic_v2/base_model.py
+++ b/datamodel_code_generator/model/pydantic_v2/base_model.py
@@ -87,10 +87,9 @@ def process_const(self) -> None:
self.const = True
self.nullable = False
const = self.extras['const']
- if self.data_type.type == 'str' and isinstance(
- const, str
- ): # pragma: no cover # Literal supports only str
- self.data_type = self.data_type.__class__(literals=[const])
+ self.data_type = self.data_type.__class__(literals=[const])
+ if not self.default:
+ self.default = const
def _process_data_in_str(self, data: Dict[str, Any]) -> None:
if self.const:
@@ -103,7 +102,7 @@ def _process_data_in_str(self, data: Dict[str, Any]) -> None:
def _process_annotated_field_arguments(
self, field_arguments: List[str]
) -> List[str]:
- if not self.required:
+ if not self.required or self.const:
if self.use_default_kwarg:
return [
f'default={repr(self.default)}',
| diff --git a/tests/data/expected/main/use_default_with_const/output.py b/tests/data/expected/main/use_default_with_const/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/use_default_with_const/output.py
@@ -0,0 +1,12 @@
+# generated by datamodel-codegen:
+# filename: use_default_with_const.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from pydantic import BaseModel
+from typing_extensions import Literal
+
+
+class UseDefaultWithConst(BaseModel):
+ foo: Literal['foo'] = 'foo'
diff --git a/tests/data/jsonschema/use_default_with_const.json b/tests/data/jsonschema/use_default_with_const.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/use_default_with_const.json
@@ -0,0 +1,10 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "type": "object",
+ "title": "Use default with const",
+ "properties": {
+ "foo": {
+ "const": "foo"
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1264,6 +1264,28 @@ def test_force_optional():
)
+@freeze_time('2019-07-26')
+def test_use_default_pydantic_v2_with_json_schema_const():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'use_default_with_const.json'),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ '--use-default',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (EXPECTED_MAIN_PATH / 'use_default_with_const' / 'output.py').read_text()
+ )
+
+
@freeze_time('2019-07-26')
def test_main_with_exclusive():
with TemporaryDirectory() as output_dir:
| JSON Schema `const` value and type ignored when setting defaults for Pydantic V2
**Describe the bug**
Use of `--force-optional` clobbers `--use-one-literal-as-default`. In my opinion `--force-optional` should use defaults where they exist and only fall back to assigning to `None` where they don't exist.
### Input
```json
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"title": "Force optional demo",
"properties": {
"foo": {
"const": "foo"
}
}
}
```
### Used command
```
datamodel-codegen --input force-optional-demo.json --output-model-type pydantic_v2.BaseModel --force-optional --use-one-literal-as-default
```
### Actual output
```python
# generated by datamodel-codegen:
# filename: force-optional-demo.json
# timestamp: 2023-12-05T15:06:01+00:00
from __future__ import annotations
from pydantic import BaseModel
from typing_extensions import Literal
class ForceOptionalDemo(BaseModel):
foo: Literal['foo'] = None
```
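A quick way to see why this actual output is unusable (a hedged sketch using the Pydantic v2 API, since the command above targets `pydantic_v2.BaseModel`; the class is re-declared from the block above): the model's own default does not survive a dump/validate round trip.
```python
from pydantic import BaseModel
from typing_extensions import Literal


class ForceOptionalDemo(BaseModel):  # copied from the actual output above
    foo: Literal['foo'] = None


m = ForceOptionalDemo()                # the bad default is accepted (defaults are not validated)
payload = m.model_dump_json()          # '{"foo":null}'
ForceOptionalDemo.model_validate_json(payload)  # raises ValidationError: Input should be 'foo'
```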
### Expected output
```python
# generated by datamodel-codegen:
# filename: force-optional-demo.json
# timestamp: 2023-12-05T15:06:01+00:00
from __future__ import annotations
from pydantic import BaseModel
from typing_extensions import Literal
class ForceOptionalDemo(BaseModel):
foo: Literal['foo'] = 'foo'
```
| 2023-12-08T10:00:27 | -1.0 |
|
koxudaxi/datamodel-code-generator | 1,829 | koxudaxi__datamodel-code-generator-1829 | [
"1821"
] | 1320fcbf0a67d3afa2210b16d5093c915cc33960 | diff --git a/datamodel_code_generator/format.py b/datamodel_code_generator/format.py
--- a/datamodel_code_generator/format.py
+++ b/datamodel_code_generator/format.py
@@ -7,6 +7,7 @@
from warnings import warn
import black
+import black.mode
import isort
from datamodel_code_generator.util import cached_property, load_toml
@@ -131,9 +132,15 @@ def __init__(
if wrap_string_literal is not None:
experimental_string_processing = wrap_string_literal
else:
- experimental_string_processing = config.get(
- 'experimental-string-processing'
- )
+ if black.__version__ < '24.1.0': # type: ignore
+ experimental_string_processing = config.get(
+ 'experimental-string-processing'
+ )
+ else:
+ experimental_string_processing = config.get('preview', False) and (
+ config.get('unstable', False)
+ or 'string_processing' in config.get('enable-unstable-feature', [])
+ )
if experimental_string_processing is not None: # pragma: no cover
if black.__version__.startswith('19.'): # type: ignore
@@ -141,10 +148,16 @@ def __init__(
f"black doesn't support `experimental-string-processing` option" # type: ignore
f' for wrapping string literal in {black.__version__}'
)
- else:
+ elif black.__version__ < '24.1.0': # type: ignore
black_kwargs[
'experimental_string_processing'
] = experimental_string_processing
+ elif experimental_string_processing:
+ black_kwargs['preview'] = True
+ black_kwargs['unstable'] = config.get('unstable', False)
+ black_kwargs['enabled_features'] = {
+ black.mode.Preview.string_processing
+ }
if TYPE_CHECKING:
self.black_mode: black.FileMode
| diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -21,6 +21,16 @@ jobs:
black-version: default
python-version: 3.8
pydantic-version: 1.8.2
+ - os: ubuntu-latest
+ isort-version: 5.6.4
+ black-version: 24.1.0
+ python-version: 3.12
+ pydantic-version: 2.4.2
+ - os: ubuntu-latest
+ isort-version: 5.6.4
+ black-version: 23.12.1
+ python-version: 3.12
+ pydantic-version: 2.4.2
exclude:
- os: windows-latest
black-version: 22.1.0
@@ -91,10 +101,10 @@ jobs:
if: matrix.pydantic-version != 'default'
run: |
poetry run pip install pydantic=="${{ matrix.pydantic-version }}"
- - name: Install Black 22.1.0
- if: matrix.black-version == '22.1.0'
+ - name: Install Black ${{ matrix.black-version }}
+ if: matrix.black-version != 'default'
run: |
- poetry run pip install black=="22.1.0"
+ poetry run pip install black=="${{ matrix.black-version }}"
- name: Lint
if: matrix.pydantic-version == 'default'
run: |
diff --git a/tests/parser/test_openapi.py b/tests/parser/test_openapi.py
--- a/tests/parser/test_openapi.py
+++ b/tests/parser/test_openapi.py
@@ -2,6 +2,7 @@
from pathlib import Path
from typing import List, Optional
+import black
import pydantic
import pytest
from packaging import version
@@ -713,6 +714,10 @@ def test_openapi_parser_responses_with_tag():
)
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
def test_openapi_parser_with_query_parameters():
parser = OpenAPIParser(
data_model_field_type=DataModelFieldBase,
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -703,6 +703,10 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
assert captured.err == inferred_message.format('openapi') + '\n'
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_pyproject():
if platform.system() == 'Windows':
@@ -1755,6 +1759,10 @@ def test_main_use_standard_collections(tmpdir_factory: TempdirFactory) -> None:
assert result == path.read_text()
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
def test_main_use_generic_container_types(tmpdir_factory: TempdirFactory) -> None:
output_directory = Path(tmpdir_factory.mktemp('output'))
@@ -1781,6 +1789,10 @@ def test_main_use_generic_container_types(tmpdir_factory: TempdirFactory) -> Non
assert result == path.read_text()
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@pytest.mark.benchmark
def test_main_use_generic_container_types_standard_collections(
tmpdir_factory: TempdirFactory,
@@ -2366,6 +2378,10 @@ def test_main_openapi_use_one_literal_as_default():
version.parse(pydantic.VERSION) < version.parse('1.9.0'),
reason='Require Pydantic version 1.9.0 or later ',
)
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_all():
with TemporaryDirectory() as output_dir:
@@ -2397,6 +2413,10 @@ def test_main_openapi_enum_models_as_literal_all():
version.parse(pydantic.VERSION) < version.parse('1.9.0'),
reason='Require Pydantic version 1.9.0 or later ',
)
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_openapi_enum_models_as_literal_py37(capsys):
with TemporaryDirectory() as output_dir:
@@ -2687,6 +2707,10 @@ def test_main_all_of_with_object():
)
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_combined_array():
with TemporaryDirectory() as output_dir:
@@ -3359,6 +3383,10 @@ def test_main_strict_types():
)
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_main_strict_types_all():
with TemporaryDirectory() as output_dir:
diff --git a/tests/test_main_kr.py b/tests/test_main_kr.py
--- a/tests/test_main_kr.py
+++ b/tests/test_main_kr.py
@@ -3,6 +3,7 @@
from pathlib import Path
from tempfile import TemporaryDirectory
+import black
import pytest
from freezegun import freeze_time
@@ -180,6 +181,10 @@ def test_main_custom_template_dir(capsys: CaptureFixture) -> None:
assert captured.err == inferred_message.format('openapi') + '\n'
+@pytest.mark.skipif(
+ black.__version__.split('.')[0] >= '24',
+ reason="Installed black doesn't support the old style",
+)
@freeze_time('2019-07-26')
def test_pyproject():
with TemporaryDirectory() as output_dir:
| Black 24.1.0 breaks code formatting if wrap-string-literal is set
**Describe the bug**
Black [24.1.0](https://github.com/psf/black/releases/tag/24.1.0) was just released and removes support for the deprecated `--experimental-string-processing` flag (psf/black#4096). This breaks the code in [`format.py`](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/datamodel_code_generator/format.py#L146) that uses this option:
```
Traceback (most recent call last):
File ".../python3.11/site-packages/datamodel_code_generator/__main__.py", line 429, in main
generate(
File ".../python3.11/site-packages/datamodel_code_generator/__init__.py", line 463, in generate
results = parser.parse()
^^^^^^^^^^^^^^
File ".../python3.11/site-packages/datamodel_code_generator/parser/base.py", line 1156, in parse
code_formatter: Optional[CodeFormatter] = CodeFormatter(
^^^^^^^^^^^^^^
File ".../python3.11/site-packages/datamodel_code_generator/format.py", line 152, in __init__
self.black_mode = black.FileMode(
^^^^^^^^^^^^^^^
TypeError: Mode.__init__() got an unexpected keyword argument 'experimental_string_processing'
```
**Expected behavior**
No crash.
**Version:**
- OS: Linux
- Python version: 3.11
- datamodel-code-generator version: 0.25.2
- black version: 24.1.0
**Additional context**
Possible mitigation:
- add a temporary upper bound to the `black` version spec in [pyproject.toml](https://github.com/koxudaxi/datamodel-code-generator/blob/acc6bf604b13626f22fc123d72ae08ff0a114155/pyproject.toml#L54)
- same, but in user environment definitions
- use `--preview --enable-unstable-feature string_processing` instead (as suggested by the black release notes).
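For the last option, a condensed sketch of the version guard (keyword names mirror the patch earlier in this record; treat their exact availability as an assumption that depends on the installed black version):
```python
import black
import black.mode

if black.__version__ < "24.1.0":  # naive string comparison, as in the patch
    mode = black.FileMode(experimental_string_processing=True)
else:
    mode = black.FileMode(
        preview=True,
        enabled_features={black.mode.Preview.string_processing},
    )
```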
| @airwoodix
Thank you for creating the issue.
OK, we should fix it. | 2024-02-01T07:23:54 | -1.0 |
koxudaxi/datamodel-code-generator | 1,942 | koxudaxi__datamodel-code-generator-1942 | [
"1920"
] | 01dd102c4f577a9993ff30946de639051c4b83c9 | diff --git a/datamodel_code_generator/model/msgspec.py b/datamodel_code_generator/model/msgspec.py
--- a/datamodel_code_generator/model/msgspec.py
+++ b/datamodel_code_generator/model/msgspec.py
@@ -33,7 +33,7 @@
def _has_field_assignment(field: DataModelFieldBase) -> bool:
- return bool(field.field) or not (
+ return not (
field.required
or (field.represented_default == 'None' and field.strip_default_none)
)
@@ -48,7 +48,9 @@ def import_extender(cls: Type[DataModelFieldBaseT]) -> Type[DataModelFieldBaseT]
@wraps(original_imports.fget) # type: ignore
def new_imports(self: DataModelFieldBaseT) -> Tuple[Import, ...]:
extra_imports = []
- if self.field:
+ field = self.field
+ # TODO: Improve field detection
+ if field and field.startswith('field('):
extra_imports.append(IMPORT_MSGSPEC_FIELD)
if self.field and 'lambda: convert' in self.field:
extra_imports.append(IMPORT_MSGSPEC_CONVERT)
@@ -177,6 +179,8 @@ def __str__(self) -> str:
if self.default != UNDEFINED and self.default is not None:
data['default'] = self.default
+ elif not self.required:
+ data['default'] = None
if self.required:
data = {
| diff --git a/tests/data/expected/main/main_msgspec_struct_snake_case/output.py b/tests/data/expected/main/main_msgspec_struct_snake_case/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_msgspec_struct_snake_case/output.py
@@ -0,0 +1,66 @@
+# generated by datamodel-codegen:
+# filename: api_ordered_required_fields.yaml
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import List, Optional
+
+from msgspec import Meta, Struct, field
+from typing_extensions import Annotated
+
+
+class Pet(Struct):
+ id: int
+ name: str
+ before_tag: str = field(name='beforeTag')
+ tag: Optional[str] = None
+
+
+Pets = List[Pet]
+
+
+class User(Struct):
+ id: int
+ name: str
+ tag: Optional[str] = None
+
+
+Users = List[User]
+
+
+Id = str
+
+
+Rules = List[str]
+
+
+class Error(Struct):
+ code: int
+ message: str
+
+
+class Api(Struct):
+ api_key: Optional[
+ Annotated[str, Meta(description='To be used as a dataset parameter value')]
+ ] = field(name='apiKey', default=None)
+ api_version_number: Optional[
+ Annotated[str, Meta(description='To be used as a version parameter value')]
+ ] = field(name='apiVersionNumber', default=None)
+ api_url: Optional[
+ Annotated[str, Meta(description="The URL describing the dataset's fields")]
+ ] = field(name='apiUrl', default=None)
+ api_documentation_url: Optional[
+ Annotated[str, Meta(description='A URL to the API console for each API')]
+ ] = field(name='apiDocumentationUrl', default=None)
+
+
+Apis = List[Api]
+
+
+class Event(Struct):
+ name: Optional[str] = None
+
+
+class Result(Struct):
+ event: Optional[Event] = None
diff --git a/tests/data/expected/main/main_with_aliases_msgspec/output.py b/tests/data/expected/main/main_with_aliases_msgspec/output.py
--- a/tests/data/expected/main/main_with_aliases_msgspec/output.py
+++ b/tests/data/expected/main/main_with_aliases_msgspec/output.py
@@ -57,7 +57,7 @@ class Api(Struct):
class Event(Struct):
- name_: Optional[str] = field(name='name')
+ name_: Optional[str] = field(name='name', default=None)
class Result(Struct):
diff --git a/tests/data/openapi/api_ordered_required_fields.yaml b/tests/data/openapi/api_ordered_required_fields.yaml
new file mode 100644
--- /dev/null
+++ b/tests/data/openapi/api_ordered_required_fields.yaml
@@ -0,0 +1,182 @@
+openapi: "3.0.0"
+info:
+ version: 1.0.0
+ title: Swagger Petstore
+ license:
+ name: MIT
+servers:
+ - url: http://petstore.swagger.io/v1
+paths:
+ /pets:
+ get:
+ summary: List all pets
+ operationId: listPets
+ tags:
+ - pets
+ parameters:
+ - name: limit
+ in: query
+ description: How many items to return at one time (max 100)
+ required: false
+ schema:
+ type: integer
+ format: int32
+ responses:
+ '200':
+ description: A paged array of pets
+ headers:
+ x-next:
+ description: A link to the next page of responses
+ schema:
+ type: string
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Pets"
+ default:
+ description: unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ x-amazon-apigateway-integration:
+ uri:
+ Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${PythonVersionFunction.Arn}/invocations
+ passthroughBehavior: when_no_templates
+ httpMethod: POST
+ type: aws_proxy
+ post:
+ summary: Create a pet
+ operationId: createPets
+ tags:
+ - pets
+ responses:
+ '201':
+ description: Null response
+ default:
+ description: unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ x-amazon-apigateway-integration:
+ uri:
+ Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${PythonVersionFunction.Arn}/invocations
+ passthroughBehavior: when_no_templates
+ httpMethod: POST
+ type: aws_proxy
+ /pets/{petId}:
+ get:
+ summary: Info for a specific pet
+ operationId: showPetById
+ tags:
+ - pets
+ parameters:
+ - name: petId
+ in: path
+ required: true
+ description: The id of the pet to retrieve
+ schema:
+ type: string
+ responses:
+ '200':
+ description: Expected response to a valid request
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Pets"
+ default:
+ description: unexpected error
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/Error"
+ x-amazon-apigateway-integration:
+ uri:
+ Fn::Sub: arn:aws:apigateway:${AWS::Region}:lambda:path/2015-03-31/functions/${PythonVersionFunction.Arn}/invocations
+ passthroughBehavior: when_no_templates
+ httpMethod: POST
+ type: aws_proxy
+components:
+ schemas:
+ Pet:
+ required:
+ - id
+ - name
+ - beforeTag
+ properties:
+ id:
+ type: integer
+ format: int64
+ default: 1
+ name:
+ type: string
+ beforeTag:
+ type: string
+ tag:
+ type: string
+ Pets:
+ type: array
+ items:
+ $ref: "#/components/schemas/Pet"
+ Users:
+ type: array
+ items:
+ required:
+ - id
+ - name
+ properties:
+ id:
+ type: integer
+ format: int64
+ name:
+ type: string
+ tag:
+ type: string
+ Id:
+ type: string
+ Rules:
+ type: array
+ items:
+ type: string
+ Error:
+ description: error result
+ required:
+ - code
+ - message
+ properties:
+ code:
+ type: integer
+ format: int32
+ message:
+ type: string
+ apis:
+ type: array
+ items:
+ type: object
+ properties:
+ apiKey:
+ type: string
+ description: To be used as a dataset parameter value
+ apiVersionNumber:
+ type: string
+ description: To be used as a version parameter value
+ apiUrl:
+ type: string
+ format: uri
+ description: "The URL describing the dataset's fields"
+ apiDocumentationUrl:
+ type: string
+ format: uri
+ description: A URL to the API console for each API
+ Event:
+ type: object
+ description: Event object
+ properties:
+ name:
+ type: string
+ Result:
+ type: object
+ properties:
+ event:
+ $ref: '#/components/schemas/Event'
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6139,6 +6139,33 @@ def test_main_msgspec_struct():
)
+@freeze_time('2019-07-26')
+def test_main_msgspec_struct_snake_case():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(OPEN_API_DATA_PATH / 'api_ordered_required_fields.yaml'),
+ '--output',
+ str(output_file),
+ # min msgspec python version is 3.8
+ '--target-python-version',
+ '3.8',
+ '--snake-case-field',
+ '--output-model-type',
+ 'msgspec.Struct',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH / 'main_msgspec_struct_snake_case' / 'output.py'
+ ).read_text()
+ )
+
+
@freeze_time('2019-07-26')
@pytest.mark.skipif(
black.__version__.split('.')[0] == '19',
| msgspec: Optional fields are missing a default when using `--snake-case-field`
### Steps to reproduce
1. Download the NVD CVE [schema][schema]
2. Generate a msgspec model:
```sh
# --snake-case-field is the important flag here
datamodel-codegen \
    --input $schema_json \
    --input-file-type jsonschema \
    --output-model-type 'msgspec.Struct' \
    --snake-case-field \
    --output "."
```
3. (Ignore the circular imports #836)
4. (Ignore wrong field ordering #1919)
5. Look at the `class CpeMatch` (and most other classes as well).
```python
class CpeMatch(Struct, kw_only=True):
vulnerable: bool
criteria: str
match_criteria_id: str = field(name='matchCriteriaId')
version_start_excluding: Optional[str] = field(name='versionStartExcluding')
version_start_including: Optional[str] = field(name='versionStartIncluding')
version_end_excluding: Optional[str] = field(name='versionEndExcluding')
version_end_including: Optional[str] = field(name='versionEndIncluding')
```
vs
```json
"cpe_match": {
"description": "CPE match string or range",
"type": "object",
"properties": {
"vulnerable": {"type": "boolean"},
"criteria": {"type": "string"},
"matchCriteriaId": {"type": "string", "format": "uuid"},
"versionStartExcluding": {"type": "string"},
"versionStartIncluding": {"type": "string"},
"versionEndExcluding": {"type": "string"},
"versionEndIncluding": {"type": "string"}
},
"required": ["vulnerable", "criteria", "matchCriteriaId"],
"additionalProperties": false
},
```
Note that the optional fields are missing the `default=None` parameter in the `field` call.
[schema]: https://csrc.nist.gov/schema/nvd/api/2.0/cve_api_json_2.0.schema
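To make the impact concrete, a small hedged sketch (field names are borrowed from the generated class above; the trimmed class name and payload are invented): without `default=None`, msgspec treats the renamed field as required when decoding.
```python
from typing import Optional

import msgspec
from msgspec import Struct, field


class CpeMatchTrimmed(Struct, kw_only=True):
    vulnerable: bool
    match_criteria_id: str = field(name='matchCriteriaId')
    version_start_excluding: Optional[str] = field(name='versionStartExcluding')  # no default


msgspec.json.decode(b'{"vulnerable": true, "matchCriteriaId": "x"}', type=CpeMatchTrimmed)
# raises msgspec.ValidationError: versionStartExcluding is treated as required
```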
### Expected behavior
The field should have a default of value `None`.
### Workaround
Do not use `--snake-case-field`.
### Setup
```sh
$ datamodel-codegen --version
0.25.5
$ python --version
Python 3.11.8
```
| 2024-05-01T20:21:48 | -1.0 |
|
koxudaxi/datamodel-code-generator | 1,962 | koxudaxi__datamodel-code-generator-1962 | [
"1910"
] | 5bab6270de86a83ff70358d87ca55cba7954b59f | diff --git a/datamodel_code_generator/parser/jsonschema.py b/datamodel_code_generator/parser/jsonschema.py
--- a/datamodel_code_generator/parser/jsonschema.py
+++ b/datamodel_code_generator/parser/jsonschema.py
@@ -1311,6 +1311,10 @@ def parse_root_type(
data_type = self.data_type_manager.get_data_type_from_full_path(
obj.custom_type_path, is_custom_type=True
)
+ elif obj.is_array:
+ data_type = self.parse_array_fields(
+ name, obj, get_special_path('array', path)
+ ).data_type
elif obj.anyOf or obj.oneOf:
reference = self.model_resolver.add(
path, name, loaded=True, class_name=True
| diff --git a/tests/data/expected/main/main_one_of_with_sub_schema_array_item/output.py b/tests/data/expected/main/main_one_of_with_sub_schema_array_item/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/main_one_of_with_sub_schema_array_item/output.py
@@ -0,0 +1,17 @@
+# generated by datamodel-codegen:
+# filename: one_of_with_sub_schema_array_item.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import List, Optional, Union
+
+from pydantic import AnyUrl, BaseModel, Field
+
+
+class SpatialPlan(BaseModel):
+ officialDocument: Optional[Union[str, List[AnyUrl]]] = Field(
+ None,
+ description='Link to the official documents that relate to the spatial plan.',
+ title='officialDocument',
+ )
diff --git a/tests/data/jsonschema/one_of_with_sub_schema_array_item.json b/tests/data/jsonschema/one_of_with_sub_schema_array_item.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/one_of_with_sub_schema_array_item.json
@@ -0,0 +1,25 @@
+{
+ "$schema": "http://json-schema.org/draft-04/schema#",
+ "title": "SpatialPlan",
+ "type": "object",
+ "properties": {
+ "officialDocument": {
+ "title": "officialDocument",
+ "description": "Link to the official documents that relate to the spatial plan.",
+ "oneOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "array",
+ "minItems": 1,
+ "items": {
+ "type": "string",
+ "format": "uri"
+ },
+ "uniqueItems": true
+ }
+ ]
+ }
+ }
+}
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6798,3 +6798,30 @@ def test_main_root_one_of():
path.relative_to(expected_directory)
).read_text()
assert result == path.read_text()
+
+
+@freeze_time('2019-07-26')
+def test_one_of_with_sub_schema_array_item():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'one_of_with_sub_schema_array_item.json'),
+ '--output',
+ str(output_file),
+ '--input-file-type',
+ 'jsonschema',
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'main_one_of_with_sub_schema_array_item'
+ / 'output.py'
+ ).read_text()
+ )
| oneOf with subschema array items not incorporated/generated as Any for pydantic.v2
**Describe the bug**
When using a JSON schema as input with a oneOf construct where one option is an array with items defined in a subschema, the resulting pydantic v2 model does not incorporate the subschema definition, but instead falls back to `list[Any]`.
**To Reproduce**
The following JSON schema snippet:
```json
"SpatialPlan": {
"type": "object",
"properties": {
"officialDocument": {
"title": "officialDocument",
"description": "Link to the official documents that relate to the spatial plan.",
"oneOf": [
{
"$ref": "definitions/voidable.json#/definitions/Voidable"
},
{
"type": "array",
"minItems": 1,
"items": {
"$ref": "definitions/ref.json#/definitions/FeatureRef"
},
"uniqueItems": true
}
]
}
```
leads to the pydantic v2 model:
```python
class OfficialDocument(RootModel[list[Any]]):
root: Annotated[
list[Any],
Field(
description='Link to the official documents that relate to the spatial plan.',
min_length=1,
title='officialDocument',
),
]
class SpatialPlan(BaseModel):
officialDocument: Annotated[
Voidable | OfficialDocument,
Field(
description='Link to the official documents that relate to the spatial plan.',
title='officialDocument',
),
]
```
Used commandline:
```
$ datamodel-codegen --target-python-version 3.10 --use-union-operator --use-standard-collections --use-schema-description --use-annotated --collapse-root-models --output-model-type pydantic_v2.BaseModel --input input.json --output output.py
```
**Expected behavior**
The resulting pydantic model should look like this:
```python
class OfficialDocument(RootModel[list[FeatureRef]]):
root: Annotated[
list[FeatureRef],
Field(
description="Link to the official documents that relate to the spatial plan.",
min_length=1,
title="officialDocument",
),
]
```
Or, maybe even more preferably, the additional RootModel definition should be dropped as a whole:
```python
class SpatialPlan(BaseModel):
officialDocument: Annotated[
list[FeatureRef] | Voidable,
Field(
description="Link to the official documents that relate to the spatial plan.",
min_length=1,
title="officialDocument",
),
]
```
**Version:**
- OS: Ubuntu 22.04 (WSL)
- Python version: 3.10
- datamodel-code-generator version: 0.25.5
**Additional context**
Add any other context about the problem here.
| Is this related to: https://github.com/koxudaxi/datamodel-code-generator/blob/fcab9a4d555d4b96d64bb277f974bb7507982fb2/datamodel_code_generator/parser/jsonschema.py#L681-L694
If so - or if you can provide another hint - maybe we can have a look and work on a PR. This issue is really hampering our use case.
I've been looking into a similar issue on my project - so far I think it may be related to enabling the `--field-constraints` option, which is also enabled by using the `--use-annotated` option.
I'm working off of a very slightly modified version of the [CycloneDX 1.5 schema](https://cyclonedx.org/docs/1.5/json/#tab-pane_components_items_licenses_oneOf_i1), where the `licenses` field here is changed from an `array` to `object` type (due to some other issue with datamodel-code-generator parsing the schema). I expect to get a Python class somewhere that includes the `expression` and `bom-ref` fields. Here's what I'm seeing using datamodel-codegen 0.25.6, with the command
`datamodel-codegen --input ~/temp/modified-bom-1.5.schema.json --output output-license-obj-annotated --use-annotated`:
```
class LicenseChoice1(BaseModel):
__root__: Annotated[
List[Any],
Field(
description='A tuple of exactly one SPDX License Expression.',
max_items=1,
min_items=1,
title='SPDX License Expression',
),
]
class LicenseChoice(BaseModel):
__root__: Annotated[
Union[List[LicenseChoiceItem], LicenseChoice1],
Field(
description='EITHER (list of SPDX licenses and/or named licenses) OR (tuple of one SPDX License Expression)',
title='License Choice',
),
]
```
When I remove `--use-annotated`, I get something more like what I expect:
```
class LicenseChoiceItem1(BaseModel):
class Config:
extra = Extra.forbid
expression: str = Field(
...,
examples=[
'Apache-2.0 AND (MIT OR GPL-2.0-only)',
'GPL-3.0-only WITH Classpath-exception-2.0',
],
title='SPDX License Expression',
)
bom_ref: Optional[RefType] = Field(
None,
alias='bom-ref',
description='An optional identifier which can be used to reference the license elsewhere in the BOM. Every bom-ref MUST be unique within the BOM.',
title='BOM Reference',
)
class LicenseChoice(BaseModel):
__root__: Union[List[LicenseChoiceItem], List[LicenseChoiceItem1]] = Field(
...,
description='EITHER (list of SPDX licenses and/or named licenses) OR (tuple of one SPDX License Expression)',
title='License Choice',
)
```
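One way to narrow this down further is to generate the model both with and without `--use-annotated` and diff the two outputs. A rough sketch is below; the schema path and output names are placeholders, and it assumes the `datamodel-codegen` CLI and a `diff` binary are available on the PATH.
```python
import subprocess
from pathlib import Path

schema = Path("modified-bom-1.5.schema.json")  # placeholder schema path

for variant, extra in {"plain": [], "annotated": ["--use-annotated"]}.items():
    out = Path(f"output_{variant}.py")
    # Same CLI invocation as above, toggling only --use-annotated.
    subprocess.run(
        ["datamodel-codegen", "--input", str(schema), "--output", str(out), *extra],
        check=True,
    )

# Inspect where `list[Any]` replaces the referenced item type in the annotated variant.
diff = subprocess.run(
    ["diff", "output_plain.py", "output_annotated.py"],
    capture_output=True, text=True,
)
print(diff.stdout)
```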
I'll keep digging, but for now it appears that using annotations/field constraints ends up dropping type information somewhere down that path. | 2024-05-15T16:07:22 | -1.0 |
koxudaxi/datamodel-code-generator | 1,991 | koxudaxi__datamodel-code-generator-1991 | [
"1990"
] | e68629d0602c35d734d3d5397476cce89b8d49d3 | diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -778,8 +778,10 @@ def __apply_discriminator_type(
discriminator_model.path.split('#/')[-1]
!= path.split('#/')[-1]
):
- # TODO: support external reference
- continue
+ if '#' in path or discriminator_model.path[
+ :-1
+ ] != path.lstrip('./'):
+ continue
type_names.append(name)
else:
type_names = [discriminator_model.path.split('/')[-1]]
| diff --git a/tests/data/expected/main/discriminator_with_external_reference/output.py b/tests/data/expected/main/discriminator_with_external_reference/output.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_reference/output.py
@@ -0,0 +1,26 @@
+# generated by datamodel-codegen:
+# filename: schema.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Union
+
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+
+
+class Type1(BaseModel):
+ type_: Literal['a'] = Field('a', title='Type ')
+
+
+class Type2(BaseModel):
+ type_: Literal['b'] = Field('b', title='Type ')
+
+
+class Type3(BaseModel):
+ type_: Literal['c'] = Field('c', title='Type ')
+
+
+class Response(BaseModel):
+ inner: Union[Type1, Type2, Type3] = Field(..., discriminator='type_', title='Inner')
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/artificial_folder/type1.json b/tests/data/jsonschema/discriminator_with_external_reference/artificial_folder/type1.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/discriminator_with_external_reference/artificial_folder/type1.json
@@ -0,0 +1,11 @@
+{
+ "properties": {
+ "type_": {
+ "const": "a",
+ "default": "a",
+ "title": "Type "
+ }
+ },
+ "title": "Type1",
+ "type": "object"
+}
\ No newline at end of file
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/schema.json b/tests/data/jsonschema/discriminator_with_external_reference/schema.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/discriminator_with_external_reference/schema.json
@@ -0,0 +1,44 @@
+{
+ "$def": {
+ "Type3": {
+ "properties": {
+ "type_": {
+ "const": "c",
+ "default": "c",
+ "title": "Type "
+ }
+ },
+ "title": "Type3",
+ "type": "object"
+ }
+ },
+ "properties": {
+ "inner": {
+ "discriminator": {
+ "mapping": {
+ "a": "./artificial_folder/type1.json",
+ "b": "./type2.json",
+ "c": "#/$def/Type3"
+ },
+ "propertyName": "type_"
+ },
+ "oneOf": [
+ {
+ "$ref": "./artificial_folder/type1.json"
+ },
+ {
+ "$ref": "./type2.json"
+ },
+ {
+ "$ref": "#/$def/Type3"
+ }
+ ],
+ "title": "Inner"
+ }
+ },
+ "required": [
+ "inner"
+ ],
+ "title": "Response",
+ "type": "object"
+}
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/type2.json b/tests/data/jsonschema/discriminator_with_external_reference/type2.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/discriminator_with_external_reference/type2.json
@@ -0,0 +1,11 @@
+{
+ "properties": {
+ "type_": {
+ "const": "b",
+ "default": "b",
+ "title": "Type "
+ }
+ },
+ "title": "Type2",
+ "type": "object"
+}
\ No newline at end of file
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6123,6 +6123,35 @@ def test_main_jsonschema_discriminator_literals():
)
+@freeze_time('2019-07-26')
+def test_main_jsonschema_external_discriminator():
+ with TemporaryDirectory() as output_dir:
+ output_file: Path = Path(output_dir) / 'output.py'
+ return_code: Exit = main(
+ [
+ '--input',
+ str(
+ JSON_SCHEMA_DATA_PATH
+ / 'discriminator_with_external_reference'
+ / 'schema.json'
+ ),
+ '--output',
+ str(output_file),
+ '--output-model-type',
+ 'pydantic_v2.BaseModel',
+ ]
+ )
+ assert return_code == Exit.OK
+ assert (
+ output_file.read_text()
+ == (
+ EXPECTED_MAIN_PATH
+ / 'discriminator_with_external_reference'
+ / 'output.py'
+ ).read_text()
+ )
+
+
@freeze_time('2019-07-26')
@pytest.mark.skipif(
black.__version__.split('.')[0] == '19',
| JSON Schema Discriminator With External Reference Support
**Is your feature request related to a problem? Please describe.**
While I love the project for most use cases, I'm always frustrated when trying to generate Pydantic models from JSON Schemas that utilize field discriminators with external references, as it results in errors and hinders my ability to work efficiently.
**Describe the solution you'd like**
I would like the `datamodel-codegen` tool to support parsing and generating Pydantic models for JSON Schemas that contain field discriminators referencing external schema files (e.g., `$ref`: "./type1.json" or similar references in the `discriminator.mapping`). This feature would allow me to effectively utilize JSON Schema's external reference mechanism, enabling more flexible and reusable schema designs.
**Describe alternatives you've considered**
I've considered alternative solutions such as manually parsing and preprocessing the JSON Schema to construct a schema where all external references are resolved internally within a `$def` object. However, this approach would be time-consuming, error-prone to apply widely, and may not provide the same level of performance and maintainability as the dedicated `datamodel-codegen` tool.
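For reference, a rough sketch of that manual preprocessing alternative. It assumes the external references point at local JSON files, inlines them under a `$defs` object, and ignores fragments inside external refs as well as `discriminator.mapping` values, so it is illustrative rather than a complete solution:
```python
import json
from pathlib import Path


def inline_external_refs(schema_path: Path) -> dict:
    """Copy externally referenced files into $defs and rewrite their $ref values."""
    schema = json.loads(schema_path.read_text())
    defs = schema.setdefault("$defs", {})

    def rewrite(node):
        if isinstance(node, dict):
            ref = node.get("$ref")
            if isinstance(ref, str) and not ref.startswith("#"):
                # External file reference, resolved relative to the root schema only.
                target = schema_path.parent / ref
                name = target.stem  # e.g. "type1"
                if name not in defs:
                    defs[name] = json.loads(target.read_text())
                node["$ref"] = f"#/$defs/{name}"
            for value in list(node.values()):
                rewrite(value)
        elif isinstance(node, list):
            for item in node:
                rewrite(item)

    rewrite(schema)
    return schema
```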
**Additional context**
Showcase of a breaking data model generation:
`datamodel-codegen --input schema.json`
`schema.json`:
```json
{
"properties": {
"inner": {
"discriminator": {
"mapping": {
"a": "./type1.json",
"b": "./type2.json"
},
"propertyName": "type_"
},
"oneOf": [
{
"$ref": "./type1.json"
},
{
"$ref": "./type2.json"
}
],
"title": "Inner"
}
},
"required": [
"inner"
],
"title": "Response",
"type": "object"
}
```
`type1.json`:
```json
{
"properties": {
"type_": {
"const": "a",
"default": "a",
"title": "Type "
}
},
"title": "Type1",
"type": "object"
}
```
`type2.json`:
```json
{
"properties": {
"type_": {
"const": "b",
"default": "b",
"title": "Type "
}
},
"title": "Type2",
"type": "object"
}
```
| 2024-06-04T13:48:56 | -1.0 |
|
koxudaxi/datamodel-code-generator | 1,999 | koxudaxi__datamodel-code-generator-1999 | [
"1141"
] | b1c0f22ad8c192a3de34a0be4c7671d83cd52b6d | diff --git a/datamodel_code_generator/parser/base.py b/datamodel_code_generator/parser/base.py
--- a/datamodel_code_generator/parser/base.py
+++ b/datamodel_code_generator/parser/base.py
@@ -700,6 +700,7 @@ def __change_from_import(
from_, import_ = full_path = relative(
model.module_name, data_type.full_name
)
+ import_ = import_.replace('-', '_')
alias = scoped_model_resolver.add(full_path, import_).name
@@ -778,10 +779,18 @@ def __apply_discriminator_type(
discriminator_model.path.split('#/')[-1]
!= path.split('#/')[-1]
):
- if '#' in path or discriminator_model.path[
- :-1
- ] != path.lstrip('./'):
- continue
+ if (
+ path.startswith('#/')
+ or discriminator_model.path[:-1]
+ != path.split('/')[-1]
+ ):
+ t_path = path[str(path).find('/') + 1 :]
+ t_disc = discriminator_model.path[
+ : str(discriminator_model.path).find('#')
+ ].lstrip('../')
+ t_disc_2 = '/'.join(t_disc.split('/')[1:])
+ if t_path != t_disc and t_path != t_disc_2:
+ continue
type_names.append(name)
else:
type_names = [discriminator_model.path.split('/')[-1]]
@@ -1252,6 +1261,7 @@ class Processed(NamedTuple):
init = True
else:
module = (*module[:-1], f'{module[-1]}.py')
+ module = tuple(part.replace('-', '_') for part in module)
else:
module = ('__init__.py',)
| diff --git a/tests/data/expected/main/discriminator_with_external_reference/output.py b/tests/data/expected/main/discriminator_with_external_reference/output.py
--- a/tests/data/expected/main/discriminator_with_external_reference/output.py
+++ b/tests/data/expected/main/discriminator_with_external_reference/output.py
@@ -4,7 +4,7 @@
from __future__ import annotations
-from typing import Union
+from typing import Optional, Union
from pydantic import BaseModel, Field
from typing_extensions import Literal
@@ -16,6 +16,15 @@ class Type1(BaseModel):
class Type2(BaseModel):
type_: Literal['b'] = Field('b', title='Type ')
+ ref_type: Optional[Type1] = Field(None, description='A referenced type.')
+
+
+class Type4(BaseModel):
+ type_: Literal['d'] = Field('d', title='Type ')
+
+
+class Type5(BaseModel):
+ type_: Literal['e'] = Field('e', title='Type ')
class Type3(BaseModel):
@@ -23,4 +32,6 @@ class Type3(BaseModel):
class Response(BaseModel):
- inner: Union[Type1, Type2, Type3] = Field(..., discriminator='type_', title='Inner')
+ inner: Union[Type1, Type2, Type3, Type4, Type5] = Field(
+ ..., discriminator='type_', title='Inner'
+ )
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/__init__.py b/tests/data/expected/main/discriminator_with_external_references_folder/__init__.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/__init__.py
@@ -0,0 +1,3 @@
+# generated by datamodel-codegen:
+# filename: discriminator_with_external_reference
+# timestamp: 2019-07-26T00:00:00+00:00
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/__init__.py b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/__init__.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/__init__.py
@@ -0,0 +1,3 @@
+# generated by datamodel-codegen:
+# filename: discriminator_with_external_reference
+# timestamp: 2019-07-26T00:00:00+00:00
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/artificial_folder/__init__.py b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/artificial_folder/__init__.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/artificial_folder/__init__.py
@@ -0,0 +1,3 @@
+# generated by datamodel-codegen:
+# filename: discriminator_with_external_reference
+# timestamp: 2019-07-26T00:00:00+00:00
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/artificial_folder/type_1.py b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/artificial_folder/type_1.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/artificial_folder/type_1.py
@@ -0,0 +1,12 @@
+# generated by datamodel-codegen:
+# filename: inner_folder/artificial_folder/type-1.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+
+
+class Type1(BaseModel):
+ type_: Literal['a'] = Field(..., const=True, title='Type ')
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/schema.py b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/schema.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/schema.py
@@ -0,0 +1,25 @@
+# generated by datamodel-codegen:
+# filename: inner_folder/schema.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Union
+
+from pydantic import BaseModel, Field
+from typing_extensions import Literal
+
+from .. import type_4
+from ..subfolder import type_5
+from . import type_2
+from .artificial_folder import type_1
+
+
+class Type3(BaseModel):
+ type_: Literal['c'] = Field(..., const=True, title='Type ')
+
+
+class Response(BaseModel):
+ inner: Union[type_1.Type1, type_2.Type2, Type3, type_4.Type4, type_5.Type5] = Field(
+ ..., discriminator='type_', title='Inner'
+ )
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/type_2.py b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/type_2.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/inner_folder/type_2.py
@@ -0,0 +1,16 @@
+# generated by datamodel-codegen:
+# filename: inner_folder/type-2.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from typing import Optional
+
+from pydantic import BaseModel, Field
+
+from .artificial_folder import type_1
+
+
+class Type2(BaseModel):
+ type_: Literal['b'] = Field(..., const=True, title='Type ')
+ ref_type: Optional[type_1.Type1] = Field(None, description='A referenced type.')
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/subfolder/__init__.py b/tests/data/expected/main/discriminator_with_external_references_folder/subfolder/__init__.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/subfolder/__init__.py
@@ -0,0 +1,3 @@
+# generated by datamodel-codegen:
+# filename: discriminator_with_external_reference
+# timestamp: 2019-07-26T00:00:00+00:00
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/subfolder/type_5.py b/tests/data/expected/main/discriminator_with_external_references_folder/subfolder/type_5.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/subfolder/type_5.py
@@ -0,0 +1,11 @@
+# generated by datamodel-codegen:
+# filename: subfolder/type-5.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from pydantic import BaseModel, Field
+
+
+class Type5(BaseModel):
+ type_: Literal['e'] = Field(..., const=True, title='Type ')
diff --git a/tests/data/expected/main/discriminator_with_external_references_folder/type_4.py b/tests/data/expected/main/discriminator_with_external_references_folder/type_4.py
new file mode 100644
--- /dev/null
+++ b/tests/data/expected/main/discriminator_with_external_references_folder/type_4.py
@@ -0,0 +1,11 @@
+# generated by datamodel-codegen:
+# filename: type-4.json
+# timestamp: 2019-07-26T00:00:00+00:00
+
+from __future__ import annotations
+
+from pydantic import BaseModel, Field
+
+
+class Type4(BaseModel):
+ type_: Literal['d'] = Field(..., const=True, title='Type ')
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/artificial_folder/type1.json b/tests/data/jsonschema/discriminator_with_external_reference/inner_folder/artificial_folder/type-1.json
similarity index 100%
rename from tests/data/jsonschema/discriminator_with_external_reference/artificial_folder/type1.json
rename to tests/data/jsonschema/discriminator_with_external_reference/inner_folder/artificial_folder/type-1.json
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/schema.json b/tests/data/jsonschema/discriminator_with_external_reference/inner_folder/schema.json
similarity index 59%
rename from tests/data/jsonschema/discriminator_with_external_reference/schema.json
rename to tests/data/jsonschema/discriminator_with_external_reference/inner_folder/schema.json
--- a/tests/data/jsonschema/discriminator_with_external_reference/schema.json
+++ b/tests/data/jsonschema/discriminator_with_external_reference/inner_folder/schema.json
@@ -16,21 +16,29 @@
"inner": {
"discriminator": {
"mapping": {
- "a": "./artificial_folder/type1.json",
- "b": "./type2.json",
- "c": "#/$def/Type3"
+ "a": "./artificial_folder/type-1.json",
+ "b": "./type-2.json",
+ "c": "#/$def/Type3",
+ "d": "../type-4.json",
+ "e": "../subfolder/type-5.json"
},
"propertyName": "type_"
},
"oneOf": [
{
- "$ref": "./artificial_folder/type1.json"
+ "$ref": "./artificial_folder/type-1.json"
},
{
- "$ref": "./type2.json"
+ "$ref": "./type-2.json"
},
{
"$ref": "#/$def/Type3"
+ },
+ {
+ "$ref": "../type-4.json"
+ },
+ {
+ "$ref": "../subfolder/type-5.json"
}
],
"title": "Inner"
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/inner_folder/type-2.json b/tests/data/jsonschema/discriminator_with_external_reference/inner_folder/type-2.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/discriminator_with_external_reference/inner_folder/type-2.json
@@ -0,0 +1,15 @@
+{
+ "properties": {
+ "type_": {
+ "const": "b",
+ "default": "b",
+ "title": "Type "
+ },
+ "ref_type": {
+ "$ref": "./artificial_folder/type-1.json",
+ "description": "A referenced type."
+ }
+ },
+ "title": "Type2",
+ "type": "object"
+}
\ No newline at end of file
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/type2.json b/tests/data/jsonschema/discriminator_with_external_reference/subfolder/type-5.json
similarity index 58%
rename from tests/data/jsonschema/discriminator_with_external_reference/type2.json
rename to tests/data/jsonschema/discriminator_with_external_reference/subfolder/type-5.json
--- a/tests/data/jsonschema/discriminator_with_external_reference/type2.json
+++ b/tests/data/jsonschema/discriminator_with_external_reference/subfolder/type-5.json
@@ -1,11 +1,11 @@
{
"properties": {
"type_": {
- "const": "b",
- "default": "b",
+ "const": "e",
+ "default": "e",
"title": "Type "
}
},
- "title": "Type2",
+ "title": "Type5",
"type": "object"
}
\ No newline at end of file
diff --git a/tests/data/jsonschema/discriminator_with_external_reference/type-4.json b/tests/data/jsonschema/discriminator_with_external_reference/type-4.json
new file mode 100644
--- /dev/null
+++ b/tests/data/jsonschema/discriminator_with_external_reference/type-4.json
@@ -0,0 +1,11 @@
+{
+ "properties": {
+ "type_": {
+ "const": "d",
+ "default": "d",
+ "title": "Type "
+ }
+ },
+ "title": "Type4",
+ "type": "object"
+}
\ No newline at end of file
diff --git a/tests/test_main.py b/tests/test_main.py
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -6133,6 +6133,7 @@ def test_main_jsonschema_external_discriminator():
str(
JSON_SCHEMA_DATA_PATH
/ 'discriminator_with_external_reference'
+ / 'inner_folder'
/ 'schema.json'
),
'--output',
@@ -6152,6 +6153,29 @@ def test_main_jsonschema_external_discriminator():
)
+@freeze_time('2019-07-26')
+def test_main_jsonschema_external_discriminator_folder():
+ with TemporaryDirectory() as output_dir:
+ output_path: Path = Path(output_dir)
+ return_code: Exit = main(
+ [
+ '--input',
+ str(JSON_SCHEMA_DATA_PATH / 'discriminator_with_external_reference'),
+ '--output',
+ str(output_path),
+ ]
+ )
+ assert return_code == Exit.OK
+ main_modular_dir = (
+ EXPECTED_MAIN_PATH / 'discriminator_with_external_references_folder'
+ )
+ for path in main_modular_dir.rglob('*.py'):
+ result = output_path.joinpath(
+ path.relative_to(main_modular_dir)
+ ).read_text()
+ assert result == path.read_text()
+
+
@freeze_time('2019-07-26')
@pytest.mark.skipif(
black.__version__.split('.')[0] == '19',
| Generated Python file names contain invalid characters
**Describe the bug**
When generating data models from schemas in a directory (owned by another team in my case), the generated file names contain invalid characters, which prevents Python from importing the modules.
**To Reproduce**
example: INPUT
```
schemas:
- order-request.schema.json
- cancel-request.schema.json
```
example: OUTPUT
```
gen:
- order-request.schema.py
- cancel-request.schema.py
```
Used commandline:
```
$ datamodel-codegen --input "../schemas" --input-file-type jsonschema --output "./gen"
```
**Expected behavior**
The file names should be valid Python module names, e.g. with periods and dash characters replaced by underscores.
**Version:**
- OS: MacOs
- Python version: 3.8
- datamodel-code-generator version: 0.17.1
**Additional context**
Workaround to import the module: https://stackoverflow.com/questions/8350853/how-to-import-module-when-module-name-has-a-dash-or-hyphen-in-it
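The linked workaround boils down to loading the generated module from its file path instead of using an `import` statement; a minimal sketch (the module and file names below are just examples matching the output above):
```python
import importlib.util

# Load gen/order-request.schema.py despite the dashes and extra dots in its name.
spec = importlib.util.spec_from_file_location(
    "order_request_schema", "gen/order-request.schema.py"
)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

# The generated models are now available as attributes of `module`.
```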
| @pulasthibandara
Thank you for creating the issue.
I will fix it.
Apologies, closed the wrong issue. | 2024-06-10T20:55:56 | -1.0 |
mindsdb/lightwood | 1,206 | mindsdb__lightwood-1206 | [
"988"
] | 2ca281bab206153eae86c67eb8798da6c4545de4 | diff --git a/lightwood/api/predictor.py b/lightwood/api/predictor.py
--- a/lightwood/api/predictor.py
+++ b/lightwood/api/predictor.py
@@ -28,6 +28,7 @@ class PredictorInterface:
You can also use the predictor to now estimate new data:
- ``predict``: Deploys the chosen best model, and evaluates the given data to provide target estimates.
+ - ``test``: Similar to predict, but user also passes an accuracy function that will be used to compute a metric with the generated predictions.
- ``save``: Saves the Predictor object for further use.
The ``PredictorInterface`` is created via J{ai}son's custom code creation. A problem inherits from this class with pre-populated routines to fill out expected results, given the nature of each problem type.
@@ -127,12 +128,27 @@ def adjust(self, new_data: pd.DataFrame, old_data: Optional[pd.DataFrame] = None
def predict(self, data: pd.DataFrame, args: Dict[str, object] = {}) -> pd.DataFrame:
"""
- Intakes raw data to provide predicted values for your trained model.
+ Intakes raw data to provide model predictions.
+
+ :param data: Data (n_samples, n_columns) that the model will use as input to predict the corresponding target value for each sample.
+ :param args: any parameters used to customize inference behavior. Wrapped as a ``PredictionArguments`` object.
+
+ :returns: A dataframe containing predictions and additional sample-wise information. `n_samples` rows.
+ """ # noqa
+ pass
+
+ def test(
+ self, data: pd.DataFrame, metrics: list, args: Dict[str, object] = {}, strict: bool = False
+ ) -> pd.DataFrame:
+ """
+ Intakes raw data to compute values for a list of provided metrics using a Lightwood predictor.
:param data: Data (n_samples, n_columns) that the model(s) will evaluate on and provide the target prediction.
+ :param metrics: A list of metrics to evaluate the model's performance on.
:param args: parameters needed to update the predictor ``PredictionArguments`` object, which holds any parameters relevant for prediction.
+ :param strict: If True, the function will raise an error if the model does not support any of the requested metrics. Otherwise it skips them.
- :returns: A dataframe of predictions of the same length of input.
+ :returns: A dataframe with `n_metrics` columns, each cell containing the respective score of each metric.
""" # noqa
pass
diff --git a/lightwood/helpers/codegen.py b/lightwood/helpers/codegen.py
--- a/lightwood/helpers/codegen.py
+++ b/lightwood/helpers/codegen.py
@@ -505,6 +505,50 @@ def _timed_call(encoded_ds):
predict_body = align(predict_body, 2)
+ # ----------------- #
+ # Test Body
+ # ----------------- #
+ test_body = """
+preds = self.predict(data, args)
+preds = preds.rename(columns={'prediction': self.target})
+filtered = []
+
+# filter metrics if not supported
+for metric in metrics:
+ # metric should be one of: an actual function, registered in the model class, or supported by the evaluator
+ if not (callable(metric) or metric in self.accuracy_functions or metric in mdb_eval_accuracy_metrics):
+ if strict:
+ raise Exception(f'Invalid metric: {metric}')
+ else:
+ log.warning(f'Invalid metric: {metric}. Skipping...')
+ else:
+ filtered.append(metric)
+
+metrics = filtered
+try:
+ labels = self.model_analysis.histograms[self.target]['x']
+except:
+ if strict:
+ raise Exception('Label histogram not found')
+ else:
+ label_map = None # some accuracy functions will crash without this, be mindful
+scores = evaluate_accuracies(
+ data,
+ preds[self.target],
+ self.target,
+ metrics,
+ ts_analysis=self.ts_analysis,
+ labels=labels
+ )
+
+# TODO: remove once mdb_eval returns an actual list
+scores = {k: [v] for k, v in scores.items() if not isinstance(v, list)}
+
+return pd.DataFrame.from_records(scores) # TODO: add logic to disaggregate per-mixer
+"""
+
+ test_body = align(test_body, 2)
+
predictor_code = f"""
{IMPORTS}
{IMPORT_EXTERNAL_DIRS}
@@ -597,6 +641,11 @@ def adjust(self, train_data: Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame],
@timed_predictor
def predict(self, data: pd.DataFrame, args: Dict = {{}}) -> pd.DataFrame:
{predict_body}
+
+ def test(
+ self, data: pd.DataFrame, metrics: list, args: Dict[str, object] = {{}}, strict: bool = False
+ ) -> pd.DataFrame:
+{test_body}
"""
try:
diff --git a/lightwood/helpers/constants.py b/lightwood/helpers/constants.py
--- a/lightwood/helpers/constants.py
+++ b/lightwood/helpers/constants.py
@@ -45,6 +45,9 @@
from dataprep_ml.splitters import splitter
from dataprep_ml.imputers import *
+from mindsdb_evaluator import evaluate_accuracies
+from mindsdb_evaluator.accuracy import __all__ as mdb_eval_accuracy_metrics
+
import pandas as pd
from typing import Dict, List, Union, Optional
import os
| diff --git a/tests/integration/basic/test_categorical.py b/tests/integration/basic/test_categorical.py
--- a/tests/integration/basic/test_categorical.py
+++ b/tests/integration/basic/test_categorical.py
@@ -69,3 +69,11 @@ def test_2_binary_no_analysis(self):
self.assertTrue(balanced_accuracy_score(test['target'], predictions['prediction']) > 0.5)
self.assertTrue('confidence' not in predictions.columns)
+ metrics = ['balanced_accuracy_score', 'accuracy_score', 'precision_score']
+ results = predictor.test(test, metrics)
+
+ for metric in metrics:
+ assert metric in results.columns
+ assert 0.5 < results[metric].iloc[0] < 1.0
+
+ print(results)
| `Test` method for PredictorInterface
We should add a new method to the predictor interface for arbitrary metric computation given a dataframe with values for the target column.
```python
pdef = ProblemDefinition.from_dict({...})
json_ai = json_ai_from_problem(train_df, problem_definition=pdef)
predictor_class_code = code_from_json_ai(json_ai)
predictor = predictor_from_code(predictor_class_code)
predictor.learn(train_df)
# existing behavior
predictions = predictor.predict(test_df)
# new behavior
# 1) string for already supported metrics and user-defined modules
metrics = predictor.test(test_df, metrics=['mean_absolute_error', 'my_custom_module.method_name'])
# 2) support for callables and lambdas
from sklearn.metrics import mean_absolute_error
metrics = predictor.test(test_df, metrics=[mean_absolute_error, lambda x, y: abs(x - y)])
```
Returned object should be a Pandas dataframe with the value of each passed metric:
```python
In[1]: metrics
Out[1]:
                     metric_name            value
mean_absolute_error  mean_absolute_error    32433
lambda_0             lambda_0               45576
```
It may also be worth defining an extra kwarg so that the user can retrieve predictions (as if they were using `predict()`):
```python
metrics, predictions = predictor.test(test_df, metrics=['mean_absolute_error'], return_preds=True)
```
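A rough sketch of how such a `test` method could wrap the existing `predict`, assuming the predictor exposes `self.target` and that `predict` returns a `prediction` column (string-based metric lookup is left out, and every lambda would show up as `<lambda>` here):
```python
import pandas as pd

def test(self, test_df: pd.DataFrame, metrics: list, return_preds: bool = False):
    # Reuse the existing prediction path, then score it against the ground truth.
    predictions = self.predict(test_df)
    y_true = test_df[self.target]
    y_pred = predictions['prediction']

    rows = []
    for metric in metrics:
        name = getattr(metric, '__name__', str(metric))
        rows.append({'metric_name': name, 'value': metric(y_true, y_pred)})

    scores = pd.DataFrame(rows).set_index('metric_name', drop=False)
    return (scores, predictions) if return_preds else scores
```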
| 2024-02-29T11:03:54 | -1.0 |
|
pretix/pretix | 3,847 | pretix__pretix-3847 | [
"65"
] | a5d4434a64f485fdcd504c98917ccd4bcc2d1619 | diff --git a/src/pretix/base/migrations/0257_item_default_price_not_null.py b/src/pretix/base/migrations/0257_item_default_price_not_null.py
new file mode 100644
--- /dev/null
+++ b/src/pretix/base/migrations/0257_item_default_price_not_null.py
@@ -0,0 +1,19 @@
+# Generated by Django 4.2.9 on 2024-01-30 11:11
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+ dependencies = [
+ ("pretixbase", "0256_itemvariation_unavail_modes"),
+ ]
+
+ operations = [
+ migrations.AlterField(
+ model_name="item",
+ name="default_price",
+ field=models.DecimalField(decimal_places=2, default=0, max_digits=13),
+ preserve_default=False,
+ ),
+ ]
diff --git a/src/pretix/base/models/items.py b/src/pretix/base/models/items.py
--- a/src/pretix/base/models/items.py
+++ b/src/pretix/base/models/items.py
@@ -430,7 +430,7 @@ class Item(LoggedModel):
help_text=_("If this product has multiple variations, you can set different prices for each of the "
"variations. If a variation does not have a special price or if you do not have variations, "
"this price will be used."),
- max_digits=13, decimal_places=2, null=True
+ max_digits=13, decimal_places=2,
)
free_price = models.BooleanField(
default=False,
| diff --git a/src/tests/api/test_cart.py b/src/tests/api/test_cart.py
--- a/src/tests/api/test_cart.py
+++ b/src/tests/api/test_cart.py
@@ -934,7 +934,7 @@ def test_cartpos_create_with_voucher_unknown(token_client, organizer, event, ite
@pytest.mark.django_db
def test_cartpos_create_with_voucher_invalid_item(token_client, organizer, event, item, quota):
with scopes_disabled():
- item2 = event.items.create(name="item2")
+ item2 = event.items.create(name="item2", default_price=0)
voucher = event.vouchers.create(code="FOOBAR", item=item2)
res = copy.deepcopy(CARTPOS_CREATE_PAYLOAD)
res['item'] = item.pk
| An item default price of None does not make sense
| Setting the field to `null=False` currently causes problems when creating items.
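The underlying issue is the NOT NULL constraint that dropping `null=True` introduces; a standalone illustration of the failure mode with sqlite3 (the table is a stand-in for the real `Item` model, which is why the test above now passes `default_price=0` explicitly):
```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE item (name TEXT, default_price DECIMAL NOT NULL)")

# Works once a price is supplied, mirroring event.items.create(name="item2", default_price=0):
conn.execute("INSERT INTO item (name, default_price) VALUES (?, ?)", ("item2", 0))

try:
    conn.execute("INSERT INTO item (name) VALUES (?)", ("item2",))  # no price given
except sqlite3.IntegrityError as exc:
    print(exc)  # NOT NULL constraint failed: item.default_price
```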
| 2024-01-30T11:14:57 | -1.0 |
pretix/pretix | 3,992 | pretix__pretix-3992 | [
"3984"
] | 4fd7d406a0646cb4d5bc3d18766150cc8711e517 | diff --git a/src/pretix/api/serializers/organizer.py b/src/pretix/api/serializers/organizer.py
--- a/src/pretix/api/serializers/organizer.py
+++ b/src/pretix/api/serializers/organizer.py
@@ -79,8 +79,8 @@ class CustomerSerializer(I18nAwareModelSerializer):
class Meta:
model = Customer
- fields = ('identifier', 'external_identifier', 'email', 'name', 'name_parts', 'is_active', 'is_verified', 'last_login', 'date_joined',
- 'locale', 'last_modified', 'notes')
+ fields = ('identifier', 'external_identifier', 'email', 'phone', 'name', 'name_parts', 'is_active',
+ 'is_verified', 'last_login', 'date_joined', 'locale', 'last_modified', 'notes')
def update(self, instance, validated_data):
if instance and instance.provider_id:
| diff --git a/src/tests/api/test_customers.py b/src/tests/api/test_customers.py
--- a/src/tests/api/test_customers.py
+++ b/src/tests/api/test_customers.py
@@ -29,6 +29,7 @@ def customer(organizer, event):
return organizer.customers.create(
identifier="8WSAJCJ",
email="foo@example.org",
+ phone="+493012345678",
name_parts={"_legacy": "Foo"},
name_cached="Foo",
is_verified=False,
@@ -39,6 +40,7 @@ def customer(organizer, event):
"identifier": "8WSAJCJ",
"external_identifier": None,
"email": "foo@example.org",
+ "phone": "+493012345678",
"name": "Foo",
"name_parts": {
"_legacy": "Foo",
diff --git a/src/tests/api/test_reusable_media.py b/src/tests/api/test_reusable_media.py
--- a/src/tests/api/test_reusable_media.py
+++ b/src/tests/api/test_reusable_media.py
@@ -155,6 +155,7 @@ def test_medium_detail(token_client, organizer, event, medium, giftcard, custome
"identifier": customer.identifier,
"external_identifier": None,
"email": "foo@example.org",
+ "phone": None,
"name": "Foo",
"name_parts": {"_legacy": "Foo"},
"is_active": True,
| Customer's phone number is not available in the API
### Problem and impact
Unable to pull customer's phone number from API
### Expected behaviour
`GET /api/v1/organizers/(organizer)/customers/` should return the `phone` field
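A hedged sketch of how the expected behaviour could be checked once the field is exposed (host, organizer slug, and token are placeholders; it assumes pretix's token auth header and the usual paginated `results` envelope):
```python
import requests

BASE = "https://pretix.example.org/api/v1"        # placeholder host
ORGANIZER = "myorganizer"                          # placeholder organizer slug
HEADERS = {"Authorization": "Token <api-token>"}   # placeholder API token

resp = requests.get(f"{BASE}/organizers/{ORGANIZER}/customers/", headers=HEADERS)
resp.raise_for_status()

for customer in resp.json()["results"]:
    # After the fix, each customer record should expose "phone" next to "email".
    print(customer["identifier"], customer.get("email"), customer.get("phone"))
```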
### Steps to reproduce
1. Have a customer with a phone number in the system
2. Validate that the phone number is visible in the UI
3. Make the API call `GET /api/v1/organizers/(organizer)/customers/` and observe that all fields except phone are exposed
### Screenshots
_No response_
### Link
_No response_
### Browser (software, desktop or mobile?) and version
_No response_
### Operating system, dependency versions
_No response_
### Version
_No response_
| 2024-03-15T15:53:06 | -1.0 |
|
secondmind-labs/trieste | 441 | secondmind-labs__trieste-441 | [
"439"
] | 312c049cdb8901a5938dcbbb70ad927a3d75b8f7 | diff --git a/docs/notebooks/active_learning.pct.py b/docs/notebooks/active_learning.pct.py
--- a/docs/notebooks/active_learning.pct.py
+++ b/docs/notebooks/active_learning.pct.py
@@ -19,11 +19,11 @@
# %%
-from trieste.objectives import scaled_branin
+from trieste.objectives import BRANIN_SEARCH_SPACE, scaled_branin
from util.plotting_plotly import plot_function_plotly
from trieste.space import Box
-search_space = Box([0, 0], [1, 1])
+search_space = BRANIN_SEARCH_SPACE
fig = plot_function_plotly(
scaled_branin, search_space.lower, search_space.upper, grid_density=20
@@ -32,7 +32,7 @@
fig.show()
# %% [markdown]
-# We begin our Bayesian active learning from a two-point initial design built from a space-filling Halton sequence.
+# We begin our Bayesian active learning from a small initial design built from a space-filling Halton sequence.
# %%
import trieste
@@ -47,16 +47,24 @@
# %% [markdown]
# ## Surrogate model
#
-# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper.
+# Just like in sequential optimization, we fit a surrogate Gaussian process model as implemented in GPflow to the initial data. The GPflow models cannot be used directly in our Bayesian optimization routines, so we build a GPflow's `GPR` model and pass it to the `GaussianProcessRegression` wrapper. As a good practice, we use priors for the kernel hyperparameters.
# %%
import gpflow
from trieste.models.gpflow.models import GaussianProcessRegression
+import tensorflow_probability as tfp
def build_model(data):
variance = tf.math.reduce_variance(data.observations)
- kernel = gpflow.kernels.RBF(variance=variance, lengthscales=[2, 2])
+ kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2])
+ prior_scale = tf.cast(1.0, dtype=tf.float64)
+ kernel.variance.prior = tfp.distributions.LogNormal(
+ tf.cast(-2.0, dtype=tf.float64), prior_scale
+ )
+ kernel.lengthscales.prior = tfp.distributions.LogNormal(
+ tf.math.log(kernel.lengthscales), prior_scale
+ )
gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
gpflow.set_trainable(gpr.likelihood, False)
@@ -79,9 +87,7 @@ def build_model(data):
from trieste.acquisition.rule import EfficientGlobalOptimization
acq = PredictiveVariance()
-rule = EfficientGlobalOptimization(
- builder=acq, optimizer=generate_continuous_optimizer()
-)
+rule = EfficientGlobalOptimization(builder=acq) # type: ignore
bo = trieste.bayesian_optimizer.BayesianOptimizer(observer, search_space)
# %% [markdown]
@@ -144,12 +150,14 @@ def pred_var(x):
# %% [markdown]
# ## Batch active learning using predictive variance
#
-# For some cases, query several points at a time can be convenient by doing batch active learning. For this case, we must pass a num_query_points input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is, it tends to query in high variance area less accurately, compared to the sequentially drawing one point at a time.
+# In cases when we can evaluate the black-box function in parallel, it would be useful to produce a batch of points rather than a single point. `PredictiveVariance` acquisition function can also perform batch active learning. We must pass a `num_query_points` input to our `EfficientGlobalOptimization` rule. The drawback of the batch predictive variance is that it tends to query in high variance area less accurately, compared to sequentially drawing one point at a time.
# %%
bo_iter = 5
num_query = 3
+
model = build_model(initial_data)
+
acq = PredictiveVariance()
rule = EfficientGlobalOptimization(
num_query_points=num_query,
| diff --git a/tests/integration/test_active_learning.py b/tests/integration/test_active_learning.py
new file mode 100644
--- /dev/null
+++ b/tests/integration/test_active_learning.py
@@ -0,0 +1,251 @@
+# Copyright 2021 The Trieste Contributors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""
+Integration tests for various forms of active learning implemented in Trieste.
+"""
+
+from __future__ import annotations
+
+import gpflow
+import pytest
+import tensorflow as tf
+import tensorflow_probability as tfp
+
+from tests.util.misc import random_seed
+from trieste.acquisition.function import ExpectedFeasibility, PredictiveVariance
+from trieste.acquisition.rule import AcquisitionRule, EfficientGlobalOptimization
+from trieste.bayesian_optimizer import BayesianOptimizer
+from trieste.data import Dataset
+from trieste.models import TrainableProbabilisticModel
+from trieste.models.gpflow import GaussianProcessRegression
+from trieste.objectives import BRANIN_SEARCH_SPACE, branin, scaled_branin
+from trieste.objectives.utils import mk_observer
+from trieste.observer import Observer
+from trieste.space import Box, SearchSpace
+from trieste.types import TensorType
+
+
+@random_seed
+@pytest.mark.parametrize(
+ "num_steps, acquisition_rule",
+ [
+ (50, EfficientGlobalOptimization(PredictiveVariance())),
+ ],
+)
+def test_optimizer_learns_scaled_branin_function(
+ num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace]
+) -> None:
+ """
+ Ensure that the objective function is effectively learned, such that the final model
+ fits well and predictions are close to actual objective values.
+ """
+
+ search_space = BRANIN_SEARCH_SPACE
+
+ def build_model(data: Dataset) -> TrainableProbabilisticModel:
+ variance = tf.math.reduce_variance(data.observations)
+ kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2])
+ prior_scale = tf.cast(1.0, dtype=tf.float64)
+ kernel.variance.prior = tfp.distributions.LogNormal(
+ tf.cast(-2.0, dtype=tf.float64), prior_scale
+ )
+ kernel.lengthscales.prior = tfp.distributions.LogNormal(
+ tf.math.log(kernel.lengthscales), prior_scale
+ )
+ gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
+ gpflow.set_trainable(gpr.likelihood, False)
+
+ return GaussianProcessRegression(gpr)
+
+ num_initial_points = 6
+ initial_query_points = search_space.sample_halton(num_initial_points)
+ observer = mk_observer(scaled_branin)
+ initial_data = observer(initial_query_points)
+
+ # we set a performance criterion at 1% of the range
+ # max absolute error needs to be bettter than this criterion
+ test_query_points = search_space.sample_sobol(10000 * search_space.dimension)
+ test_data = observer(test_query_points)
+ test_range = tf.reduce_max(test_data.observations) - tf.reduce_min(test_data.observations)
+ criterion = 0.01 * test_range
+
+ # we expect a model with initial data to fail the criterion
+ initial_model = build_model(initial_data)
+ initial_model.optimize(initial_data)
+ initial_predicted_means, _ = initial_model.model.predict_f(test_query_points) # type: ignore
+ initial_accuracy = tf.reduce_max(tf.abs(initial_predicted_means - test_data.observations))
+
+ assert not initial_accuracy < criterion
+
+ # after active learning the model should be much more accurate
+ model = build_model(initial_data)
+ final_model = (
+ BayesianOptimizer(observer, search_space)
+ .optimize(num_steps, initial_data, model, acquisition_rule)
+ .try_get_final_model()
+ )
+ final_predicted_means, _ = final_model.model.predict_f(test_query_points) # type: ignore
+ final_accuracy = tf.reduce_max(tf.abs(final_predicted_means - test_data.observations))
+
+ assert initial_accuracy > final_accuracy
+ assert final_accuracy < criterion
+
+
+@random_seed
+@pytest.mark.slow
+@pytest.mark.parametrize(
+ "num_steps, acquisition_rule, threshold",
+ [
+ (50, EfficientGlobalOptimization(ExpectedFeasibility(80, delta=1)), 80),
+ (50, EfficientGlobalOptimization(ExpectedFeasibility(80, delta=2)), 80),
+ (70, EfficientGlobalOptimization(ExpectedFeasibility(20, delta=1)), 20),
+ ],
+)
+def test_optimizer_learns_feasibility_set_of_thresholded_branin_function(
+ num_steps: int, acquisition_rule: AcquisitionRule[TensorType, SearchSpace], threshold: int
+) -> None:
+ """
+ Ensure that the feasible set is sufficiently well learned, such that the final model
+ classifies with great degree of certainty whether points in the search space are in
+ in the feasible set or not.
+ """
+
+ search_space = BRANIN_SEARCH_SPACE
+
+ def build_model(data: Dataset) -> TrainableProbabilisticModel:
+ variance = tf.math.reduce_variance(data.observations)
+ kernel = gpflow.kernels.Matern52(variance=variance, lengthscales=[0.2, 0.2])
+ prior_scale = tf.cast(1.0, dtype=tf.float64)
+ kernel.variance.prior = tfp.distributions.LogNormal(
+ tf.cast(-2.0, dtype=tf.float64), prior_scale
+ )
+ kernel.lengthscales.prior = tfp.distributions.LogNormal(
+ tf.math.log(kernel.lengthscales), prior_scale
+ )
+ gpr = gpflow.models.GPR(data.astuple(), kernel, noise_variance=1e-5)
+ gpflow.set_trainable(gpr.likelihood, False)
+
+ return GaussianProcessRegression(gpr)
+
+ num_initial_points = 6
+ initial_query_points = search_space.sample_halton(num_initial_points)
+ observer = mk_observer(branin)
+ initial_data = observer(initial_query_points)
+
+ # we set a performance criterion at 0.001 probability of required precision per point
+ # for global points and 0.01 close to the boundary
+ n_global = 10000 * search_space.dimension
+ n_boundary = 2000 * search_space.dimension
+ global_test, boundary_test = _get_feasible_set_test_data(
+ search_space, observer, n_global, n_boundary, threshold
+ )
+ global_criterion = 0.001 * (1 - 0.001) * tf.cast(n_global, tf.float64)
+ boundary_criterion = 0.01 * (1 - 0.01) * tf.cast(n_boundary, tf.float64)
+
+ # we expect a model with initial data to fail the criteria
+ initial_model = build_model(initial_data)
+ initial_model.optimize(initial_data)
+ initial_accuracy_global = _get_excursion_accuracy(global_test, initial_model, threshold)
+ initial_accuracy_boundary = _get_excursion_accuracy(boundary_test, initial_model, threshold)
+
+ assert not initial_accuracy_global < global_criterion
+ assert not initial_accuracy_boundary < boundary_criterion
+
+ # after active learning the model should be much more accurate
+ model = build_model(initial_data)
+ final_model = (
+ BayesianOptimizer(observer, search_space)
+ .optimize(num_steps, initial_data, model, acquisition_rule)
+ .try_get_final_model()
+ )
+ final_accuracy_global = _get_excursion_accuracy(global_test, final_model, threshold)
+ final_accuracy_boundary = _get_excursion_accuracy(boundary_test, final_model, threshold)
+
+ assert initial_accuracy_global > final_accuracy_global
+ assert initial_accuracy_boundary > final_accuracy_boundary
+ assert final_accuracy_global < global_criterion
+ assert final_accuracy_boundary < boundary_criterion
+
+
+def _excursion_probability(
+ x: TensorType, model: TrainableProbabilisticModel, threshold: int
+) -> tfp.distributions.Distribution:
+ mean, variance = model.model.predict_f(x) # type: ignore
+ normal = tfp.distributions.Normal(tf.cast(0, x.dtype), tf.cast(1, x.dtype))
+ t = (mean - threshold) / tf.sqrt(variance)
+ return normal.cdf(t)
+
+
+def _get_excursion_accuracy(
+ x: TensorType, model: TrainableProbabilisticModel, threshold: int
+) -> float:
+ prob = _excursion_probability(x, model, threshold)
+ accuracy = tf.reduce_sum(prob * (1 - prob))
+
+ return accuracy
+
+
+def _get_feasible_set_test_data(
+ search_space: Box,
+ observer: Observer,
+ n_global: int,
+ n_boundary: int,
+ threshold: float,
+ range_pct: float = 0.01,
+) -> tuple[TensorType, TensorType]:
+
+ boundary_done = False
+ global_done = False
+ boundary_points = tf.constant(0, dtype=tf.float64, shape=(0, search_space.dimension))
+ global_points = tf.constant(0, dtype=tf.float64, shape=(0, search_space.dimension))
+
+ while not boundary_done and not global_done:
+ test_query_points = search_space.sample(100000)
+ test_data = observer(test_query_points)
+ threshold_deviation = range_pct * (
+ tf.reduce_max(test_data.observations) # type: ignore
+ - tf.reduce_min(test_data.observations) # type: ignore
+ )
+
+ mask = tf.reduce_all(
+ tf.concat(
+ [
+ test_data.observations > threshold - threshold_deviation, # type: ignore
+ test_data.observations < threshold + threshold_deviation, # type: ignore
+ ],
+ axis=1,
+ ),
+ axis=1,
+ )
+ boundary_points = tf.concat(
+ [boundary_points, tf.boolean_mask(test_query_points, mask)], axis=0
+ )
+ global_points = tf.concat(
+ [global_points, tf.boolean_mask(test_query_points, tf.logical_not(mask))], axis=0
+ )
+
+ if boundary_points.shape[0] > n_boundary:
+ boundary_done = True
+ if global_points.shape[0] > n_global:
+ global_done = True
+
+ return (
+ global_points[
+ :n_global,
+ ],
+ boundary_points[
+ :n_boundary,
+ ],
+ )
| Active learning acquisition functions are missing integration tests
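For orientation, a stripped-down version of what such an integration test can look like, built only from pieces that appear in the patch above (kernel settings, step counts, and the tolerance are illustrative, not the criteria used in the final tests):
```python
import gpflow
import tensorflow as tf

from trieste.acquisition.function import PredictiveVariance
from trieste.acquisition.rule import EfficientGlobalOptimization
from trieste.bayesian_optimizer import BayesianOptimizer
from trieste.models.gpflow import GaussianProcessRegression
from trieste.objectives import BRANIN_SEARCH_SPACE, scaled_branin
from trieste.objectives.utils import mk_observer


def test_predictive_variance_learns_branin():
    search_space = BRANIN_SEARCH_SPACE
    observer = mk_observer(scaled_branin)
    initial_data = observer(search_space.sample_halton(6))

    gpr = gpflow.models.GPR(
        initial_data.astuple(),
        gpflow.kernels.Matern52(lengthscales=[0.2, 0.2]),
        noise_variance=1e-5,
    )
    model = GaussianProcessRegression(gpr)

    rule = EfficientGlobalOptimization(PredictiveVariance())
    result = BayesianOptimizer(observer, search_space).optimize(
        20, initial_data, model, rule
    )
    final_model = result.try_get_final_model()

    # The learned surrogate should predict the objective well across the space.
    test_points = search_space.sample(1000)
    mean, _ = final_model.model.predict_f(test_points)
    error = tf.reduce_max(tf.abs(mean - observer(test_points).observations))
    assert error < 0.5  # illustrative tolerance, not the criterion used in the PR
```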
| 2021-11-29T16:59:30 | -1.0 |
|
alibaba/FederatedScope | 547 | alibaba__FederatedScope-547 | [
"502"
] | c6a7de482047a4c84224fbc2744aed30811f4213 | diff --git a/federatedscope/core/parallel/parallel_runner.py b/federatedscope/core/parallel/parallel_runner.py
--- a/federatedscope/core/parallel/parallel_runner.py
+++ b/federatedscope/core/parallel/parallel_runner.py
@@ -195,6 +195,7 @@ def __init__(self, rank, config, server_class, receive_channel,
self.server_id = 0
self.resource_info = resource_info
self.client_resource_info = client_resource_info
+ self.serial_num_for_msg = 0
def setup(self):
self.config.defrost()
@@ -248,6 +249,8 @@ def run(self):
# For the server, move the received message to a
# cache for reordering the messages according to
# the timestamps
+ msg.serial_num = self.serial_num_for_msg
+ self.serial_num_for_msg += 1
heapq.heappush(server_msg_cache, msg)
elif len(server_msg_cache) > 0:
msg = heapq.heappop(server_msg_cache)
diff --git a/federatedscope/core/workers/server.py b/federatedscope/core/workers/server.py
--- a/federatedscope/core/workers/server.py
+++ b/federatedscope/core/workers/server.py
@@ -132,10 +132,10 @@ def __init__(self,
if self._cfg.federate.make_global_eval:
# set up a trainer for conducting evaluation in server
- assert self.model is not None
+ assert self.models is not None
assert self.data is not None
self.trainer = get_trainer(
- model=self.model,
+ model=self.models[0],
data=self.data,
device=self.device,
config=self._cfg,
@@ -456,7 +456,7 @@ def _perform_federated_aggregation(self):
staleness.append((client_id, self.state - state))
# Trigger the monitor here (for training)
- self._monitor.calc_model_metric(self.model.state_dict(),
+ self._monitor.calc_model_metric(self.models[0].state_dict(),
msg_list,
rnd=self.state)
@@ -664,7 +664,7 @@ def broadcast_model_para(self,
model_para = [{} if skip_broadcast else model.state_dict()
for model in self.models]
else:
- model_para = {} if skip_broadcast else self.model.state_dict()
+ model_para = {} if skip_broadcast else self.models[0].state_dict()
# We define the evaluation happens at the end of an epoch
rnd = self.state - 1 if msg_type == 'evaluate' else self.state
@@ -781,7 +781,7 @@ def trigger_for_start(self):
else:
if self._cfg.backend == 'torch':
model_size = sys.getsizeof(pickle.dumps(
- self.model)) / 1024.0 * 8.
+ self.models[0])) / 1024.0 * 8.
else:
# TODO: calculate model size for TF Model
model_size = 1.0
@@ -851,7 +851,7 @@ def terminate(self, msg_type='finish'):
if self.model_num > 1:
model_para = [model.state_dict() for model in self.models]
else:
- model_para = self.model.state_dict()
+ model_para = self.models[0].state_dict()
self._monitor.finish_fl()
diff --git a/federatedscope/tabular/dataloader/toy.py b/federatedscope/tabular/dataloader/toy.py
--- a/federatedscope/tabular/dataloader/toy.py
+++ b/federatedscope/tabular/dataloader/toy.py
@@ -1,3 +1,4 @@
+import copy
import pickle
import numpy as np
@@ -58,7 +59,7 @@ def _generate_data(client_num=5,
test_y = np.expand_dims(test_y, -1)
test_data = {'x': test_x, 'y': test_y}
for each_client in range(1, client_num + 1):
- data[each_client]['test'] = test_data
+ data[each_client]['test'] = copy.deepcopy(test_data)
# val data
val_x = np.random.normal(loc=0.0,
@@ -68,7 +69,7 @@ def _generate_data(client_num=5,
val_y = np.expand_dims(val_y, -1)
val_data = {'x': val_x, 'y': val_y}
for each_client in range(1, client_num + 1):
- data[each_client]['val'] = val_data
+ data[each_client]['val'] = copy.deepcopy(val_data)
# server_data
data[0] = dict()
| diff --git a/.github/workflows/test_atc.yml b/.github/workflows/test_atc.yml
--- a/.github/workflows/test_atc.yml
+++ b/.github/workflows/test_atc.yml
@@ -7,7 +7,7 @@ on:
jobs:
run:
- if: false == contains(github.event.pull_request.title, 'WIP')
+ if: (false == contains(github.event.pull_request.title, 'WIP') && github.repository == 'alibaba/FederatedScope')
runs-on: ${{ matrix.os }}
timeout-minutes: 30
strategy:
| Inappropriate way of assigning values to Client data in toy and vfl synthetic dataset
When applying a feature transformation in `ClientData`, we must use `deepcopy`. Otherwise, the data in the server and the clients will become inconsistent (in the server, x = f(x), but in a client, x = f(f(x))).
I've fixed this in https://github.com/alibaba/FederatedScope/pull/486/commits/ae896cd5740dc0b34f03bc637a724edf3f3be77e; please help double-check whether the same issue exists in other datasets, thanks! @xieyxclack
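A tiny standalone illustration of the aliasing problem (the transform is a stand-in for the real feature transformation):
```python
import copy

import numpy as np

test_data = {'x': np.ones((4, 2))}

# Buggy pattern: every client shares the same dict object.
clients_shared = {cid: test_data for cid in (1, 2, 3)}

# Fixed pattern: each client gets its own copy.
clients_copied = {cid: copy.deepcopy(test_data) for cid in (1, 2, 3)}

def transform(data):           # e.g. feature scaling applied per client
    data['x'] = data['x'] * 2  # x = f(x)
    return data

for client in clients_shared.values():
    transform(client)
print(clients_shared[1]['x'][0, 0])   # 8.0 -> f applied three times to the shared array

for client in clients_copied.values():
    transform(client)
print(clients_copied[1]['x'][0, 0])   # 2.0 -> f applied exactly once per client
```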
| There is a minor bug in `vfl_synthetic_data`, where the `config.vertical.dims` is inconsistent with the dims used in the `xgb` module.
https://github.com/alibaba/FederatedScope/blob/f4872600f01ea9b4dba9fb4c95f7145f4dc419eb/federatedscope/vertical_fl/dataloader/dataloader.py#L50
I've fixed it in [`74bceeb`](https://github.com/alibaba/FederatedScope/pull/486/commits/74bceeb62433cd8591022ea0945a4a7434ad9f44):
```python
total_dims = config.vertical.dims[-1]
```
Thank you very much for pointing out and fixing this issue! | 2023-03-17T09:08:47 | -1.0 |
google/etils | 159 | google__etils-159 | [
"143"
] | a72cedc0b8b53d48301f3f1ee70831b43ac090a0 | diff --git a/etils/edc/dataclass_utils.py b/etils/edc/dataclass_utils.py
--- a/etils/edc/dataclass_utils.py
+++ b/etils/edc/dataclass_utils.py
@@ -18,6 +18,7 @@
import dataclasses
import functools
+import inspect
import reprlib
import typing
from typing import Any, Callable, TypeVar
@@ -196,7 +197,8 @@ def has_default_repr(cls: _Cls) -> bool:
# Use `cls.__dict__` and not `hasattr` to ignore parent classes
'__repr__' not in cls.__dict__
# `__repr__` exists but is the default dataclass implementation
- or cls.__repr__.__qualname__ == '__create_fn__.<locals>.__repr__'
+ or inspect.unwrap(cls.__repr__).__qualname__
+ == '__create_fn__.<locals>.__repr__'
)
diff --git a/etils/epath/gpath.py b/etils/epath/gpath.py
--- a/etils/epath/gpath.py
+++ b/etils/epath/gpath.py
@@ -133,6 +133,7 @@ def expanduser(self: _P) -> _P:
def resolve(self: _P, strict: bool = False) -> _P:
"""Returns the abolute path."""
+ # TODO(epot): In pathlib, `resolve` also resolve the symlinks
return self._new(self._PATH.abspath(self._path_str))
def glob(self: _P, pattern: str) -> Iterator[_P]:
diff --git a/etils/epath/resource_utils.py b/etils/epath/resource_utils.py
--- a/etils/epath/resource_utils.py
+++ b/etils/epath/resource_utils.py
@@ -17,8 +17,8 @@
from __future__ import annotations
import itertools
-import os
import pathlib
+import posixpath
import sys
import types
import typing
@@ -26,7 +26,6 @@
from etils.epath import abstract_path
from etils.epath import register
-from etils.epath.typing import PathLike
# pylint: disable=g-import-not-at-top
if sys.version_info >= (3, 9): # `importlib.resources.files` was added in 3.9
@@ -64,19 +63,31 @@ def __fspath__(self) -> str:
"""
raise NotImplementedError('zipapp not supported. Please send us a PR.')
+ # zipfile.Path do not define `__eq__` nor `__hash__`. See:
+ # https://discuss.python.org/t/missing-zipfile-path-eq-and-zipfile-path-hash/16519
+ def __eq__(self, other) -> bool:
+ # pyformat:disable
+ return (
+ type(self) == type(other) # pylint: disable=unidiomatic-typecheck
+ and self.root == other.root # pytype: disable=attribute-error
+ and self.at == other.at # pytype: disable=attribute-error
+ )
+ # pyformat:enable
+
+ def __hash__(self) -> int:
+ return hash((self.root, self.at)) # pytype: disable=attribute-error
+
if sys.version_info < (3, 10):
# Required due to: https://bugs.python.org/issue42043
def _next(self, at) -> 'ResourcePath':
- return type(self)(self.root, at) # pytype: disable=attribute-error # py39-upgrade
+ return type(self)(self.root, at) # pytype: disable=attribute-error
# Before 3.10, joinpath only accept a single arg
- def joinpath(self, *parts: PathLike) -> 'ResourcePath':
+ def joinpath(self, *other):
"""Overwrite `joinpath` to be consistent with `pathlib.Path`."""
- if not parts:
- return self
- else:
- return super().joinpath(os.path.join(*parts)) # pylint: disable=no-value-for-parameter # pytype: disable=bad-return-type # py39-upgrade
+ next_ = posixpath.join(self.at, *other) # pytype: disable=attribute-error
+ return self._next(self.root.resolve_dir(next_)) # pytype: disable=attribute-error
def resource_path(package: Union[str, types.ModuleType]) -> abstract_path.Path:
| diff --git a/etils/epath/resource_utils_test.py b/etils/epath/resource_utils_test.py
--- a/etils/epath/resource_utils_test.py
+++ b/etils/epath/resource_utils_test.py
@@ -50,6 +50,8 @@ def _make_zip_file() -> zipfile.ZipFile:
def test_resource_path():
path = epath.resource_utils.ResourcePath(_make_zip_file())
assert isinstance(path, os.PathLike)
+ assert path.joinpath('b/c.txt') == path / 'b' / 'c.txt'
+ assert hash(path.joinpath('b/c.txt')) == hash(path / 'b' / 'c.txt')
assert path.joinpath('b/c.txt').read_text() == 'content of c'
sub_dirs = list(path.joinpath('b').iterdir())
assert len(sub_dirs) == 3
| Tests fail on Python 3.10
There are two test failures with Python 3.10. With Python 3.9 everything seems fine. Could you have a look?
```
================================================================================== FAILURES ==================================================================================
_________________________________________________________________________________ test_repr __________________________________________________________________________________
def test_repr():
> assert repr(R(123, R11(y='abc'))) == epy.dedent("""
R(
x=123,
y=R11(
x=None,
y='abc',
z=None,
),
)
""")
E assert "R(x=123, y=R...bc', z=None))" == 'R(\n x=12...e,\n ),\n)'
E + R(x=123, y=R11(x=None, y='abc', z=None))
E - R(
E - x=123,
E - y=R11(
E - x=None,
E - y='abc',
E - z=None,...
E
E ...Full output truncated (3 lines hidden), use '-vv' to show
etils/edc/dataclass_utils_test.py:108: AssertionError
_____________________________________________________________________________ test_resource_path _____________________________________________________________________________
def test_resource_path():
path = epath.resource_utils.ResourcePath(_make_zip_file())
assert isinstance(path, os.PathLike)
assert path.joinpath('b/c.txt').read_text() == 'content of c'
sub_dirs = list(path.joinpath('b').iterdir())
assert len(sub_dirs) == 3
for p in sub_dirs: # Childs should be `ResourcePath` instances
assert isinstance(p, epath.resource_utils.ResourcePath)
# Forwarded to `Path` keep the resource.
path = epath.Path(path)
assert isinstance(path, epath.resource_utils.ResourcePath)
> assert path.joinpath() == path
E AssertionError: assert ResourcePath('alpharep.zip', '') == ResourcePath('alpharep.zip', '')
E + where ResourcePath('alpharep.zip', '') = <bound method Path.joinpath of ResourcePath('alpharep.zip', '')>()
E + where <bound method Path.joinpath of ResourcePath('alpharep.zip', '')> = ResourcePath('alpharep.zip', '').joinpath
```
For `test_repr`, apparently the custom `__repr__` is not applied because the [`__qualname__`](https://github.com/google/etils/blob/3b7bc11f103bf62d3f9e8f48ede035a292a51ff0/etils/edc/dataclass_utils.py#L199) check no longer matches in Python 3.10.
For `test_resource_path`, `joinpath()` returns a new object for Python >= 3.10 as that function is [not overridden](https://github.com/google/etils/blob/3b7bc11f103bf62d3f9e8f48ede035a292a51ff0/etils/epath/resource_utils.py#L74).
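The fix that eventually landed relies on `inspect.unwrap` looking through a `functools.wraps`-style wrapper (anything that sets `__wrapped__`) to get back to the original generated function and its `__qualname__`; a small standalone illustration of that mechanism:
```python
import functools
import inspect


def make_repr():
    def __repr__(self):              # stand-in for the dataclass-generated __repr__
        return 'R(...)'
    return __repr__


original = make_repr()

@functools.wraps(original)
def wrapper(self):                   # stand-in for a decorator wrapping the generated __repr__
    return original(self)

wrapper.__qualname__ = 'R.__repr__'  # simulate the qualname being rewritten on the wrapper

print(wrapper.__qualname__)                  # 'R.__repr__' -> a check against the old name fails
print(inspect.unwrap(wrapper).__qualname__)  # 'make_repr.<locals>.__repr__' -> original still reachable
```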
I noticed those failures while creating an unofficial python-etils package for Arch Linux as a new dependency for the latest [python-tensorflow-datasets](https://aur.archlinux.org/packages/python-tensorflow-datasets).
Environment: Arch Linux x86_64, Python 3.10.4
| Thank you for reporting and finding those bugs. Your info is very helpful.
Don't hesitate to send a PR. Otherwise I'll have a look this week.
Thanks for the suggestion! I didn't send a PR yet as I'm not sure how to fix the issues. The `__qualname__` issue involves Python internals and I may not be able to find a good fix soon. The joinpath issue is more like an incorrect test - apparently `zipfile.Path.joinpath()` still creates a new object when paths to join are empty. I'm not sure if epath should match the behavior of zipfile.Path or not.
Thanks for the answer. I fixed those issues in https://github.com/google/etils/pull/159.
For the `zipfile`, it looks like the issue is that `__eq__` and `__hash__` are missing from `zipfile.Path`. Opened an issue in python: https://discuss.python.org/t/missing-zipfile-path-eq-and-zipfile-path-hash/16519
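A small standalone demonstration of the missing dunders (behaviour depends on the Python version; on interpreters where `zipfile.Path` has no `__eq__`, the comparison falls back to identity):
```python
import io
import zipfile

buf = io.BytesIO()
with zipfile.ZipFile(buf, 'w') as zf:
    zf.writestr('b/c.txt', 'content of c')
buf.seek(0)

root = zipfile.ZipFile(buf)
p1 = zipfile.Path(root, 'b/c.txt')
p2 = zipfile.Path(root, 'b/c.txt')

print(p1 == p2)   # may be False: two distinct objects for the same archive entry
# hash(p1) is likewise identity-based there, so such paths misbehave in sets/dicts.
```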
Thanks, that works!
> it looks like the issue is that `__eq__` and `__hash__` are missing from zipfile.Path
Yeah, that sounds reasonable. I found a more ambitious attempt https://github.com/python/cpython/pull/31085: "It could also be used to make zipfile.Path objects fully pathlib-compatible (no missing methods!)" | 2022-06-14T12:46:29 | -1.0 |