Dataset columns (with value statistics):
- instance_id: string (length 10 to 57)
- patch: string (length 261 to 37.7k)
- repo: string (length 7 to 53)
- base_commit: string (length 40)
- hints_text: string (301 distinct values)
- test_patch: string (length 212 to 2.22M)
- problem_statement: string (length 23 to 37.7k)
- version: int64 (always 0)
- environment_setup_commit: string (89 distinct values)
- FAIL_TO_PASS: sequence (length 1 to 4.94k)
- PASS_TO_PASS: sequence (length 0 to 7.82k)
- meta: dict
- created_at: unknown (rows below contain ISO 8601 timestamp strings)
- license: string (8 distinct values)
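A minimal sketch of how records with this schema might be loaded and inspected with the `datasets` library; the dataset identifier and split name below are placeholders (assumptions), not values taken from this page:

```python
# Minimal sketch, assuming the data is published as a Hugging Face dataset.
# "org/dataset-name" and the "train" split are placeholder assumptions;
# substitute the real repo id (or a local path) and split.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train")

row = ds[0]
print(row["instance_id"], row["repo"], row["base_commit"])
print("fail-to-pass tests:", row["FAIL_TO_PASS"])
print("pass-to-pass tests:", row["PASS_TO_PASS"])
```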
0b01001001__spectree-64
diff --git a/setup.py b/setup.py index 1b3cb64..4ef21e6 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f: setup( name='spectree', - version='0.3.7', + version='0.3.8', author='Keming Yang', author_email='kemingy94@gmail.com', description=('generate OpenAPI document and validate request&response ' diff --git a/spectree/utils.py b/spectree/utils.py index bb5698d..73d6c71 100644 --- a/spectree/utils.py +++ b/spectree/utils.py @@ -54,6 +54,7 @@ def parse_params(func, params, models): 'in': 'query', 'schema': schema, 'required': name in query.get('required', []), + 'description': schema.get('description', ''), }) if hasattr(func, 'headers'): @@ -64,6 +65,7 @@ def parse_params(func, params, models): 'in': 'header', 'schema': schema, 'required': name in headers.get('required', []), + 'description': schema.get('description', ''), }) if hasattr(func, 'cookies'): @@ -74,6 +76,7 @@ def parse_params(func, params, models): 'in': 'cookie', 'schema': schema, 'required': name in cookies.get('required', []), + 'description': schema.get('description', ''), }) return params
0b01001001/spectree
a091fab020ac26548250c907bae0855273a98778
diff --git a/tests/common.py b/tests/common.py index 0f2d696..83b4140 100644 --- a/tests/common.py +++ b/tests/common.py @@ -1,7 +1,7 @@ from enum import IntEnum, Enum from typing import List -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, root_validator, Field class Order(IntEnum): @@ -43,7 +43,7 @@ class Cookies(BaseModel): class DemoModel(BaseModel): uid: int limit: int - name: str + name: str = Field(..., description='user name') def get_paths(spec): diff --git a/tests/test_utils.py b/tests/test_utils.py index bf3426d..53dd3e1 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -98,8 +98,10 @@ def test_parse_params(): 'name': 'uid', 'in': 'query', 'required': True, + 'description': '', 'schema': { 'title': 'Uid', 'type': 'integer', } } + assert params[2]['description'] == 'user name'
[BUG] description for query parameters does not show in Swagger UI Hi, when I add a description for a schema used in query, it does not show in Swagger UI but does show in Redoc ```py @HELLO.route('/', methods=['GET']) @api.validate(query=HelloForm) def hello(): """ hello comment :return: """ return 'ok' class HelloForm(BaseModel): """ hello form """ user: str # user name msg: str = Field(description='msg test', example='aa') index: int data: HelloGetListForm list: List[HelloListForm] ``` ![Screenshot 2020-10-12 7.54.52 PM](https://user-images.githubusercontent.com/60063723/95743785-de70f480-0cc4-11eb-857b-fffd3d7e9cdd.png) ![Screenshot 2020-10-12 7.53.59 PM](https://user-images.githubusercontent.com/60063723/95743805-e5980280-0cc4-11eb-99ae-11e6439bae02.png)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_utils.py::test_parse_params" ]
[ "tests/test_utils.py::test_comments", "tests/test_utils.py::test_parse_code", "tests/test_utils.py::test_parse_name", "tests/test_utils.py::test_has_model", "tests/test_utils.py::test_parse_resp", "tests/test_utils.py::test_parse_request" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-10-12T13:21:50Z"
apache-2.0
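Taken together, the fields of a record such as 0b01001001__spectree-64 suggest the usual consumption pattern for this kind of instance: check out base_commit, apply test_patch (and, when validating the gold solution, patch), then run the FAIL_TO_PASS and PASS_TO_PASS tests. A rough sketch under that assumption, skipping dependency installation at environment_setup_commit:

```python
# Rough sketch of the evaluation flow implied by the record fields.
# Assumes `instance` is one loaded row and `workdir` is a checkout of instance["repo"];
# environment setup (dependencies, virtualenv) is omitted for brevity.
import subprocess

def apply_patch(diff_text, workdir):
    # feed the unified diff stored in the record to `git apply` via stdin
    subprocess.run(["git", "apply"], cwd=workdir, check=True, input=diff_text.encode())

def evaluate(instance, workdir):
    # pin the working copy to the commit the instance was built against
    subprocess.run(["git", "checkout", instance["base_commit"]], cwd=workdir, check=True)
    apply_patch(instance["test_patch"], workdir)  # tests that encode the expected behaviour
    apply_patch(instance["patch"], workdir)       # the gold fix itself
    # FAIL_TO_PASS tests should now pass; PASS_TO_PASS tests should keep passing
    tests = list(instance["FAIL_TO_PASS"]) + list(instance["PASS_TO_PASS"])
    subprocess.run(["python", "-m", "pytest", *tests], cwd=workdir, check=True)
```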
12rambau__sepal_ui-644
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 9fc498b3..fc69f702 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -72,17 +72,19 @@ jobs: assert len(unexpected) == 0 - name: test with pytest - run: coverage run -m pytest --color=yes --instafail tests + run: pytest --color=yes --cov --cov-report=xml --instafail tests + + - name: assess dead fixtures + if: matrix.python-version == '3.8' + run: pytest --dead-fixtures - name: build the template panel application if: matrix.python-version == '3.8' - run: | - pytest --nbmake sepal_ui/templates/panel_app/ui.ipynb + run: pytest --nbmake sepal_ui/templates/panel_app/ui.ipynb - name: build the template map application if: matrix.python-version == '3.8' - run: | - pytest --nbmake sepal_ui/templates/map_app/ui.ipynb + run: pytest --nbmake sepal_ui/templates/map_app/ui.ipynb - name: coverage run: coverage xml diff --git a/sepal_ui/sepalwidgets/btn.py b/sepal_ui/sepalwidgets/btn.py index 137622fa..105f6160 100644 --- a/sepal_ui/sepalwidgets/btn.py +++ b/sepal_ui/sepalwidgets/btn.py @@ -25,6 +25,9 @@ class Btn(v.Btn, SepalWidget): .. deprecated:: 2.13 ``text`` and ``icon`` will be replaced by ``msg`` and ``gliph`` to avoid duplicating ipyvuetify trait. + + .. deprecated:: 2.14 + Btn is not using a default ``msg`` anymor`. """ v_icon = None @@ -36,7 +39,7 @@ class Btn(v.Btn, SepalWidget): msg = Unicode("").tag(sync=True) "traitlet.Unicode: the text of the btn" - def __init__(self, msg="Click", gliph="", **kwargs): + def __init__(self, msg="", gliph="", **kwargs): # deprecation in 2.13 of text and icon # as they already exist in the ipyvuetify Btn traits (as booleans) @@ -55,7 +58,7 @@ class Btn(v.Btn, SepalWidget): ) # create the default v_icon - self.v_icon = v.Icon(left=True, children=[""]) + self.v_icon = v.Icon(children=[""]) # set the default parameters kwargs["color"] = kwargs.pop("color", "primary") @@ -89,6 +92,7 @@ class Btn(v.Btn, SepalWidget): Set the text of the btn """ + self.v_icon.left = bool(change["new"]) self.children = [self.v_icon, change["new"]] return self diff --git a/setup.py b/setup.py index 84775ae4..e7ca3ccf 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ setup_params = { "cryptography", "python-box", "xyzservices", - "planet==2.0a2", # this is a prerelease + "planet>=2.0", "pyyaml", "dask", "tqdm", @@ -83,6 +83,8 @@ setup_params = { "pytest-sugar", "pytest-icdiff", "pytest-instafail", + "pytest-deadfixtures", + "pytest-cov", "nbmake ", ], "doc": [
12rambau/sepal_ui
8a8196e3c7893b7a0aebdb4910e83054f59e0374
diff --git a/tests/test_Btn.py b/tests/test_Btn.py index fcaed760..4e3cb9b5 100644 --- a/tests/test_Btn.py +++ b/tests/test_Btn.py @@ -11,7 +11,7 @@ class TestBtn: btn = sw.Btn() assert btn.color == "primary" assert btn.v_icon.children[0] == "" - assert btn.children[1] == "Click" + assert btn.children[1] == "" # extensive btn btn = sw.Btn("toto", "fas fa-folder") @@ -42,12 +42,18 @@ class TestBtn: assert isinstance(btn.v_icon, v.Icon) assert btn.v_icon.children[0] == gliph + assert btn.v_icon.left is True # change existing icon gliph = "fas fa-file" btn.gliph = gliph assert btn.v_icon.children[0] == gliph + # display only the gliph + btn.msg = "" + assert btn.children[1] == "" + assert btn.v_icon.left is False + # remove all gliph gliph = "" btn.gliph = gliph @@ -79,4 +85,4 @@ class TestBtn: def btn(self): """Create a simple btn""" - return sw.Btn() + return sw.Btn("Click") diff --git a/tests/test_PlanetModel.py b/tests/test_PlanetModel.py index f84d2e1f..d6d63c5a 100644 --- a/tests/test_PlanetModel.py +++ b/tests/test_PlanetModel.py @@ -9,11 +9,17 @@ from sepal_ui.planetapi import PlanetModel @pytest.mark.skipif("PLANET_API_KEY" not in os.environ, reason="requires Planet") class TestPlanetModel: - @pytest.mark.parametrize("credentials", ["planet_key", "cred"]) - def test_init(self, credentials, request): + def test_init(self, planet_key, cred, request): + + # Test with a valid api key + planet_model = PlanetModel(planet_key) + + assert isinstance(planet_model, PlanetModel) + assert isinstance(planet_model.session, planet.http.Session) + assert planet_model.active is True - # Test with a valid api key and login credentials - planet_model = PlanetModel(request.getfixturevalue(credentials)) + # Test with a valid login credentials + planet_model = PlanetModel(cred) assert isinstance(planet_model, PlanetModel) assert isinstance(planet_model.session, planet.http.Session) @@ -56,10 +62,7 @@ class TestPlanetModel: return - def test_is_active(self, planet_key): - - # We only need to test with a key. - planet_model = PlanetModel(planet_key) + def test_is_active(self, planet_model): planet_model._is_active() assert planet_model.active is True @@ -69,9 +72,8 @@ class TestPlanetModel: return - def test_get_subscriptions(self, planet_key): + def test_get_subscriptions(self, planet_model): - planet_model = PlanetModel(planet_key) subs = planet_model.get_subscriptions() # Check object has length, because there is no way to check a value @@ -80,10 +82,7 @@ class TestPlanetModel: return - def test_get_planet_items(self, planet_key): - - # Arrange - planet_model = PlanetModel(planet_key) + def test_get_planet_items(self, planet_model): aoi = { # Yasuni national park in Ecuador "type": "Polygon", @@ -119,3 +118,11 @@ class TestPlanetModel: credentials = json.loads(os.getenv("PLANET_API_CREDENTIALS")) return list(credentials.values()) + + @pytest.fixture + def planet_model(self): + """Start a planet model using the API key""" + + key = os.getenv("PLANET_API_KEY") + + return PlanetModel(key)
sepal_ui.Btn doesn't work as expected I want to create a simple icon button, so I do: ```python sw.Btn(icon=True, gliph="mdi-plus") ``` Doing this without the "msg" parameter adds the default text "Click" to the button, which is not a useful default. So to remove the default text, I would expect to do this: ```python sw.Btn(children=[""], icon=True, gliph="mdi-plus") # or sw.Btn(msg="", icon=True, gliph="mdi-plus") ``` but that leaves the icon aligned to the left instead of centered (since an empty string is still used as the message).
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_Btn.py::TestBtn::test_init", "tests/test_Btn.py::TestBtn::test_set_gliph" ]
[ "tests/test_Btn.py::TestBtn::test_toggle_loading", "tests/test_Btn.py::TestBtn::test_set_msg" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-11-29T14:42:21Z"
mit
15five__scim2-filter-parser-13
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 12a5d4f..178f172 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,10 @@ CHANGE LOG ========== +0.3.5 +----- +- Update the sql.Transpiler to collect namedtuples rather than tuples for attr paths + 0.3.4 ----- - Update tox.ini and clean up linting errors diff --git a/setup.py b/setup.py index bbf57bf..bd16f70 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ def long_description(): setup( name='scim2-filter-parser', - version='0.3.4', + version='0.3.5', description='A customizable parser/transpiler for SCIM2.0 filters', url='https://github.com/15five/scim2-filter-parser', maintainer='Paul Logston', diff --git a/src/scim2_filter_parser/transpilers/sql.py b/src/scim2_filter_parser/transpilers/sql.py index 6254f1e..2107758 100644 --- a/src/scim2_filter_parser/transpilers/sql.py +++ b/src/scim2_filter_parser/transpilers/sql.py @@ -4,9 +4,12 @@ clause based on a SCIM filter. """ import ast import string +import collections from .. import ast as scim2ast +AttrPath = collections.namedtuple('AttrPath', ['attr_name', 'sub_attr', 'uri']) + class Transpiler(ast.NodeTransformer): """ @@ -145,7 +148,7 @@ class Transpiler(ast.NodeTransformer): # Convert attr_name to another value based on map. # Otherwise, return None. - attr_path_tuple = (attr_name_value, sub_attr_value, uri_value) + attr_path_tuple = AttrPath(attr_name_value, sub_attr_value, uri_value) self.attr_paths.append(attr_path_tuple) return self.attr_map.get(attr_path_tuple)
15five/scim2-filter-parser
3ed1858b492542d0bc9b9e9ab9547641595e28c1
diff --git a/tests/test_transpiler.py b/tests/test_transpiler.py index b8e1bb4..280c2d3 100644 --- a/tests/test_transpiler.py +++ b/tests/test_transpiler.py @@ -36,6 +36,16 @@ class RFCExamples(TestCase): self.assertEqual(expected_sql, sql, query) self.assertEqual(expected_params, params, query) + def test_attr_paths_are_created(self): + query = 'userName eq "bjensen"' + tokens = self.lexer.tokenize(query) + ast = self.parser.parse(tokens) + self.transpiler.transpile(ast) + + self.assertEqual(len(self.transpiler.attr_paths), 1) + for path in self.transpiler.attr_paths: + self.assertTrue(isinstance(path, transpile_sql.AttrPath)) + def test_username_eq(self): query = 'userName eq "bjensen"' sql = "username = {a}"
Return NamedTuple rather than tuple. It would be nice to return a NamedTuple instead of a tuple here: https://github.com/15five/scim2-filter-parser/blob/7ddc216f8c3dd1cdb2152944187e8f7f5ee07be2/src/scim2_filter_parser/transpilers/sql.py#L148 This way parts of each path could be accessed by name rather than by index in the tuple.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_transpiler.py::RFCExamples::test_attr_paths_are_created" ]
[ "tests/test_transpiler.py::CommandLine::test_command_line", "tests/test_transpiler.py::AzureQueries::test_email_type_eq_primary_value_eq_uuid", "tests/test_transpiler.py::AzureQueries::test_parse_simple_email_filter_with_uuid", "tests/test_transpiler.py::AzureQueries::test_external_id_from_azure", "tests/test_transpiler.py::UndefinedAttributes::test_schemas_eq", "tests/test_transpiler.py::UndefinedAttributes::test_title_has_value_and_user_type_eq_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_2", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_work_and_value_contains_2", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_work_and_value_contains_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_email_contains_or_email_contains", "tests/test_transpiler.py::UndefinedAttributes::test_email_type_eq_primary_value_eq_uuid_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_work_and_value_contains_3", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_3", "tests/test_transpiler.py::UndefinedAttributes::test_email_type_eq_primary_value_eq_uuid_2", "tests/test_transpiler.py::UndefinedAttributes::test_title_has_value_and_user_type_eq_2", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_ne_and_not_email_contains_or_email_contains", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_4", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_1", "tests/test_transpiler.py::UndefinedAttributes::test_username_eq", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_2", "tests/test_transpiler.py::RFCExamples::test_username_eq", "tests/test_transpiler.py::RFCExamples::test_schema_username_startswith", "tests/test_transpiler.py::RFCExamples::test_title_has_value", "tests/test_transpiler.py::RFCExamples::test_family_name_contains", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_lt", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_ge", "tests/test_transpiler.py::RFCExamples::test_user_type_eq_and_not_email_type_eq_work_and_value_contains", "tests/test_transpiler.py::RFCExamples::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_le", "tests/test_transpiler.py::RFCExamples::test_user_type_eq_and_not_email_type_eq", "tests/test_transpiler.py::RFCExamples::test_title_has_value_and_user_type_eq", "tests/test_transpiler.py::RFCExamples::test_schemas_eq", "tests/test_transpiler.py::RFCExamples::test_user_type_eq_and_email_contains_or_email_contains", "tests/test_transpiler.py::RFCExamples::test_title_has_value_or_user_type_eq", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_gt", "tests/test_transpiler.py::RFCExamples::test_user_type_ne_and_not_email_contains_or_email_contains", "tests/test_transpiler.py::RFCExamples::test_username_startswith" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-30T14:25:04Z"
mit
15five__scim2-filter-parser-20
diff --git a/src/scim2_filter_parser/parser.py b/src/scim2_filter_parser/parser.py index 516f65d..12c693e 100644 --- a/src/scim2_filter_parser/parser.py +++ b/src/scim2_filter_parser/parser.py @@ -110,9 +110,8 @@ class SCIMParser(Parser): # which takes precedence over "or" # 3. Attribute operators precedence = ( - ('nonassoc', OR), # noqa F821 - ('nonassoc', AND), # noqa F821 - ('nonassoc', NOT), # noqa F821 + ('left', OR, AND), # noqa F821 + ('right', NOT), # noqa F821 ) # FILTER = attrExp / logExp / valuePath / *1"not" "(" FILTER ")"
15five/scim2-filter-parser
08de23c5626556a37beced764a22a2fa7021989b
diff --git a/tests/test_parser.py b/tests/test_parser.py index 4ff562c..19aa198 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -47,6 +47,24 @@ class BuggyQueries(TestCase): with self.assertRaises(parser.SCIMParserError): self.parser.parse(token_stream) + def test_g17_1_log_exp_order(self): + query = 'displayName co "username" or nickName co "username" or userName co "username"' + + tokens = self.lexer.tokenize(query) + self.parser.parse(tokens) # Should not raise error + + def test_g17_2_log_exp_order(self): + query = 'displayName co "username" and nickName co "username" and userName co "username"' + + tokens = self.lexer.tokenize(query) + self.parser.parse(tokens) # Should not raise error + + def test_g17_3_log_exp_order(self): + query = 'displayName co "username" and nickName co "username" or userName co "username"' + + tokens = self.lexer.tokenize(query) + self.parser.parse(tokens) # Should not raise error + class CommandLine(TestCase): def setUp(self):
Issue when using multiple "or" or "and" Hi, I am facing an issue where a query with two or more "and" operators, or more than two "or" operators, fails to parse. See the examples below: 1) ```"displayName co \"username\" or nickName co \"username\" or userName co \"username\""``` ```"displayName co \"username\" and nickName co \"username\" and userName co \"username\""``` Both of these queries fail with ```scim2_filter_parser.parser.SCIMParserError: Parsing error at: Token(type='OR', value='or', lineno=1, index=52)``` Notice that these queries use either only "or" or only "and". 2) ```"displayName co \"username\" and nickName co \"username\" or userName co \"username\""``` This query works.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_parser.py::BuggyQueries::test_g17_2_log_exp_order", "tests/test_parser.py::BuggyQueries::test_g17_1_log_exp_order" ]
[ "tests/test_parser.py::BuggyQueries::test_g17_3_log_exp_order", "tests/test_parser.py::BuggyQueries::test_no_quotes_around_comp_value", "tests/test_parser.py::RegressionTestQueries::test_command_line", "tests/test_parser.py::CommandLine::test_command_line" ]
{ "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-10-18T03:21:13Z"
mit
20c__ctl-3
diff --git a/src/ctl/plugins/pypi.py b/src/ctl/plugins/pypi.py index 5d979af..a6117af 100644 --- a/src/ctl/plugins/pypi.py +++ b/src/ctl/plugins/pypi.py @@ -32,7 +32,7 @@ class PyPIPluginConfig(release.ReleasePluginConfig): config_file = confu.schema.Str(help="path to pypi config file (e.g. ~/.pypirc)") # PyPI repository name, needs to exist in your pypi config file - repository = confu.schema.Str( + pypi_repository = confu.schema.Str( help="PyPI repository name - needs to exist " "in your pypi config file", default="pypi", ) @@ -55,16 +55,16 @@ class PyPIPlugin(release.ReleasePlugin): @property def dist_path(self): - return os.path.join(self.target.checkout_path, "dist", "*") + return os.path.join(self.repository.checkout_path, "dist", "*") def prepare(self): super(PyPIPlugin, self).prepare() self.shell = True - self.repository = self.get_config("repository") + self.pypi_repository = self.get_config("pypi_repository") self.pypirc_path = os.path.expanduser(self.config.get("config_file")) self.twine_settings = Settings( config_file=self.pypirc_path, - repository_name=self.repository, + repository_name=self.pypi_repository, sign=self.get_config("sign"), identity=self.get_config("identity"), sign_with=self.get_config("sign_with"), diff --git a/src/ctl/plugins/release.py b/src/ctl/plugins/release.py index bcfa1ce..dcae2f4 100644 --- a/src/ctl/plugins/release.py +++ b/src/ctl/plugins/release.py @@ -18,8 +18,8 @@ import ctl.plugins.git class ReleasePluginConfig(confu.schema.Schema): - target = confu.schema.Str( - help="target for release - should be a path " + repository = confu.schema.Str( + help="repository target for release - should be a path " "to a python package or the name of a " "repository type plugin", cli=False, @@ -46,16 +46,16 @@ class ReleasePlugin(command.CommandPlugin): "version", nargs=1, type=str, - help="release version - if target is managed by git, " + help="release version - if repository is managed by git, " "checkout this branch/tag", ) group.add_argument( - "target", + "repository", nargs="?", type=str, - default=plugin_config.get("target"), - help=ReleasePluginConfig().target.help, + default=plugin_config.get("repository"), + help=ReleasePluginConfig().repository.help, ) sub = parser.add_subparsers(title="Operation", dest="op") @@ -74,7 +74,7 @@ class ReleasePlugin(command.CommandPlugin): return { "group": group, - "confu_target": op_release_parser, + "confu_repository": op_release_parser, "op_release_parser": op_release_parser, "op_validate_parser": op_validate_parser, } @@ -84,48 +84,48 @@ class ReleasePlugin(command.CommandPlugin): self.prepare() self.shell = True - self.set_target(self.get_config("target")) + self.set_repository(self.get_config("repository")) self.dry_run = kwargs.get("dry") self.version = kwargs.get("version")[0] - self.orig_branch = self.target.branch + self.orig_branch = self.repository.branch if self.dry_run: self.log.info("Doing dry run...") - self.log.info("Release target: {}".format(self.target)) + self.log.info("Release repository: {}".format(self.repository)) try: - self.target.checkout(self.version) + self.repository.checkout(self.version) op = self.get_op(kwargs.get("op")) op(**kwargs) finally: - self.target.checkout(self.orig_branch) + self.repository.checkout(self.orig_branch) - def set_target(self, target): - if not target: - raise ValueError("No target specified") + def set_repository(self, repository): + if not repository: + raise ValueError("No repository specified") try: - self.target = self.other_plugin(target) - if not isinstance(self.target, ctl.plugins.repository.RepositoryPlugin): + self.repository = self.other_plugin(repository) + if not isinstance(self.repository, ctl.plugins.repository.RepositoryPlugin): raise TypeError( "The plugin with the name `{}` is not a " "repository type plugin and cannot be used " - "as a target".format(target) + "as a repository".format(repository) ) except KeyError: - self.target = os.path.abspath(target) - if not os.path.exists(self.target): + self.repository = os.path.abspath(repository) + if not os.path.exists(self.repository): raise IOError( "Target is neither a configured repository " "plugin nor a valid file path: " - "{}".format(self.target) + "{}".format(self.repository) ) - self.target = ctl.plugins.git.temporary_plugin( - self.ctl, "{}__tmp_repo".format(self.plugin_name), self.target + self.repository = ctl.plugins.git.temporary_plugin( - self.ctl, "{}__tmp_repo".format(self.plugin_name), self.repository ) - self.cwd = self.target.checkout_path + self.cwd = self.repository.checkout_path @expose("ctl.{plugin_name}.release") def release(self, **kwargs):
20c/ctl
879af37647e61767a1ede59ffd353e4cfd27cd6f
diff --git a/tests/test_plugin_pypi.py b/tests/test_plugin_pypi.py index 20315ad..19813e2 100644 --- a/tests/test_plugin_pypi.py +++ b/tests/test_plugin_pypi.py @@ -53,35 +53,35 @@ def test_init(): -def test_set_target_git_path(tmpdir, ctlr): +def test_set_repository_git_path(tmpdir, ctlr): """ - Test setting build target: existing git repo via filepath + Test setting build repository: existing git repo via filepath """ plugin, git_plugin = instantiate(tmpdir, ctlr) - plugin.set_target(git_plugin.checkout_path) + plugin.set_repository(git_plugin.checkout_path) assert plugin.dist_path == os.path.join(git_plugin.checkout_path, "dist", "*") -def test_set_target_git_plugin(tmpdir, ctlr): +def test_set_repository_git_plugin(tmpdir, ctlr): """ - Test setting build target: existing git plugin + Test setting build repository: existing git plugin """ plugin, git_plugin = instantiate(tmpdir, ctlr) - plugin.set_target(git_plugin.plugin_name) + plugin.set_repository(git_plugin.plugin_name) assert plugin.dist_path == os.path.join(git_plugin.checkout_path, "dist", "*") -def test_set_target_error(tmpdir, ctlr): +def test_set_repository_error(tmpdir, ctlr): """ - Test setting invalid build target + Test setting invalid build repository """ plugin, git_plugin = instantiate(tmpdir, ctlr) @@ -89,17 +89,17 @@ def test_set_target_error(tmpdir, ctlr): # non existing path / plugin name with pytest.raises(IOError): - plugin.set_target("invalid target") + plugin.set_repository("invalid repository") # invalid plugin type with pytest.raises(TypeError): - plugin.set_target("test_pypi") + plugin.set_repository("test_pypi") - # no target + # no repository with pytest.raises(ValueError): - plugin.set_target(None) + plugin.set_repository(None) def test_build_dist(tmpdir, ctlr): @@ -110,7 +110,7 @@ def test_build_dist(tmpdir, ctlr): plugin, git_plugin = instantiate(tmpdir, ctlr) plugin.prepare() - plugin.set_target(git_plugin.plugin_name) + plugin.set_repository(git_plugin.plugin_name) plugin._build_dist() assert os.path.exists(os.path.join(git_plugin.checkout_path, @@ -126,7 +126,7 @@ def test_validate_dist(tmpdir, ctlr): plugin, git_plugin = instantiate(tmpdir, ctlr) plugin.prepare() - plugin.set_target(git_plugin.plugin_name) + plugin.set_repository(git_plugin.plugin_name) plugin._build_dist() plugin._validate_dist()
PyPI plugin: `target` config attribute should be `repository` This brings it in line with the version plugin, which currently uses `repository` to specify the target repository. The pypi plugin currently uses `repository` to specify which PyPI repository to use; this should change to `pypi_repository` as well. This should be done before tagging 1.0.0 since it's a config schema change.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_plugin_pypi.py::test_set_repository_git_path[standard]", "tests/test_plugin_pypi.py::test_set_repository_error[standard]", "tests/test_plugin_pypi.py::test_set_repository_git_plugin[standard]" ]
[ "tests/test_plugin_pypi.py::test_init" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-10-08T09:23:56Z"
apache-2.0
20c__ctl-7
diff --git a/Ctl/Pipfile b/Ctl/Pipfile index 0c7a304..1bd6308 100644 --- a/Ctl/Pipfile +++ b/Ctl/Pipfile @@ -14,7 +14,7 @@ tmpl = "==0.3.0" [packages] munge = "<1,>=0.4" -cfu = ">=1.2.0,<2" +cfu = ">=1.3.0,<2" grainy = ">=1.4.0,<2" git-url-parse = ">=1.1.0,<2" pluginmgr = ">=0.6" diff --git a/Ctl/requirements.txt b/Ctl/requirements.txt index b3582c5..0037aaa 100644 --- a/Ctl/requirements.txt +++ b/Ctl/requirements.txt @@ -1,5 +1,5 @@ munge >=0.4, <1 -cfu >= 1.2.0, < 2 +cfu >= 1.3.0, < 2 grainy >= 1.4.0, <2 git-url-parse >= 1.1.0, <2 pluginmgr >= 0.6 diff --git a/src/ctl/__init__.py b/src/ctl/__init__.py index eb4a635..b9616df 100644 --- a/src/ctl/__init__.py +++ b/src/ctl/__init__.py @@ -4,6 +4,7 @@ import os from pkg_resources import get_distribution import confu.config +import confu.exceptions import grainy.core import copy import logging @@ -279,11 +280,14 @@ class Ctl(object): # def set_config_dir(self): def __init__(self, ctx=None, config_dir=None, full_init=True): - self.init_context(ctx=ctx, config_dir=config_dir) + self.init_context(ctx=ctx, config_dir=config_dir) self.init_logging() - self.init_permissions() + if self.config.errors: + return self.log_config_issues() + + self.init_permissions() self.expose_plugin_vars() if full_init: @@ -330,8 +334,10 @@ class Ctl(object): Apply python logging config and create `log` and `usage_log` properties """ + # allow setting up python logging from ctl config set_pylogger_config(self.ctx.config.get_nested("ctl", "log")) + # instantiate logger self.log = Log("ctl") self.usage_log = Log("usage") diff --git a/src/ctl/util/versioning.py b/src/ctl/util/versioning.py index 22bdb09..23e1390 100644 --- a/src/ctl/util/versioning.py +++ b/src/ctl/util/versioning.py @@ -1,5 +1,4 @@ def version_tuple(version): - print("VERSION", version) """ Returns a tuple from version string """ return tuple(version.split(".")) @@ -9,27 +8,35 @@ def version_string(version): return ".".join(["{}".format(v) for v in version]) -def validate_semantic(version): +def validate_semantic(version, pad=0): if not isinstance(version, (list, tuple)): version = version_tuple(version) - try: - major, minor, patch, dev = version - except ValueError: - major, minor, patch = version + parts = len(version) + + if parts < 1: + raise ValueError("Semantic version needs to contain at least a major version") + if parts > 4: + raise ValueError("Semantic version can not contain more than 4 parts") + + if parts < pad: + version = tuple(list(version) + [0 for i in range(0, pad - parts)]) return tuple([int(n) for n in version]) def bump_semantic(version, segment): - version = list(validate_semantic(version)) if segment == "major": + version = list(validate_semantic(version)) return (version[0] + 1, 0, 0) elif segment == "minor": + version = list(validate_semantic(version, pad=2)) return (version[0], version[1] + 1, 0) elif segment == "patch": + version = list(validate_semantic(version, pad=3)) return (version[0], version[1], version[2] + 1) elif segment == "dev": + version = list(validate_semantic(version, pad=4)) try: return (version[0], version[1], version[2], version[3] + 1) except IndexError:
20c/ctl
be7f350f8f2d92918922d82fce0266fcd72decd2
diff --git a/tests/test_plugin_version.py b/tests/test_plugin_version.py index 6745c78..4b9617a 100644 --- a/tests/test_plugin_version.py +++ b/tests/test_plugin_version.py @@ -138,6 +138,30 @@ def test_bump(tmpdir, ctlr): plugin.bump(version="invalid", repo="dummy_repo") +def test_bump_truncated(tmpdir, ctlr): + plugin, dummy_repo = instantiate(tmpdir, ctlr) + plugin.tag(version="1.0", repo="dummy_repo") + + plugin.bump(version="minor", repo="dummy_repo") + assert dummy_repo.version == ("1", "1", "0") + assert dummy_repo._tag == "1.1.0" + + plugin.tag(version="1.0", repo="dummy_repo") + plugin.bump(version="patch", repo="dummy_repo") + assert dummy_repo.version == ("1", "0", "1") + assert dummy_repo._tag == "1.0.1" + + plugin.tag(version="2", repo="dummy_repo") + plugin.bump(version="patch", repo="dummy_repo") + assert dummy_repo.version == ("2", "0", "1") + assert dummy_repo._tag == "2.0.1" + + plugin.tag(version="3", repo="dummy_repo") + plugin.bump(version="major", repo="dummy_repo") + assert dummy_repo.version == ("4", "0", "0") + assert dummy_repo._tag == "4.0.0" + + def test_execute(tmpdir, ctlr): plugin, dummy_repo = instantiate(tmpdir, ctlr) plugin.execute(op="tag", version="1.0.0", repository="dummy_repo", init=True) diff --git a/tests/test_util_versioning.py b/tests/test_util_versioning.py index b89df79..6624816 100644 --- a/tests/test_util_versioning.py +++ b/tests/test_util_versioning.py @@ -19,7 +19,7 @@ def test_version_tuple(version, string): ((1, 0, 0), (1, 0, 0), None), ("1.0.0.0", (1, 0, 0, 0), None), ((1, 0, 0, 0), (1, 0, 0, 0), None), - ("1.0", None, ValueError), + ("1.0", (1, 0), None), ("a.b.c", None, ValueError), ], )
Better error handling for config errors outside of `plugins` Example: having a schema error in `permissions` exits ctl with traceback that's not very telling as to what is failing reproduce: ``` permissions: namespace: ctl permission: crud ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_util_versioning.py::test_validate_semantic[1.0-expected4-None]", "tests/test_plugin_version.py::test_bump_truncated[standard]" ]
[ "tests/test_util_versioning.py::test_bump_semantic[1.2.3-minor-expected1]", "tests/test_util_versioning.py::test_validate_semantic[1.0.0-expected0-None]", "tests/test_util_versioning.py::test_validate_semantic[version3-expected3-None]", "tests/test_util_versioning.py::test_validate_semantic[version1-expected1-None]", "tests/test_util_versioning.py::test_bump_semantic[1.2.3.4-dev-expected3]", "tests/test_util_versioning.py::test_version_tuple[version0-1.0.0]", "tests/test_util_versioning.py::test_validate_semantic[1.0.0.0-expected2-None]", "tests/test_util_versioning.py::test_bump_semantic[1.2.3.4-patch-expected2]", "tests/test_util_versioning.py::test_bump_semantic[1.2.3.4-major-expected0]", "tests/test_util_versioning.py::test_validate_semantic[a.b.c-None-ValueError]", "tests/test_plugin_version.py::test_execute_permissions[permission_denied]", "tests/test_plugin_version.py::test_tag[standard]", "tests/test_plugin_version.py::test_repository[standard]", "tests/test_plugin_version.py::test_bump[standard]", "tests/test_plugin_version.py::test_execute[standard]", "tests/test_plugin_version.py::test_init" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-10-21T11:05:40Z"
apache-2.0
3YOURMIND__django-migration-linter-186
diff --git a/.github/workflows/ci-build.yml b/.github/workflows/ci-build.yml index 085b32c..fef9e0e 100644 --- a/.github/workflows/ci-build.yml +++ b/.github/workflows/ci-build.yml @@ -28,7 +28,7 @@ jobs: strategy: matrix: - python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9'] + python-version: ['3.6', '3.7', '3.8', '3.9'] name: Build with Python ${{ matrix.python-version }} steps: diff --git a/CHANGELOG.md b/CHANGELOG.md index d1ec8e5..15fefc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,8 @@ -## 4.0.0 +## 4.0.0 (unreleased) - Drop support for Python 2.7 and 3.5 - Drop support for Django 1.11, 2.0, 2.1, 3.0 +- Fix index creation detection when table is being created in the transaction (issue #178) ## 3.0.1 diff --git a/django_migration_linter/sql_analyser/postgresql.py b/django_migration_linter/sql_analyser/postgresql.py index 140aba3..3eb18a5 100644 --- a/django_migration_linter/sql_analyser/postgresql.py +++ b/django_migration_linter/sql_analyser/postgresql.py @@ -3,14 +3,32 @@ import re from .base import BaseAnalyser +def has_create_index(sql_statements, **kwargs): + regex_result = None + for sql in sql_statements: + regex_result = re.search(r"CREATE (UNIQUE )?INDEX.*ON (.*) \(", sql) + if re.search("INDEX CONCURRENTLY", sql): + regex_result = None + elif regex_result: + break + if not regex_result: + return False + + concerned_table = regex_result.group(2) + table_is_added_in_transaction = any( + sql.startswith("CREATE TABLE {}".format(concerned_table)) + for sql in sql_statements + ) + return not table_is_added_in_transaction + + class PostgresqlAnalyser(BaseAnalyser): migration_tests = [ { "code": "CREATE_INDEX", - "fn": lambda sql, **kw: re.search("CREATE (UNIQUE )?INDEX", sql) - and not re.search("INDEX CONCURRENTLY", sql), + "fn": has_create_index, "msg": "CREATE INDEX locks table", - "mode": "one_liner", + "mode": "transaction", "type": "warning", }, {
3YOURMIND/django-migration-linter
aef3db3e4198d06c38bc4b0874e72ed657891eea
diff --git a/tests/unit/test_sql_analyser.py b/tests/unit/test_sql_analyser.py index 00dd50e..65ab7f0 100644 --- a/tests/unit/test_sql_analyser.py +++ b/tests/unit/test_sql_analyser.py @@ -233,6 +233,23 @@ class PostgresqlAnalyserTestCase(SqlAnalyserTestCase): sql = "CREATE UNIQUE INDEX title_idx ON films (title);" self.assertWarningSql(sql) + def test_create_index_non_concurrently_with_table_creation(self): + sql = [ + 'CREATE TABLE "films" ("title" text);', + 'CREATE INDEX ON "films" ((lower("title")));', + ] + self.assertValidSql(sql) + sql = [ + 'CREATE TABLE "some_table" ("title" text);', + 'CREATE INDEX ON "films" ((lower("title")));', + ] + self.assertWarningSql(sql) + sql = [ + 'CREATE TABLE "films" ("title" text);', + 'CREATE INDEX ON "some_table" ((lower("title")));', + ] + self.assertWarningSql(sql) + def test_create_index_concurrently(self): sql = "CREATE INDEX CONCURRENTLY ON films (lower(title));" self.assertValidSql(sql)
Linter fails on CREATE INDEX when creating a new table Here is an example `CreateModel` from Django: ```python migrations.CreateModel( name='ShipmentMetadataAlert', fields=[ ('deleted_at', models.DateTimeField(blank=True, db_index=True, null=True)), ('created_at', common.fields.CreatedField(default=django.utils.timezone.now, editable=False)), ('updated_at', common.fields.LastModifiedField(default=django.utils.timezone.now, editable=False)), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField(blank=True, null=True)), ('level', models.CharField(blank=True, choices=[('HIGH', 'high'), ('MEDIUM', 'medium'), ('LOW', 'low')], max_length=16, null=True)), ('type', models.CharField(blank=True, choices=[('MOBILE_DEVICE_ALERT', 'MOBILE_DEVICE_ALERT'), ('NON_ACTIVE_CARRIER', 'NON_ACTIVE_CARRIER'), ('OTHER', 'OTHER')], max_length=32, null=True)), ('subtype', models.CharField(blank=True, choices=[('DRIVER_PERMISSIONS', 'DRIVER_PERMISSIONS'), ('DRIVER_LOCATION', 'DRIVER_LOCATION'), ('OTHER', 'OTHER')], max_length=32, null=True)), ('occurred_at', models.DateTimeField(null=True)), ('clear_alert_job_id', models.UUIDField(default=None, null=True)), ('metadata', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='alerts', to='shipments.ShipmentMetadata')), ], options={ 'abstract': False, } ) ``` Here are the SQL statements that this spits out in `sqlmigrate`: ```sql BEGIN; -- -- Create model ShipmentMetadataAlert -- CREATE TABLE "shipments_shipmentmetadataalert" ("deleted_at" timestamp with time zone NULL, "created_at" timestamp with time zone NOT NULL, "updated_at" timestamp with time zone NOT NULL, "id" uuid NOT NULL PRIMARY KEY, "message" text NULL, "level" varchar(16) NULL, "type" varchar(32) NULL, "subtype" varchar(32) NULL, "occurred_at" timestamp with time zone NULL, "clear_alert_job_id" uuid NULL, "metadata_id" uuid NOT NULL); ALTER TABLE "shipments_shipmentmetadataalert" ADD CONSTRAINT "shipments_shipmentme_metadata_id_f20850e8_fk_shipments" FOREIGN KEY ("metadata_id") REFERENCES "shipments_shipmentmetadata" ("id") DEFERRABLE INITIALLY DEFERRED; CREATE INDEX "shipments_shipmentmetadataalert_deleted_at_c9a93342" ON "shipments_shipmentmetadataalert" ("deleted_at"); CREATE INDEX "shipments_shipmentmetadataalert_metadata_id_f20850e8" ON "shipments_shipmentmetadataalert" ("metadata_id"); COMMIT; ``` This is an error from the linter as it outputs the error `CREATE INDEX locks table`. But the table is being created within the migration, it just needs to recognize that. It seems like the `CREATE INDEX` detection should work the same way that the `ADD_UNIQUE` detection works where it detects that the create table is happening in the same migration: https://github.com/3YOURMIND/django-migration-linter/blob/db71a9db23746f64d41d681f3fecb9b066c87338/django_migration_linter/sql_analyser/base.py#L26-L40
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently_with_table_creation" ]
[ "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column_after_django22", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_create_table_with_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_rename_table", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_non_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_field_to_not_null_with_dropped_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_onetoonefield_to_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_reindex", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_unique_together" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-12-20T21:27:38Z"
apache-2.0
3YOURMIND__django-migration-linter-258
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3069d91..beafd65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,21 @@ Instead, the linter crashes and lets the `sqlmigrate` error raise, in order to avoid letting a problematic migration pass. One common reason for such an error is the SQL generation which requires the database to be actually migrated in order to fetch actual constraint names from it. The crash is a sign to double-check the migration. But if you are certain the migration is safe, you can ignore it (issue #209) + +Features: + - Fixed `RunPython` model import check when using a `through` object like `MyModel.many_to_many.through.objects.filter(...)` (issue #218) - Mark the `IgnoreMigration` operation as `elidable=True` + +Bug: + +- Don't detect not nullable field on partial index creation (issue #250) + +Miscellaneous: + - Add support for Python 3.11 - Add support for Django 4.1 +- Add support for Django 4.2 - Drop support for Django 2.2 - Internally rename "migration tests" to "migration checks" - Add dataclasses internally instead of custom dicts diff --git a/django_migration_linter/sql_analyser/base.py b/django_migration_linter/sql_analyser/base.py index 2fa0646..131652e 100644 --- a/django_migration_linter/sql_analyser/base.py +++ b/django_migration_linter/sql_analyser/base.py @@ -40,7 +40,8 @@ def has_not_null_column(sql_statements: list[str], **kwargs) -> bool: ends_with_default = False return ( any( - re.search("(?<!DROP )NOT NULL", sql) and not sql.startswith("CREATE TABLE") + re.search("(?<!DROP )NOT NULL", sql) + and not (sql.startswith("CREATE TABLE") or sql.startswith("CREATE INDEX")) for sql in sql_statements ) and ends_with_default is False
3YOURMIND/django-migration-linter
366d16b01a72d0baa54fef55761d846b0f05b8dd
diff --git a/tests/unit/test_sql_analyser.py b/tests/unit/test_sql_analyser.py index d7349fc..012d53c 100644 --- a/tests/unit/test_sql_analyser.py +++ b/tests/unit/test_sql_analyser.py @@ -297,6 +297,10 @@ class PostgresqlAnalyserTestCase(SqlAnalyserTestCase): sql = "CREATE UNIQUE INDEX CONCURRENTLY title_idx ON films (title);" self.assertValidSql(sql) + def test_create_index_concurrently_where(self): + sql = 'CREATE INDEX CONCURRENTLY "index_name" ON "table_name" ("a_column") WHERE ("some_column" IS NOT NULL);' + self.assertValidSql(sql) + def test_drop_index_non_concurrently(self): sql = "DROP INDEX ON films" self.assertWarningSql(sql)
Adding an index with a NOT NULL condition incorrectly triggers NOT_NULL rule Adding an index with a `WHERE` clause including `NOT NULL` gets flagged as a `NOT NULL constraint on columns` error. ## Steps to reproduce The follow migration operation: ```python AddIndexConcurrently( model_name="prediction", index=models.Index( condition=models.Q( ("data_deleted_at__isnull", True), ("delete_data_after__isnull", False), ), fields=["delete_data_after"], name="delete_data_after_idx", ), ), ``` Generates the following SQL: ```sql CREATE INDEX CONCURRENTLY "delete_data_after_idx" ON "models_prediction" ("delete_data_after") WHERE ("data_deleted_at" IS NULL AND "delete_data_after" IS NOT NULL); ``` When linted this is flagged as an error because of the `NOT NULL`, when it ought to be a safe operation. ## Investigation Looking at the condition used for this rule, I think it might just need to permit `CREATE INDEX` requests: ```python re.search("(?<!DROP )NOT NULL", sql) and not sql.startswith("CREATE TABLE") and not sql.startswith("CREATE INDEX") ``` https://github.com/3YOURMIND/django-migration-linter/blob/202a6d9d5dea83528cb52fd7481a5a0565cc6f83/django_migration_linter/sql_analyser/base.py#L43
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_concurrently_where" ]
[ "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_unique_index", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column_after_django22", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_create_table_with_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_rename_table", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_unique_index", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently_with_table_creation", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_non_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_field_to_not_null_with_dropped_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_onetoonefield_to_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_reindex", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_unique_index", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::SqlUtilsTestCase::test_unknown_analyser_string", "tests/unit/test_sql_analyser.py::SqlUtilsTestCase::test_unsupported_db_vendor" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-07-03T18:35:18Z"
apache-2.0
3YOURMIND__django-migration-linter-47
diff --git a/django_migration_linter/migration_linter.py b/django_migration_linter/migration_linter.py index f9c0ab1..03c2054 100644 --- a/django_migration_linter/migration_linter.py +++ b/django_migration_linter/migration_linter.py @@ -20,7 +20,7 @@ from subprocess import Popen, PIPE import sys from .cache import Cache -from .constants import DEFAULT_CACHE_PATH, MIGRATION_FOLDER_NAME +from .constants import DEFAULT_CACHE_PATH, MIGRATION_FOLDER_NAME, __version__ from .migration import Migration from .utils import is_directory, is_django_project, clean_bytes_to_str from .sql_analyser import analyse_sql_statements @@ -287,6 +287,9 @@ def _main(): action="store_true", help="print more information during execution", ) + parser.add_argument( + "--version", "-V", action="version", version="%(prog)s {}".format(__version__) + ) parser.add_argument( "--database", type=str,
3YOURMIND/django-migration-linter
fbf0f4419336fcb1235fa57f5575ad2593354e44
diff --git a/tests/functional/test_cmd_line_call.py b/tests/functional/test_cmd_line_call.py index a2861fa..47d7944 100644 --- a/tests/functional/test_cmd_line_call.py +++ b/tests/functional/test_cmd_line_call.py @@ -16,7 +16,7 @@ import os import shutil import unittest from subprocess import Popen, PIPE -from django_migration_linter import utils, DEFAULT_CACHE_PATH +from django_migration_linter import utils, DEFAULT_CACHE_PATH, constants from tests import fixtures import sys @@ -274,3 +274,25 @@ class CallLinterFromCommandLineTest(unittest.TestCase): self.assertTrue(lines[0].endswith('ERR')) self.assertTrue(lines[2].endswith('OK')) self.assertTrue(lines[3].startswith('*** Summary')) + + +class VersionOptionLinterFromCommandLineTest(CallLinterFromCommandLineTest): + def test_call_with_version_option(self): + cmd = "{} --version".format(self.linter_exec) + process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) + process.wait() + self.assertEqual(process.returncode, 0) + process_read_stream = process.stderr if sys.version_info.major == 2 else process.stdout + lines = list(map(utils.clean_bytes_to_str, process_read_stream.readlines())) + self.assertEqual(len(lines), 1) + self.assertEqual(lines[0], "django-migration-linter {}".format(constants.__version__)) + + def test_call_with_short_version_option(self): + cmd = "{} -V".format(self.linter_exec) + process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) + process.wait() + self.assertEqual(process.returncode, 0) + process_read_stream = process.stderr if sys.version_info.major == 2 else process.stdout + lines = list(map(utils.clean_bytes_to_str, process_read_stream.readlines())) + self.assertEqual(len(lines), 1) + self.assertEqual(lines[0], "django-migration-linter {}".format(constants.__version__))
Add --version option Pretty straightforward. Have a `--version` that prints the current version of the linter.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/functional/test_cmd_line_call.py::VersionOptionLinterFromCommandLineTest::test_call_with_version_option", "tests/functional/test_cmd_line_call.py::VersionOptionLinterFromCommandLineTest::test_call_with_short_version_option" ]
[ "tests/functional/test_cmd_line_call.py::VersionOptionLinterFromCommandLineTest::test_call_linter_cmd_line_cache", "tests/functional/test_cmd_line_call.py::CallLinterFromCommandLineTest::test_call_linter_cmd_line_cache" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-01-21T21:29:34Z"
apache-2.0
4degrees__clique-26
diff --git a/source/clique/collection.py b/source/clique/collection.py index 0c3b296..db9276c 100644 --- a/source/clique/collection.py +++ b/source/clique/collection.py @@ -251,15 +251,25 @@ class Collection(object): else: data['padding'] = '%d' - if self.indexes: + if '{holes}' in pattern: data['holes'] = self.holes().format('{ranges}') + if '{range}' in pattern or '{ranges}' in pattern: indexes = list(self.indexes) - if len(indexes) == 1: + indexes_count = len(indexes) + + if indexes_count == 0: + data['range'] = '' + + elif indexes_count == 1: data['range'] = '{0}'.format(indexes[0]) + else: - data['range'] = '{0}-{1}'.format(indexes[0], indexes[-1]) + data['range'] = '{0}-{1}'.format( + indexes[0], indexes[-1] + ) + if '{ranges}' in pattern: separated = self.separate() if len(separated) > 1: ranges = [collection.format('{range}') @@ -270,11 +280,6 @@ class Collection(object): data['ranges'] = ', '.join(ranges) - else: - data['holes'] = '' - data['range'] = '' - data['ranges'] = '' - return pattern.format(**data) def is_contiguous(self):
4degrees/clique
a89507304acce5931f940c34025a6547fa8227b5
diff --git a/test/unit/test_collection.py b/test/unit/test_collection.py index ce4daa7..11cb01e 100644 --- a/test/unit/test_collection.py +++ b/test/unit/test_collection.py @@ -2,6 +2,7 @@ # :copyright: Copyright (c) 2013 Martin Pengelly-Phillips # :license: See LICENSE.txt. +import sys import inspect import pytest @@ -242,7 +243,6 @@ def test_remove_non_member(): (PaddedCollection, '{range}', '1-12'), (PaddedCollection, '{ranges}', '1-3, 7, 9-12'), (PaddedCollection, '{holes}', '4-6, 8'), - ]) def test_format(CollectionCls, pattern, expected): '''Format collection according to pattern.''' @@ -250,6 +250,25 @@ def test_format(CollectionCls, pattern, expected): assert collection.format(pattern) == expected +def test_format_sparse_collection(): + '''Format sparse collection without recursion error.''' + recursion_limit = sys.getrecursionlimit() + recursion_error_occurred = False + + try: + collection = PaddedCollection( + indexes=set(range(0, recursion_limit * 2, 2)) + ) + collection.format() + except RuntimeError as error: + if 'maximum recursion depth exceeded' in str(error): + recursion_error_occurred = True + else: + raise + + assert not recursion_error_occurred + + @pytest.mark.parametrize(('collection', 'expected'), [ (PaddedCollection(indexes=set([])), True), (PaddedCollection(indexes=set([1])), True),
collection.format hits maximum recursion depth for collections with lots of holes. The following code gives an example. ```python paths = ["name.{0:04d}.jpg".format(x) for x in range(2000)[::2]] collection = clique.assemble(paths)[0][0] collection.format("{head}####{tail}") ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/unit/test_collection.py::test_format_sparse_collection" ]
[ "test/unit/test_collection.py::test_remove_non_member", "test/unit/test_collection.py::test_separate[non-contiguous", "test/unit/test_collection.py::test_is_compatible[incompatible", "test/unit/test_collection.py::test_compatible_merge[complimentary]", "test/unit/test_collection.py::test_holes[range", "test/unit/test_collection.py::test_comparisons[different", "test/unit/test_collection.py::test_format[PaddedCollection-{ranges}-1-3,", "test/unit/test_collection.py::test_is_compatible[compatible]", "test/unit/test_collection.py::test_holes[empty]", "test/unit/test_collection.py::test_contains[non-member", "test/unit/test_collection.py::test_compatible_merge[duplicates]", "test/unit/test_collection.py::test_is_contiguous[empty]", "test/unit/test_collection.py::test_match[padded-collection:unpadded", "test/unit/test_collection.py::test_add[unpadded-collection:padded", "test/unit/test_collection.py::test_add[padded-collection:unpadded", "test/unit/test_collection.py::test_format[PaddedCollection-{head}-/head.]", "test/unit/test_collection.py::test_holes[single", "test/unit/test_collection.py::test_add[padded-collection:padded", "test/unit/test_collection.py::test_is_contiguous[contiguous", "test/unit/test_collection.py::test_not_implemented_comparison", "test/unit/test_collection.py::test_format[PaddedCollection-{range}-1-12]", "test/unit/test_collection.py::test_format[PaddedCollection-{padding}-%04d]", "test/unit/test_collection.py::test_format[PaddedCollection-{holes}-4-6,", "test/unit/test_collection.py::test_is_contiguous[single]", "test/unit/test_collection.py::test_compatible_merge[both", "test/unit/test_collection.py::test_match[different", "test/unit/test_collection.py::test_str", "test/unit/test_collection.py::test_unsettable_indexes", "test/unit/test_collection.py::test_format[UnpaddedCollection-{padding}-%d]", "test/unit/test_collection.py::test_contains[different", "test/unit/test_collection.py::test_incompatible_merge[incompatible", "test/unit/test_collection.py::test_format[PaddedCollection-{tail}-.ext]", "test/unit/test_collection.py::test_separate[empty]", "test/unit/test_collection.py::test_add[unpadded-collection:unpadded", "test/unit/test_collection.py::test_repr", "test/unit/test_collection.py::test_match[padded-collection:padded", "test/unit/test_collection.py::test_change_property[padding-4-^head\\\\.(?P<index>(?P<padding>0*)\\\\d+?)\\\\.tail$-head.0001.tail]", "test/unit/test_collection.py::test_iterator[padded-collection]", "test/unit/test_collection.py::test_comparisons[equal]", "test/unit/test_collection.py::test_iterator[unpadded-collection]", "test/unit/test_collection.py::test_escaping_expression", "test/unit/test_collection.py::test_match[unpadded-collection:unpadded", "test/unit/test_collection.py::test_separate[single", "test/unit/test_collection.py::test_holes[multiple", "test/unit/test_collection.py::test_remove", "test/unit/test_collection.py::test_holes[contiguous", "test/unit/test_collection.py::test_holes[missing", "test/unit/test_collection.py::test_match[unpadded-collection:padded", "test/unit/test_collection.py::test_add_duplicate", "test/unit/test_collection.py::test_is_contiguous[non-contiguous]", "test/unit/test_collection.py::test_contains[valid", "test/unit/test_collection.py::test_separate[contiguous" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2016-04-30T17:21:04Z"
apache-2.0
6si__shipwright-79
diff --git a/CHANGES.rst b/CHANGES.rst index f034d37..89cf5f1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,8 @@ 0.5.1 (unreleased) ------------------ -- Nothing changed yet. +- Add --pull-cache to pull images from repository before building. + (`Issue #49 <https://github.com/6si/shipwright/issues/49>`_). 0.5.0 (2016-08-19) diff --git a/shipwright/base.py b/shipwright/base.py index 213d597..421f1af 100644 --- a/shipwright/base.py +++ b/shipwright/base.py @@ -4,10 +4,11 @@ from . import build, dependencies, docker, push class Shipwright(object): - def __init__(self, source_control, docker_client, tags): + def __init__(self, source_control, docker_client, tags, pull_cache=False): self.source_control = source_control self.docker_client = docker_client self.tags = tags + self._pull_cache = pull_cache def targets(self): return self.source_control.targets() @@ -18,7 +19,10 @@ class Shipwright(object): return self._build(this_ref_str, targets) def _build(self, this_ref_str, targets): - for evt in build.do_build(self.docker_client, this_ref_str, targets): + client = self.docker_client + pull_cache = self._pull_cache + ref = this_ref_str + for evt in build.do_build(client, ref, targets, pull_cache): yield evt # now that we're built and tagged all the images. diff --git a/shipwright/build.py b/shipwright/build.py index 707d4f9..4ee1558 100644 --- a/shipwright/build.py +++ b/shipwright/build.py @@ -13,7 +13,7 @@ def _merge(d1, d2): return d -def do_build(client, build_ref, targets): +def do_build(client, build_ref, targets, pull_cache): """ Generic function for building multiple images while notifying a callback function with output produced. @@ -39,11 +39,11 @@ def do_build(client, build_ref, targets): parent_ref = None if target.parent: parent_ref = build_index.get(target.parent) - for evt in build(client, parent_ref, target): + for evt in build(client, parent_ref, target, pull_cache): yield evt -def build(client, parent_ref, image): +def build(client, parent_ref, image, pull_cache): """ builds the given image tagged with <build_ref> and ensures that it depends on it's parent if it's part of this build group (shares @@ -62,7 +62,25 @@ def build(client, parent_ref, image): built_tags = docker.last_built_from_docker(client, image.name) if image.ref in built_tags: - return [] + return + + if pull_cache: + pull_evts = client.pull( + repository=image.name, + tag=image.ref, + stream=True, + ) + + failed = False + for evt in pull_evts: + event = process_event_(evt) + if 'error' in event: + failed = True + else: + yield event + + if not failed: + return build_evts = client.build( fileobj=mkcontext(parent_ref, image.path), @@ -73,4 +91,5 @@ def build(client, parent_ref, image): dockerfile=os.path.basename(image.path), ) - return (process_event_(evt) for evt in build_evts) + for evt in build_evts: + yield process_event_(evt) diff --git a/shipwright/cli.py b/shipwright/cli.py index 24f6f78..82eaf50 100644 --- a/shipwright/cli.py +++ b/shipwright/cli.py @@ -109,6 +109,11 @@ def argparser(): help='Build working tree, including uncommited and untracked changes', action='store_true', ) + common.add_argument( + '--pull-cache', + help='When building try to pull previously built images', + action='store_true', + ) a_arg( common, '-d', '--dependants', help='Build DEPENDANTS and all its dependants', @@ -157,7 +162,6 @@ def old_style_arg_dict(namespace): '--exclude': _flatten(ns.exclude), '--help': False, '--no-build': getattr(ns, 'no_build', False), - '--dirty': getattr(ns, 'dirty', False), '--upto': 
_flatten(ns.upto), '--x-assert-hostname': ns.x_assert_hostname, '-H': ns.docker_host, @@ -237,8 +241,10 @@ def run(path, arguments, client_cfg, environ, new_style_args=None): if new_style_args is None: dirty = False + pull_cache = False else: dirty = new_style_args.dirty + pull_cache = new_style_args.pull_cache namespace = config['namespace'] name_map = config.get('names', {}) @@ -249,7 +255,7 @@ def run(path, arguments, client_cfg, environ, new_style_args=None): 'to commit these changes, re-run with the --dirty flag.' ) - sw = Shipwright(scm, client, arguments['tags']) + sw = Shipwright(scm, client, arguments['tags'], pull_cache) command = getattr(sw, command_name) show_progress = sys.stdout.isatty()
6si/shipwright
7d3ccf39acc79bb6d33a787e773227358764dd2c
diff --git a/tests/integration/test_docker_builds.py b/tests/integration/test_docker_builds.py index 00aa6be..3a22616 100644 --- a/tests/integration/test_docker_builds.py +++ b/tests/integration/test_docker_builds.py @@ -12,7 +12,7 @@ from .utils import commit_untracked, create_repo, get_defaults def default_args(): - return argparse.Namespace(dirty=False) + return argparse.Namespace(dirty=False, pull_cache=False) def test_sample(tmpdir, docker_client): @@ -734,3 +734,85 @@ def test_build_with_repo_digest(tmpdir, docker_client, registry): ) for image in old_images: cli.remove_image(image, force=True) + + +def test_docker_buld_pull_cache(tmpdir, docker_client, registry): + path = str(tmpdir.join('shipwright-localhost-sample')) + source = pkg_resources.resource_filename( + __name__, + 'examples/shipwright-localhost-sample', + ) + repo = create_repo(path, source) + tag = repo.head.ref.commit.hexsha[:12] + + client_cfg = docker_utils.kwargs_from_env() + cli = docker_client + + defaults = get_defaults() + defaults['push'] = True + try: + shipw_cli.run( + path=path, + client_cfg=client_cfg, + arguments=defaults, + environ={}, + ) + + # Remove the build images: + old_images = ( + cli.images(name='localhost:5000/service1', quiet=True) + + cli.images(name='localhost:5000/shared', quiet=True) + + cli.images(name='localhost:5000/base', quiet=True) + ) + for image in old_images: + cli.remove_image(image, force=True) + + images_after_delete = ( + cli.images(name='localhost:5000/service1') + + cli.images(name='localhost:5000/shared') + + cli.images(name='localhost:5000/base') + ) + assert images_after_delete == [] + + args = default_args() + args.pull_cache = True + + shipw_cli.run( + path=path, + client_cfg=client_cfg, + arguments=defaults, + environ={}, + new_style_args=args, + ) + + service1, shared, base = ( + cli.images(name='localhost:5000/service1') + + cli.images(name='localhost:5000/shared') + + cli.images(name='localhost:5000/base') + ) + + assert set(service1['RepoTags']) == { + 'localhost:5000/service1:master', + 'localhost:5000/service1:latest', + 'localhost:5000/service1:' + tag, + } + + assert set(shared['RepoTags']) == { + 'localhost:5000/shared:master', + 'localhost:5000/shared:latest', + 'localhost:5000/shared:' + tag, + } + + assert set(base['RepoTags']) == { + 'localhost:5000/base:master', + 'localhost:5000/base:latest', + 'localhost:5000/base:' + tag, + } + finally: + old_images = ( + cli.images(name='localhost:5000/service1', quiet=True) + + cli.images(name='localhost:5000/shared', quiet=True) + + cli.images(name='localhost:5000/base', quiet=True) + ) + for image in old_images: + cli.remove_image(image, force=True) diff --git a/tests/test_cli.py b/tests/test_cli.py index 260eb92..064f931 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -16,7 +16,6 @@ def get_defaults(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': False, '--upto': [], '--x-assert-hostname': False, '-H': None, @@ -90,7 +89,6 @@ def test_args(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': False, '--upto': [], '--x-assert-hostname': True, '-H': None, @@ -105,7 +103,7 @@ def test_args_2(): args = [ '--account=x', '--x-assert-hostname', 'build', '-d', 'foo', 'bar', - '-t', 'foo', '--dirty', + '-t', 'foo', '--dirty', '--pull-cache', ] parser = cli.argparser() arguments = cli.old_style_arg_dict(parser.parse_args(args)) @@ -118,7 +116,6 @@ def test_args_2(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': True, '--upto': [], '--x-assert-hostname': 
True, '-H': None, @@ -142,7 +139,6 @@ def test_args_base(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': False, '--upto': [], '--x-assert-hostname': False, '-H': None,
docker pull all images for current branch and master before building Because our build server forgets the Docker cache between builds, we pull the previous build for all the images. It would be great if we could get shipwright to do this for us. Otherwise, a command like "shipwright images", which lists all the images that shipwright *would* build, would let us write our own command to do it.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cli.py::test_args", "tests/test_cli.py::test_args_2", "tests/test_cli.py::test_args_base" ]
[ "tests/integration/test_docker_builds.py::test_dirty_fails_without_flag", "tests/test_cli.py::test_without_json_manifest", "tests/test_cli.py::test_push_also_builds", "tests/test_cli.py::test_assert_hostname" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-08-22T09:51:49Z"
apache-2.0
ARM-software__mango-11
diff --git a/mango/domain/distribution.py b/mango/domain/distribution.py index 4f5b69d..bb9e14d 100644 --- a/mango/domain/distribution.py +++ b/mango/domain/distribution.py @@ -1,49 +1,5 @@ -# Defining loguniform distribution -""" -Credits: Extended from the original definition of rvs function in scipy/scipy/stats/_distn_infrastructure.py -for the class rv_generic and the _rvs function for the uniform distribution from -scipy/scipy/stats/_continuous_distns.py -""" +from scipy.stats import loguniform as _loguniform -from scipy.stats import rv_continuous -import numpy as np - -class log_uniform_gen(rv_continuous): - """A log uniform distribution with base 10 - """ - - def __init__(self, *args, **kwargs): - self.base = 10 - super(log_uniform_gen, self).__init__(*args, **kwargs) - - def _log(self, x): - return np.log(x) / np.log(self.base) - - def _argcheck(self, a, b): - return (a > 0) & (b > a) - - def _get_support(self, a, b): - return a, b - - def _pdf(self, x, a, b): - # reciprocal.pdf(x, a, b) = 1 / (x*log(b/a)) - return 1.0 / (x * self._log(b * 1.0 / a)) - - def _logpdf(self, x, a, b): - return np.log(x) - np.log(self._log(b * 1.0 / a)) - - def _cdf(self, x, a, b): - return (self._log(x) - self._log(a)) / self._log(b * 1.0 / a) - - def _ppf(self, q, a, b): - return a*pow(b*1.0/a, q) - - def _munp(self, n, a, b): - return 1.0/self._log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n)) - - def _entropy(self, a, b): - return 0.5*np.log(a*b)+np.log(self._log(b*1.0/a)) - - -loguniform = log_uniform_gen(name='loguniform') \ No newline at end of file +def loguniform(a, b): + return _loguniform(10 ** a, 10 ** (a + b))
ARM-software/mango
e2d4fd8ae61d2ab8921c94fa2f4dafc1119dbab2
diff --git a/mango/tests/test_domain_space.py b/mango/tests/test_domain_space.py index f393f2b..58fcbc6 100644 --- a/mango/tests/test_domain_space.py +++ b/mango/tests/test_domain_space.py @@ -2,6 +2,7 @@ import numpy as np from scipy.stats import uniform, loguniform from mango.domain.domain_space import domain_space +from mango.domain.distribution import loguniform as mango_loguniform def test_domain(): @@ -34,6 +35,15 @@ def test_domain(): assert (sample[param] in params[param]) +def test_mango_loguniform(): + space = { + 'a': mango_loguniform(-3, 6) + } + ds = domain_space(space, domain_size=1000) + samples = ds.get_domain() + assert all(1e-3 < sample['a'] < 1e3 for sample in samples) + + def test_gp_samples_to_params(): space = { 'a': range(10), @@ -91,7 +101,7 @@ def test_gp_space(): assert (X >= 0.0).all() assert (X[:, 0] == 1.).all() # a assert (X[:, 1] == 0.).all() # b - assert np.isin(X[:, 2], [0.0, 0.5, 1.0]).all() # c + assert np.isin(X[:, 2], [0.0, 0.5, 1.0]).all() # c assert np.isin(X[:, 4:7], np.eye(3)).all() # e assert X.shape == (ds.domain_size, 12) @@ -110,5 +120,3 @@ def test_gp_space(): X2 = ds.convert_to_gp(params) assert np.isclose(X2, X).all() - -
Domain error in loguniform Hi, it seems that there is a problem with `loguniform` when one of its arguments is negative. For example, my code runs when the first argument of `loguniform` is positive, but it raises a domain error when the first argument is a negative number. Any thoughts on this?
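For illustration, the behaviour before and after the patch above can be sketched as follows; the `(-3, 6)` arguments and the sampled range mirror the accompanying test rather than the reporter's code:

```python
from scipy.stats import loguniform as scipy_loguniform
from mango.domain.distribution import loguniform as mango_loguniform

# scipy's loguniform requires 0 < a < b, so a negative first argument fails
# its argument check and sampling raises a domain error:
# scipy_loguniform(-3, 6).rvs()   # ValueError

# The patched mango wrapper instead treats the arguments as a base-10
# exponent and a width in decades, so negative exponents are valid:
samples = mango_loguniform(-3, 6).rvs(size=10)   # values between 1e-3 and 1e3
assert all(1e-3 <= s <= 1e3 for s in samples)
```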
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "mango/tests/test_domain_space.py::test_mango_loguniform" ]
[ "mango/tests/test_domain_space.py::test_domain", "mango/tests/test_domain_space.py::test_gp_samples_to_params", "mango/tests/test_domain_space.py::test_gp_space" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2020-06-11T05:56:07Z"
apache-2.0
ARM-software__mango-47
diff --git a/README.md b/README.md index 16b11cb..2f14c63 100644 --- a/README.md +++ b/README.md @@ -352,8 +352,9 @@ The configuration parameters are: ... return True/False ``` -Early stopping is one of Mango's important features that allow to early terminate the current parallel search based on the custom user-designed criteria, such as the total optimization time spent, current validation accuracy achieved, or improvements in the past few iterations. For usage see early stopping examples [notebook](https://github.com/ARM-software/mango/blob/master/examples/EarlyStopping.ipynb). + Early stopping is one of Mango's important features that allow to early terminate the current parallel search based on the custom user-designed criteria, such as the total optimization time spent, current validation accuracy achieved, or improvements in the past few iterations. For usage see early stopping examples [notebook](https://github.com/ARM-software/mango/blob/master/examples/EarlyStopping.ipynb). +- initial_custom: A list of initial evaluation points to warm up the optimizer instead of random sampling. For example, for a search space with two parameters `x1` and `x2` the input could be: `[{'x1': 10, 'x2': -5}, {'x1': 0, 'x2': 10}]`. This allows the user to customize the initial evaluation points and therefore guide the optimization process. If this option is given then `initial_random` is ignored. The default configuration parameters can be modified, as shown below. Only the parameters whose values need to adjusted can be passed as the dictionary. diff --git a/mango/tuner.py b/mango/tuner.py index 360a859..97f02a1 100644 --- a/mango/tuner.py +++ b/mango/tuner.py @@ -29,6 +29,7 @@ class Tuner: class Config: domain_size: int = None initial_random: int = 2 + initial_custom: dict = None num_iteration: int = 20 batch_size: int = 1 optimizer: str = 'Bayesian' @@ -151,25 +152,35 @@ class Tuner: self.maximize_objective = False return self.run() + + def run_initial(self): + if self.config.initial_custom is not None: + X_tried = copy.deepcopy(self.config.initial_custom) + X_list, Y_list = self.runUserObjective(X_tried) + else: + # getting first few random values + X_tried = self.ds.get_random_sample(self.config.initial_random) + X_list, Y_list = self.runUserObjective(X_tried) + + # in case initial random results are invalid try different samples + n_tries = 1 + while len(Y_list) < self.config.initial_random and n_tries < 3: + X_tried2 = self.ds.get_random_sample(self.config.initial_random - len(Y_list)) + X_list2, Y_list2 = self.runUserObjective(X_tried2) + X_tried2.extend(X_tried2) + X_list = np.append(X_list, X_list2) + Y_list = np.append(Y_list, Y_list2) + n_tries += 1 + + if len(Y_list) == 0: + raise ValueError("No valid configuration found to initiate the Bayesian Optimizer") + return X_list, Y_list, X_tried + def runBayesianOptimizer(self): results = dict() - # getting first few random values - random_hyper_parameters = self.ds.get_random_sample(self.config.initial_random) - X_list, Y_list = self.runUserObjective(random_hyper_parameters) - - # in case initial random results are invalid try different samples - n_tries = 1 - while len(Y_list) < self.config.initial_random and n_tries < 3: - random_hps = self.ds.get_random_sample(self.config.initial_random - len(Y_list)) - X_list2, Y_list2 = self.runUserObjective(random_hps) - random_hyper_parameters.extend(random_hps) - X_list = np.append(X_list, X_list2) - Y_list = np.append(Y_list, Y_list2) - n_tries += 1 + X_list, Y_list, X_tried = self.run_initial() - if 
len(Y_list) == 0: - raise ValueError("No valid configuration found to initiate the Bayesian Optimizer") # evaluated hyper parameters are used X_init = self.ds.convert_GP_space(X_list) @@ -186,7 +197,7 @@ class Tuner: X_sample = X_init Y_sample = Y_init - hyper_parameters_tried = random_hyper_parameters + hyper_parameters_tried = X_tried objective_function_values = Y_list surrogate_values = Y_list
ARM-software/mango
a71bc007a0c4e39462fd1810cdbcf99c4e854679
diff --git a/tests/test_tuner.py b/tests/test_tuner.py index 24e7c99..98e5fbd 100644 --- a/tests/test_tuner.py +++ b/tests/test_tuner.py @@ -14,7 +14,6 @@ import numpy as np from mango.domain.domain_space import domain_space from mango import Tuner, scheduler from scipy.stats import uniform -from mango.domain.distribution import loguniform # Simple param_dict param_dict = {"a": uniform(0, 1), # uniform distribution @@ -125,7 +124,7 @@ def test_rosenbrock(): results.append(result) return results - tuner = Tuner(param_dict, objfunc, conf_dict=dict(domain_size=100000)) + tuner = Tuner(param_dict, objfunc, conf_dict=dict(domain_size=100000, num_iteration=40)) results = tuner.run() print('best hyper parameters:', results['best_params']) @@ -190,6 +189,40 @@ def test_convex(): assert abs(results['best_params']['y'] - y_opt) <= 3 +def test_initial_custom(): + param_dict = { + 'x': range(-100, 10), + 'y': range(-10, 20), + } + + x_opt = 0 + y_opt = 0 + + def objfunc(args_list): + results = [] + for hyper_par in args_list: + x = hyper_par['x'] + y = hyper_par['y'] + result = (x ** 2 + y ** 2) / 1e4 + results.append(result) + return results + + config = dict(initial_custom=[dict(x=-100, y=20), + dict(x=10, y=20)] + ) + + tuner = Tuner(param_dict, objfunc, conf_dict=config) + results = tuner.minimize() + + print('best hyper parameters:', results['best_params']) + print('best Accuracy:', results['best_objective']) + + assert abs(results['best_params']['x'] - x_opt) <= 3 + assert abs(results['best_params']['y'] - y_opt) <= 3 + assert results['random_params'][0] == config['initial_custom'][0] + assert results['random_params'][1] == config['initial_custom'][1] + + def test_local_scheduler(): param_space = dict(x=range(-10, 10), y=range(-10, 10))
Is it possible to add an initial data point? The way I'm currently using mango, I will always have a first run with good defaults. Is it possible to use this information somehow? I have quite wide ranges for my hyperparameters, and I think this would help a lot.
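A minimal sketch of the `initial_custom` option introduced by the patch above; the search space, objective, and warm-start points are lifted from the accompanying test rather than from the reporter's setup:

```python
from mango import Tuner

param_space = {'x': range(-100, 10), 'y': range(-10, 20)}

def objective(args_list):
    # mango calls the objective with a batch of parameter dicts.
    return [(p['x'] ** 2 + p['y'] ** 2) / 1e4 for p in args_list]

# Warm-start the optimizer with known evaluation points; when initial_custom
# is given, the usual initial_random sampling is skipped.
config = {'initial_custom': [dict(x=-100, y=20), dict(x=10, y=20)]}

tuner = Tuner(param_space, objective, conf_dict=config)
results = tuner.minimize()
print(results['best_params'], results['best_objective'])
```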
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_tuner.py::test_initial_custom" ]
[ "tests/test_tuner.py::test_domain", "tests/test_tuner.py::test_tuner", "tests/test_tuner.py::test_rosenbrock", "tests/test_tuner.py::test_config", "tests/test_tuner.py::test_convex", "tests/test_tuner.py::test_six_hump", "tests/test_tuner.py::test_celery_scheduler", "tests/test_tuner.py::test_custom_scheduler", "tests/test_tuner.py::test_early_stopping_simple", "tests/test_tuner.py::test_early_stopping_complex" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-03-02T18:10:44Z"
apache-2.0
ARMmbed__greentea-237
diff --git a/mbed_greentea/mbed_report_api.py b/mbed_greentea/mbed_report_api.py index da3f0d9..82acb5c 100644 --- a/mbed_greentea/mbed_report_api.py +++ b/mbed_greentea/mbed_report_api.py @@ -38,6 +38,13 @@ def exporter_json(test_result_ext, test_suite_properties=None): @details This is a machine friendly format """ import json + for target in test_result_ext.values(): + for suite in target.values(): + try: + suite["single_test_output"] = suite["single_test_output"]\ + .decode("unicode_escape") + except KeyError: + pass return json.dumps(test_result_ext, indent=4) @@ -211,7 +218,10 @@ def exporter_testcase_junit(test_result_ext, test_suite_properties=None): test_cases.append(tc) ts_name = target_name - test_build_properties = test_suite_properties[target_name] if target_name in test_suite_properties else None + if test_suite_properties and target_name in test_suite_properties: + test_build_properties = test_suite_properties[target_name] + else: + test_build_properties = None ts = TestSuite(ts_name, test_cases, properties=test_build_properties) test_suites.append(ts) @@ -584,7 +594,9 @@ def get_result_overlay_dropdowns(result_div_id, test_results): result_output_div_id = "%s_output" % result_div_id result_output_dropdown = get_dropdown_html(result_output_div_id, "Test Output", - test_results['single_test_output'].rstrip("\n"), + test_results['single_test_output'] + .decode("unicode-escape") + .rstrip("\n"), output_text=True) # Add a dropdown for the testcases if they are present @@ -740,10 +752,14 @@ def exporter_html(test_result_ext, test_suite_properties=None): test_results['single_test_count'] += 1 result_class = get_result_colour_class(test_results['single_test_result']) + try: + percent_pass = int((test_results['single_test_passes']*100.0)/test_results['single_test_count']) + except ZeroDivisionError: + percent_pass = 100 this_row += result_cell_template % (result_class, result_div_id, test_results['single_test_result'], - int((test_results['single_test_passes']*100.0)/test_results['single_test_count']), + percent_pass, test_results['single_test_passes'], test_results['single_test_count'], result_overlay)
ARMmbed/greentea
86f5ec3211a8f7f324bcdd3201012945ee0534ac
diff --git a/test/report_api.py b/test/report_api.py new file mode 100644 index 0000000..122e26e --- /dev/null +++ b/test/report_api.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +""" +mbed SDK +Copyright (c) 2017 ARM Limited + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest +from mock import patch + +from mbed_greentea.mbed_report_api import exporter_html, \ + exporter_memory_metrics_csv, exporter_testcase_junit, \ + exporter_testcase_text, exporter_text, exporter_json + + +class ReportEmitting(unittest.TestCase): + + + report_fns = [exporter_html, exporter_memory_metrics_csv, + exporter_testcase_junit, exporter_testcase_text, + exporter_text, exporter_json] + def test_report_zero_tests(self): + test_data = {} + for report_fn in self.report_fns: + report_fn(test_data) + + def test_report_zero_testcases(self): + test_data = { + 'k64f-gcc_arm': { + 'garbage_test_suite' :{ + u'single_test_result': u'NOT_RAN', + u'elapsed_time': 0.0, + u'build_path': u'N/A', + u'build_path_abs': u'N/A', + u'copy_method': u'N/A', + u'image_path': u'N/A', + u'single_test_output': b'N/A', + u'platform_name': u'k64f', + u'test_bin_name': u'N/A', + u'testcase_result': {}, + } + } + } + for report_fn in self.report_fns: + report_fn(test_data)
mbedgt crash with float division by zero

Hi

Here is my command:

```
mbedgt -V -v -t NUCLEO_F401RE-ARM,NUCLEO_F401RE-GCC_ARM,NUCLEO_F401RE-IAR,NUCLEO_F410RB-ARM,NUCLEO_F410RB-GCC_ARM,NUCLEO_F410RB-IAR,NUCLEO_F411RE-ARM,NUCLEO_F411RE-GCC_ARM,NUCLEO_F411RE-IAR --report-html=/c/xxx.html
```

It has crashed:

```
...
mbedgt: all tests finished!
mbedgt: shuffle seed: 0.3680156551
mbedgt: exporting to HTML file
mbedgt: unexpected error: float division by zero
Traceback (most recent call last):
  File "C:\Python27\Scripts\mbedgt-script.py", line 11, in <module>
    load_entry_point('mbed-greentea==1.2.6', 'console_scripts', 'mbedgt')()
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 401, in main
    cli_ret = main_cli(opts, args)
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 1050, in main_cli
    html_report = exporter_html(test_report)
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 747, in exporter_html
    int((test_results['single_test_passes']*100.0)/test_results['single_test_count']),
ZeroDivisionError: float division by zero
```
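For reference, the guard added by the patch above amounts to the following pattern (function and variable names here are illustrative, not greentea's):

```python
def percent_passed(passes, count):
    # A suite with zero executed test cases is reported as 100% passed
    # instead of raising ZeroDivisionError in the HTML exporter.
    try:
        return int((passes * 100.0) / count)
    except ZeroDivisionError:
        return 100

assert percent_passed(3, 4) == 75
assert percent_passed(0, 0) == 100
```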
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/report_api.py::ReportEmitting::test_report_zero_testcases" ]
[ "test/report_api.py::ReportEmitting::test_report_zero_tests" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2017-09-25T13:51:40Z"
apache-2.0
ARMmbed__greentea-243
diff --git a/mbed_greentea/mbed_report_api.py b/mbed_greentea/mbed_report_api.py index 166bc29..22a3778 100644 --- a/mbed_greentea/mbed_report_api.py +++ b/mbed_greentea/mbed_report_api.py @@ -42,7 +42,7 @@ def exporter_json(test_result_ext, test_suite_properties=None): for suite in target.values(): try: suite["single_test_output"] = suite["single_test_output"]\ - .decode("unicode_escape") + .decode("utf-8", "replace") except KeyError: pass return json.dumps(test_result_ext, indent=4) @@ -603,7 +603,7 @@ def get_result_overlay_dropdowns(result_div_id, test_results): result_output_dropdown = get_dropdown_html(result_output_div_id, "Test Output", test_results['single_test_output'] - .decode("unicode-escape") + .decode("utf-8", "replace") .rstrip("\n"), output_text=True)
ARMmbed/greentea
8f7b28f8ec739156d238304fa4f5f2e5156536f5
diff --git a/test/report_api.py b/test/report_api.py index 122e26e..2a4275f 100644 --- a/test/report_api.py +++ b/test/report_api.py @@ -45,7 +45,7 @@ class ReportEmitting(unittest.TestCase): u'build_path_abs': u'N/A', u'copy_method': u'N/A', u'image_path': u'N/A', - u'single_test_output': b'N/A', + u'single_test_output': b'\x80abc\uXXXX' , u'platform_name': u'k64f', u'test_bin_name': u'N/A', u'testcase_result': {},
mbedgt crash with UnicodeDecodeError

Hi

I am sorry, but I still get a crash with the new greentea version:

```
...
mbedgt: exporting to HTML file 'C:/mcu/reports/report__mbed_os5_release_non_regression_F756ZG_mbed-os-5.5.7__2017_09_28_00_06.html'...
mbedgt: unexpected error: 'unicodeescape' codec can't decode bytes in position 6308-6310: truncated \uXXXX escape
Traceback (most recent call last):
  File "C:\Python27\Scripts\mbedgt-script.py", line 11, in <module>
    load_entry_point('mbed-greentea==1.3.0', 'console_scripts', 'mbedgt')()
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 416, in main
    cli_ret = main_cli(opts, args)
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 1067, in main_cli
    html_report = exporter_html(test_report)
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 747, in exporter_html
    test_results)
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 636, in get_result_overlay
    overlay_dropdowns = get_result_overlay_dropdowns(result_div_id, test_results)
  File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 598, in get_result_overlay_dropdowns
    .decode("unicode-escape")
UnicodeDecodeError: 'unicodeescape' codec can't decode bytes in position 6308-6310: truncated \uXXXX escape
```

@theotherjimmy
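A small illustration of the failure mode and the fix; the byte string mirrors the one used in the updated test rather than real device output:

```python
raw = b'\x80abc\\uXXXX'   # captured output that is not a valid escape sequence

# Decoding with "unicode_escape" raises on the malformed \uXXXX sequence:
try:
    raw.decode("unicode_escape")
except UnicodeDecodeError as exc:
    print(exc)                        # ... truncated \uXXXX escape

# Decoding as UTF-8 with errors="replace" never raises; undecodable bytes
# become the U+FFFD replacement character instead:
print(raw.decode("utf-8", "replace"))
```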
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/report_api.py::ReportEmitting::test_report_zero_testcases" ]
[ "test/report_api.py::ReportEmitting::test_report_zero_tests" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-09-29T17:09:53Z"
apache-2.0
ARMmbed__greentea-250
diff --git a/mbed_greentea/mbed_target_info.py b/mbed_greentea/mbed_target_info.py index 356676b..c825bcf 100644 --- a/mbed_greentea/mbed_target_info.py +++ b/mbed_greentea/mbed_target_info.py @@ -20,6 +20,17 @@ Author: Przemyslaw Wirkus <Przemyslaw.Wirkus@arm.com> import os import re import json +from os import walk +try: + from contextlib import suppress +except ImportError: + from contextlib import contextmanager + @contextmanager + def suppress(*excs): + try: + yield + except excs: + pass from mbed_greentea.mbed_common_api import run_cli_process from mbed_greentea.mbed_greentea_log import gt_logger @@ -381,82 +392,65 @@ def get_platform_property(platform, property): :return: property value, None if property not found """ - # First load from targets.json if available - value_from_targets_json = get_platform_property_from_targets(platform, property) - if value_from_targets_json: - return value_from_targets_json - - # Check if info is available for a specific platform - if platform in TARGET_INFO_MAPPING: - if property in TARGET_INFO_MAPPING[platform]['properties']: - return TARGET_INFO_MAPPING[platform]['properties'][property] + default = _get_platform_property_from_default(property) + from_targets_json = _get_platform_property_from_targets( + platform, property, default) + if from_targets_json: + return from_targets_json + from_info_mapping = _get_platform_property_from_info_mapping(platform, property) + if from_info_mapping: + return from_info_mapping + return default + +def _get_platform_property_from_default(property): + with suppress(KeyError): + return TARGET_INFO_MAPPING['default'][property] + +def _get_platform_property_from_info_mapping(platform, property): + with suppress(KeyError): + return TARGET_INFO_MAPPING[platform]['properties'][property] + +def _platform_property_from_targets_json(targets, platform, property, default): + """! Get a platforms's property from the target data structure in + targets.json. Takes into account target inheritance. + @param targets Data structure parsed from targets.json + @param platform Name of the platform + @param property Name of the property + @param default the fallback value if none is found, but the target exists + @return property value, None if property not found - # Check if default data is available - if 'default' in TARGET_INFO_MAPPING: - if property in TARGET_INFO_MAPPING['default']: - return TARGET_INFO_MAPPING['default'][property] - - return None + """ + with suppress(KeyError): + return targets[platform][property] + with suppress(KeyError): + for inherited_target in targets[platform]['inherits']: + result = _platform_property_from_targets_json(targets, inherited_target, property, None) + if result: + return result + if platform in targets: + return default + +IGNORED_DIRS = ['.build', 'BUILD', 'tools'] + +def _find_targets_json(path): + for root, dirs, files in walk(path, followlinks=True): + for ignored_dir in IGNORED_DIRS: + if ignored_dir in dirs: + dirs.remove(ignored_dir) + if 'targets.json' in files: + yield os.path.join(root, 'targets.json') -def get_platform_property_from_targets(platform, property): +def _get_platform_property_from_targets(platform, property, default): """ Load properties from targets.json file somewhere in the project structure :param platform: :return: property value, None if property not found """ - - def get_platform_property_from_targets(targets, platform, property): - """! Get a platforms's property from the target data structure in - targets.json. Takes into account target inheritance. 
- @param targets Data structure parsed from targets.json - @param platform Name of the platform - @param property Name of the property - @return property value, None if property not found - - """ - - result = None - if platform in targets: - if property in targets[platform]: - result = targets[platform][property] - elif 'inherits' in targets[platform]: - result = None - for inherited_target in targets[platform]['inherits']: - result = get_platform_property_from_targets(targets, inherited_target, property) - - # Stop searching after finding the first value for the property - if result: - break - - return result - - result = None - targets_json_path = [] - for root, dirs, files in os.walk(os.getcwd(), followlinks=True): - ignored_dirs = ['.build', 'BUILD', 'tools'] - - for ignored_dir in ignored_dirs: - if ignored_dir in dirs: - dirs.remove(ignored_dir) - - if 'targets.json' in files: - targets_json_path.append(os.path.join(root, 'targets.json')) - - if not targets_json_path: - gt_logger.gt_log_warn("No targets.json files found, using default target properties") - - for targets_path in targets_json_path: - try: + for targets_path in _find_targets_json(os.getcwd()): + with suppress(IOError, ValueError): with open(targets_path, 'r') as f: targets = json.load(f) - - # Load property from targets.json - result = get_platform_property_from_targets(targets, platform, property) - - # If a valid property was found, stop looking + result = _platform_property_from_targets_json(targets, platform, property, default) if result: - break - except Exception: - continue - return result + return result diff --git a/setup.py b/setup.py index e98e109..0734dfe 100644 --- a/setup.py +++ b/setup.py @@ -50,13 +50,15 @@ setup(name='mbed-greentea', license=LICENSE, test_suite = 'test', entry_points={ - "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",], + "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",], }, install_requires=["PrettyTable>=0.7.2", - "PySerial>=3.0", - "mbed-host-tests>=1.2.0", - "mbed-ls>=1.2.15", - "junit-xml", - "lockfile", - "mock", - "colorama>=0.3,<0.4"]) + "PySerial>=3.0", + "mbed-host-tests>=1.2.0", + "mbed-ls>=1.2.15", + "junit-xml", + "lockfile", + "mock", + "six", + "colorama>=0.3,<0.4"]) +
ARMmbed/greentea
b8bcffbb7aaced094f252a4ddfe930e8237fb484
diff --git a/test/mbed_gt_target_info.py b/test/mbed_gt_target_info.py index e3f0a6a..96cd1db 100644 --- a/test/mbed_gt_target_info.py +++ b/test/mbed_gt_target_info.py @@ -21,6 +21,8 @@ import shutil import tempfile import unittest +from six import StringIO + from mock import patch from mbed_greentea import mbed_target_info @@ -338,8 +340,168 @@ mbed-gcc 1.1.0 result = mbed_target_info.add_target_info_mapping("null") - def test_get_platform_property_from_targets(self): - result = mbed_target_info.get_platform_property_from_targets({}, {}) + def test_get_platform_property_from_targets_no_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find: + _find.return_value = iter([]) + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_no_file(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.side_effect = IOError + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_invalid_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{") + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_empty_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{}") + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_no_value(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{\"K64F\": {}}") + result = mbed_target_info._get_platform_property_from_targets("K64F", "not_a_property", "default") + self.assertEqual(result, "default") + + def test_get_platform_property_from_targets_in_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{\"K64F\": {\"copy_method\": \"cp\"}}") + result = mbed_target_info._get_platform_property_from_targets("K64F", "copy_method", "default") + self.assertEqual("cp", result) + + def test_find_targets_json(self): + with patch("mbed_greentea.mbed_target_info.walk") as _walk: + _walk.return_value = iter([("", ["foo"], []), ("foo", [], ["targets.json"])]) + result = list(mbed_target_info._find_targets_json("bogus_path")) + self.assertEqual(result, ["foo/targets.json"]) + + def test_find_targets_json_ignored(self): + with patch("mbed_greentea.mbed_target_info.walk") as _walk: + walk_result =[("", [".build"], [])] + _walk.return_value = iter(walk_result) + result = 
list(mbed_target_info._find_targets_json("bogus_path")) + self.assertEqual(result, []) + self.assertEqual(walk_result, [("", [], [])]) + + def test_platform_property_from_targets_json_empty(self): + result = mbed_target_info._platform_property_from_targets_json( + {}, "not_a_target", "not_a_property", "default" + ) + self.assertIsNone(result) + + def test_platform_property_from_targets_json_base_target(self): + result = mbed_target_info._platform_property_from_targets_json( + {"K64F": {"copy_method": "cp"}}, "K64F", "copy_method", "default" + ) + self.assertEqual(result, "cp") + + def test_platform_property_from_targets_json_inherits(self): + result = mbed_target_info._platform_property_from_targets_json( + {"K64F": {"inherits": ["Target"]}, "Target": {"copy_method": "cp"}}, + "K64F", "copy_method", "default" + ) + self.assertEqual(result, "cp") + + def test_platform_property_from_default_missing(self): + result = mbed_target_info._get_platform_property_from_default("not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_default(self): + result = mbed_target_info._get_platform_property_from_default("copy_method") + self.assertEqual(result, "default") + + def test_platform_property_from_info_mapping_bad_platform(self): + result = mbed_target_info._get_platform_property_from_info_mapping("not_a_platform", "not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_info_mapping_missing(self): + result = mbed_target_info._get_platform_property_from_info_mapping("K64F", "not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_info_mapping(self): + result = mbed_target_info._get_platform_property_from_info_mapping("K64F", "copy_method") + self.assertEqual(result, "default") + + + # The following test cases are taken from this table: + # + # Num | In targets.json | In yotta blob | In Default | property used + # --- | --------------- | ------------- | ---------- | -------------- + # 1 | Yes | No | Yes |`targets.json` + # 2 | Yes | Yes | Yes |`targets.json` + # 3 | No | Yes | Yes | yotta blob + # 4 | No | No | Yes | default + # 5 | No | No | No | None + # 6 | Yes | No | No |`targets.json` + # 7 | Yes | Yes | No |`targets.json` + # 8 | No | Yes | No | yotta blob + def test_platform_property(self): + """Test that platform_property picks the property value preserving + the following priority relationship: + targets.json > yotta blob > default + """ + with patch("mbed_greentea.mbed_target_info._get_platform_property_from_targets") as _targets,\ + patch("mbed_greentea.mbed_target_info._get_platform_property_from_info_mapping") as _info_mapping,\ + patch("mbed_greentea.mbed_target_info._get_platform_property_from_default") as _default: + # 1 + _targets.return_value = "targets" + _info_mapping.return_value = None + _default.return_value = "default" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 2 + _info_mapping.return_value = "yotta" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 3 + _targets.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "yotta") + # 4 + _info_mapping.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "default") + # 5 + _default.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + None) + # 6 + _targets.return_value = "targets" + 
self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 7 + _info_mapping.return_value = "yotta" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 8 + _targets.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "yotta") def test_parse_yotta_json_for_build_name(self):
Target property priority incorrect

Currently we have priority as follows:

```
internal yotta blob > targets.json > tool default
```

This is a bug. Instead the priority should be:

```
targets.json w/ default > internal yotta blob > tool default
```

This implies a few test cases:

In targets.json        | In yotta blob | property used  | Currently Works
---------------------- | ------------- | -------------- | ---------------
Yes, with property     | No            | `targets.json` | Yes
Yes, without property  | No            | default        | Yes
Yes, with property     | Yes           | `targets.json` | No
Yes, without property  | Yes           | default        | No
No                     | No            | default        | Yes
No                     | Yes           | yotta blob     | Yes

@bridadan Is this the issue masked by #248?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_inherits", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_invalid_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_in_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_base_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_bad_platform", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_file", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json_ignored", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_empty_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_value", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_empty", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default" ]
[ "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_failed_open", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_valid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_add_target_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_json_for_build_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_multiple", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_target_from_current_dir_ok", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_json_data", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text_2", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_chars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_valid", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_with_ssl_errors", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_fail", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_text" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2017-10-20T19:13:58Z"
apache-2.0
ARMmbed__greentea-263
diff --git a/mbed_greentea/mbed_greentea_cli.py b/mbed_greentea/mbed_greentea_cli.py index f6a13c4..446b965 100644 --- a/mbed_greentea/mbed_greentea_cli.py +++ b/mbed_greentea/mbed_greentea_cli.py @@ -23,6 +23,7 @@ import os import sys import random import optparse +import fnmatch from time import time try: from Queue import Queue @@ -119,18 +120,6 @@ def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_sp @return """ - def filter_names_by_prefix(test_case_name_list, prefix_name): - """! - @param test_case_name_list List of all test cases - @param prefix_name Prefix of test name we are looking for - @result Set with names of test names starting with 'prefix_name' - """ - result = list() - for test_name in test_case_name_list: - if test_name.startswith(prefix_name): - result.append(test_name) - return sorted(result) - filtered_ctest_test_list = ctest_test_list test_list = None invalid_test_names = [] @@ -143,17 +132,15 @@ def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_sp gt_logger.gt_log("test case filter (specified with -n option)") for test_name in set(test_list): - if test_name.endswith('*'): - # This 'star-sufix' filter allows users to filter tests with fixed prefixes - # Example: -n 'TESTS-mbed_drivers* will filter all test cases with name starting with 'TESTS-mbed_drivers' - for test_name_filtered in filter_names_by_prefix(ctest_test_list.keys(), test_name[:-1]): - gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name_filtered)) - filtered_ctest_test_list[test_name_filtered] = ctest_test_list[test_name_filtered] - elif test_name not in ctest_test_list: - invalid_test_names.append(test_name) + gt_logger.gt_log_tab(test_name) + matches = [test for test in ctest_test_list.keys() if fnmatch.fnmatch(test, test_name)] + gt_logger.gt_log_tab(str(ctest_test_list)) + if matches: + for match in matches: + gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(match)) + filtered_ctest_test_list[match] = ctest_test_list[match] else: - gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name)) - filtered_ctest_test_list[test_name] = ctest_test_list[test_name] + invalid_test_names.append(test_name) if skip_test: test_list = skip_test.split(',')
ARMmbed/greentea
68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc
diff --git a/test/mbed_gt_cli.py b/test/mbed_gt_cli.py index 0646c20..8f4a1eb 100644 --- a/test/mbed_gt_cli.py +++ b/test/mbed_gt_cli.py @@ -21,6 +21,36 @@ import sys import unittest from mbed_greentea import mbed_greentea_cli +from mbed_greentea.tests_spec import TestSpec + +test_spec_def = { + "builds": { + "K64F-ARM": { + "platform": "K64F", + "toolchain": "ARM", + "base_path": "./.build/K64F/ARM", + "baud_rate": 115200, + "tests": { + "mbed-drivers-test-generic_tests":{ + "binaries":[ + { + "binary_type": "bootable", + "path": "./.build/K64F/ARM/mbed-drivers-test-generic_tests.bin" + } + ] + }, + "mbed-drivers-test-c_strings":{ + "binaries":[ + { + "binary_type": "bootable", + "path": "./.build/K64F/ARM/mbed-drivers-test-c_strings.bin" + } + ] + } + } + } + } +} class GreenteaCliFunctionality(unittest.TestCase): @@ -86,5 +116,36 @@ class GreenteaCliFunctionality(unittest.TestCase): os.chdir(curr_dir) shutil.rmtree(test1_dir) + def test_create_filtered_test_list(self): + test_spec = TestSpec() + test_spec.parse(test_spec_def) + test_build = test_spec.get_test_builds()[0] + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + 'mbed-drivers-test-generic_*', + None, + test_spec=test_spec) + self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-generic_tests'])) + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + '*_strings', + None, + test_spec=test_spec) + self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-c_strings'])) + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + 'mbed*s', + None, + test_spec=test_spec) + expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests']) + self.assertEqual(set(test_list.keys()), expected) + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + '*-drivers-*', + None, + test_spec=test_spec) + expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests']) + self.assertEqual(set(test_list.keys()), expected) + if __name__ == '__main__': unittest.main() diff --git a/test/mbed_gt_target_info.py b/test/mbed_gt_target_info.py index e630e7b..a12ba09 100644 --- a/test/mbed_gt_target_info.py +++ b/test/mbed_gt_target_info.py @@ -416,7 +416,7 @@ mbed-gcc 1.1.0 with patch("mbed_greentea.mbed_target_info.walk") as _walk: _walk.return_value = iter([("", ["foo"], []), ("foo", [], ["targets.json"])]) result = list(mbed_target_info._find_targets_json("bogus_path")) - self.assertEqual(result, ["foo/targets.json"]) + self.assertEqual(result, [os.path.join("foo", "targets.json")]) def test_find_targets_json_ignored(self): with patch("mbed_greentea.mbed_target_info.walk") as _walk:
Test names are not correctly globbed Test names only respect a wildcard placed at the end of the string, e.g. "mbed-os-*". However, a wildcard anywhere else, e.g. "*-timer", is not respected. The build tools accept these wildcards, so greentea should as well. This is the line responsible: https://github.com/ARMmbed/greentea/blob/32b95b44be653c3db527c02e1c5e1ffdc7d37f6f/mbed_greentea/mbed_greentea_cli.py#L146 It should be switched to `fnmatch`. (This is mostly a note to myself to fix it.)
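A quick sketch of the difference using `fnmatch` from the standard library, which is what the patch switches to; the test names are taken from the accompanying test spec:

```python
import fnmatch

tests = ['mbed-drivers-test-generic_tests', 'mbed-drivers-test-c_strings']

# The old prefix-only filter understood trailing wildcards such as
# 'mbed-drivers-*', but fnmatch also handles leading and embedded ones:
print(fnmatch.filter(tests, 'mbed-drivers-*'))   # both names
print(fnmatch.filter(tests, '*_strings'))        # ['mbed-drivers-test-c_strings']
print(fnmatch.filter(tests, '*-drivers-*'))      # both names
```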
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_create_filtered_test_list" ]
[ "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_invalid_path", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_hello_string", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_default_path", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_valid_path", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_greentea_version", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_print_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json_ignored", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_value", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_add_target_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_chars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_json_for_build_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_fail", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_multiple", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_valid", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_with_ssl_errors", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_no_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_invalid_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_inherits", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_failed_open", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_name", 
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_valid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_json_data", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_empty", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_bad_platform", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text_2", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_no_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_file", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_empty_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_in_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_target_from_current_dir_ok", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_base_target" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-02-15T17:29:56Z"
apache-2.0
ARMmbed__mbed-tools-138
diff --git a/news/20201201142709.bugfix b/news/20201201142709.bugfix new file mode 100644 index 0000000..0468f3e --- /dev/null +++ b/news/20201201142709.bugfix @@ -0,0 +1,1 @@ +Fix bug where we failed to handle config options that contain quotes (#125) diff --git a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl index e4820af..08ccced 100644 --- a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl +++ b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl @@ -65,7 +65,7 @@ set(MBED_CONFIG_DEFINITIONS # options {% for option in options -%} {% if option.value is not none -%} - {%if '{' in option.value|string or '(' in option.value|string %}"{% endif %}-D{{option.macro_name}}={{option.value}}{% if '}' in option.value|string or ')' in option.value|string %}"{% endif %} + "-D{{option.macro_name}}={{option.value|replace("\"", "\\\"")}}" {% endif %} {%- endfor %} # macros
ARMmbed/mbed-tools
94a3bd761d6ab3305c81da93517767aafff58d7e
diff --git a/tests/build/_internal/test_cmake_file.py b/tests/build/_internal/test_cmake_file.py index 1f59cb3..b0247a8 100644 --- a/tests/build/_internal/test_cmake_file.py +++ b/tests/build/_internal/test_cmake_file.py @@ -2,67 +2,69 @@ # Copyright (C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -from unittest import TestCase +import pytest -from tests.build._internal.config.factories import ConfigFactory +from tests.build._internal.config.factories import ConfigFactory, SourceFactory from mbed_tools.build._internal.cmake_file import generate_mbed_config_cmake_file, _render_mbed_config_cmake_template +from mbed_tools.build._internal.config.config import _create_config_option -class TestGenerateCMakeListsFile(TestCase): - def test_correct_arguments_passed(self): - target = dict() - target["labels"] = ["foo"] - target["extra_labels"] = ["morefoo"] - target["features"] = ["bar"] - target["components"] = ["baz"] - target["macros"] = ["macbaz"] - target["device_has"] = ["stuff"] - target["c_lib"] = ["c_lib"] - target["core"] = ["core"] - target["printf_lib"] = ["printf_lib"] - target["supported_form_factors"] = ["arduino"] +TOOLCHAIN_NAME = "gcc" + + +@pytest.fixture() +def fake_target(): + return { + "labels": ["foo"], + "extra_labels": ["morefoo"], + "features": ["bar"], + "components": ["baz"], + "macros": ["macbaz"], + "device_has": ["stuff"], + "c_lib": ["c_lib"], + "core": ["core"], + "printf_lib": ["printf_lib"], + "supported_form_factors": ["arduino"], + "supported_c_libs": {TOOLCHAIN_NAME: ["ginormous"]}, + "supported_application_profiles": ["full", "bare-metal"], + } + + +class TestGenerateCMakeListsFile: + def test_correct_arguments_passed(self, fake_target): config = ConfigFactory() mbed_target = "K64F" - toolchain_name = "GCC" - target["supported_c_libs"] = {toolchain_name.lower(): ["small", "std"]} - target["supported_application_profiles"] = ["full", "bare-metal"] - - result = generate_mbed_config_cmake_file(mbed_target, target, config, toolchain_name) - - self.assertEqual( - result, _render_mbed_config_cmake_template(target, config, toolchain_name, mbed_target,), - ) - - -class TestRendersCMakeListsFile(TestCase): - def test_returns_rendered_content(self): - target = dict() - target["labels"] = ["foo"] - target["extra_labels"] = ["morefoo"] - target["features"] = ["bar"] - target["components"] = ["baz"] - target["macros"] = ["macbaz"] - target["device_has"] = ["stuff"] - target["core"] = ["core"] - target["c_lib"] = ["c_lib"] - target["printf_lib"] = ["printf_lib"] - target["supported_form_factors"] = ["arduino"] + + result = generate_mbed_config_cmake_file(mbed_target, fake_target, config, TOOLCHAIN_NAME) + + assert result == _render_mbed_config_cmake_template(fake_target, config, TOOLCHAIN_NAME, mbed_target,) + + +class TestRendersCMakeListsFile: + def test_returns_rendered_content(self, fake_target): config = ConfigFactory() - toolchain_name = "baz" - target["supported_c_libs"] = {toolchain_name.lower(): ["small", "std"]} - target["supported_application_profiles"] = ["full", "bare-metal"] - result = _render_mbed_config_cmake_template(target, config, toolchain_name, "target_name") + result = _render_mbed_config_cmake_template(fake_target, config, TOOLCHAIN_NAME, "target_name") - for label in target["labels"] + target["extra_labels"]: - self.assertIn(label, result) + for label in fake_target["labels"] + fake_target["extra_labels"]: + assert label in result - for macro in target["features"] + target["components"] + [toolchain_name]: - 
self.assertIn(macro, result) + for macro in fake_target["features"] + fake_target["components"] + [TOOLCHAIN_NAME]: + assert macro in result - for toolchain in target["supported_c_libs"]: - self.assertIn(toolchain, result) + for toolchain in fake_target["supported_c_libs"]: + assert toolchain in result for supported_c_libs in toolchain: - self.assertIn(supported_c_libs, result) + assert supported_c_libs in result + + for supported_application_profiles in fake_target["supported_application_profiles"]: + assert supported_application_profiles in result + + def test_returns_quoted_content(self, fake_target): + config = ConfigFactory() + source = SourceFactory() + + # Add an option whose value contains quotes to the config. + _create_config_option(config, "iotc-mqtt-host", '{"mqtt.2030.ltsapis.goog", IOTC_MQTT_PORT}', source) - for supported_application_profiles in target["supported_application_profiles"]: - self.assertIn(supported_application_profiles, result) + result = _render_mbed_config_cmake_template(fake_target, config, TOOLCHAIN_NAME, "target_name") + assert '"-DMBED_CONF_IOTC_MQTT_HOST={\\"mqtt.2030.ltsapis.goog\\", IOTC_MQTT_PORT}"' in result
mbed-tools fails to handle config options that contain quotes ### Description From @rajkan01: For the mbed_lib.json config below ``` "iotc-mqtt-host": { "help": "IOTC MQTT host configuration. Defaults to mqtt.2030.ltsapis.goog host and port number 8883 if undefined", "value": "{\"mqtt.2030.ltsapis.goog\", IOTC_MQTT_PORT}", "macro_name": "IOTC_MQTT_HOST" } ``` mbed-tools generates `"-DIOTC_MQTT_HOST={"mqtt.2030.ltsapis.goog", IOTC_MQTT_PORT}"`, i.e. the generated option starts with a double quote before `-D` itself. At CMake preprocessing time this macro is split into multiple `#define` entries like the ones below, because the definition begins with a double quote and the quoted value ("mqtt.2030.ltsapis.goog") is treated as a string ``` #define IOTC_MQTT_HOST { #define mqtt .2030.ltsapis.goog, IOTC_MQTT_PORT} 1 ``` Could someone check why mbed-tools generates macros that begin with a double quote enclosing `-D`, and fix it? I've attached `main.ii` and `mbed_config.cmake` [mbed_config.zip](https://github.com/ARMmbed/mbed-tools/files/5602300/mbed_config.zip) ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [ ] Enhancement - [X] Bug
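A minimal sketch of the escaping approach the patch above applies in the mbed_config template; the standalone function name is illustrative and not the project's actual API:

```python
def render_cmake_definition(macro_name: str, value: object) -> str:
    """Render one config option as a single quoted CMake definition.

    Escaping embedded double quotes keeps a value such as
    {"mqtt.2030.ltsapis.goog", IOTC_MQTT_PORT} together as one -D argument
    instead of letting the preprocessor see it as several tokens.
    """
    escaped = str(value).replace('"', '\\"')
    return f'"-D{macro_name}={escaped}"'


# Prints: "-DIOTC_MQTT_HOST={\"mqtt.2030.ltsapis.goog\", IOTC_MQTT_PORT}"
print(render_cmake_definition("IOTC_MQTT_HOST", '{"mqtt.2030.ltsapis.goog", IOTC_MQTT_PORT}'))
```

This mirrors the `replace("\"", "\\\"")` filter the patch adds to mbed_config.tmpl, which is also what the new test in test_cmake_file.py asserts on.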
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/_internal/test_cmake_file.py::TestRendersCMakeListsFile::test_returns_quoted_content" ]
[ "tests/build/_internal/test_cmake_file.py::TestGenerateCMakeListsFile::test_correct_arguments_passed", "tests/build/_internal/test_cmake_file.py::TestRendersCMakeListsFile::test_returns_rendered_content" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2020-12-01T14:33:04Z"
apache-2.0
ARMmbed__mbed-tools-154
diff --git a/README.md b/README.md index fdd2e05..eff3449 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,10 @@ follows: ## Installation +`mbed-tools` relies on the Ninja build system and CMake. +- CMake. [Install version 3.19.0 or newer for all operating systems](https://cmake.org/install/). +- Ninja. [Install version 1.0 or newer for all operating systems](https://github.com/ninja-build/ninja/wiki/Pre-built-Ninja-packages). + We recommend installing `mbed-tools` in a Python virtual environment to avoid dependency conflicts. To install the most recent production quality release use: diff --git a/news/20201210131204.bugfix b/news/20201210131204.bugfix new file mode 100644 index 0000000..65ae014 --- /dev/null +++ b/news/20201210131204.bugfix @@ -0,0 +1,1 @@ +Emit more useful error messages if CMake or Ninja aren't found in PATH. diff --git a/src/mbed_tools/build/build.py b/src/mbed_tools/build/build.py index 66822bc..2334bc4 100644 --- a/src/mbed_tools/build/build.py +++ b/src/mbed_tools/build/build.py @@ -22,6 +22,7 @@ def build_project(build_dir: pathlib.Path, target: Optional[str] = None) -> None build_dir: Path to the CMake build tree. target: The CMake target to build (e.g 'install') """ + _check_ninja_found() target_flag = ["--target", target] if target is not None else [] _cmake_wrapper("--build", str(build_dir), *target_flag) @@ -34,6 +35,7 @@ def generate_build_system(source_dir: pathlib.Path, build_dir: pathlib.Path, pro build_dir: Path to the CMake build tree. profile: The Mbed build profile (develop, debug or release). """ + _check_ninja_found() _cmake_wrapper("-S", str(source_dir), "-B", str(build_dir), "-GNinja", f"-DCMAKE_BUILD_TYPE={profile}") @@ -41,5 +43,16 @@ def _cmake_wrapper(*cmake_args: str) -> None: try: logger.debug("Running CMake with args: %s", cmake_args) subprocess.run(["cmake", *cmake_args], check=True) + except FileNotFoundError: + raise MbedBuildError("Could not find CMake. Please ensure CMake is installed and added to PATH.") except subprocess.CalledProcessError: raise MbedBuildError("CMake invocation failed!") + + +def _check_ninja_found() -> None: + try: + subprocess.run(["ninja", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except FileNotFoundError: + raise MbedBuildError( + "Could not find the 'Ninja' build program. Please ensure 'Ninja' is installed and added to PATH." + )
ARMmbed/mbed-tools
9d6b2c71a7ddc93bd71279482a7572cac30ed745
diff --git a/tests/build/test_build.py b/tests/build/test_build.py index b9d32af..5293966 100644 --- a/tests/build/test_build.py +++ b/tests/build/test_build.py @@ -2,45 +2,60 @@ # Copyright (C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -import pathlib +import subprocess -from tempfile import TemporaryDirectory -from unittest import TestCase, mock +from unittest import mock + +import pytest from mbed_tools.build.build import build_project, generate_build_system from mbed_tools.build.exceptions import MbedBuildError -class TestBuildProject(TestCase): - @mock.patch("mbed_tools.build.build._cmake_wrapper") - def test_invokes_cmake_with_correct_args(self, cmake_wrapper): +@pytest.fixture +def subprocess_run(): + with mock.patch("mbed_tools.build.build.subprocess.run", autospec=True) as subproc: + yield subproc + + +class TestBuildProject: + def test_invokes_cmake_with_correct_args(self, subprocess_run): build_project(build_dir="cmake_build", target="install") - cmake_wrapper.assert_called_once_with("--build", "cmake_build", "--target", "install") + subprocess_run.assert_called_with(["cmake", "--build", "cmake_build", "--target", "install"], check=True) - @mock.patch("mbed_tools.build.build._cmake_wrapper") - def test_invokes_cmake_with_correct_args_if_no_target_passed(self, cmake_wrapper): + def test_invokes_cmake_with_correct_args_if_no_target_passed(self, subprocess_run): build_project(build_dir="cmake_build") - cmake_wrapper.assert_called_once_with("--build", "cmake_build") + subprocess_run.assert_called_with(["cmake", "--build", "cmake_build"], check=True) - def test_raises_build_error_if_build_dir_doesnt_exist(self): - with TemporaryDirectory() as tmp_dir: - nonexistent_build_dir = pathlib.Path(tmp_dir, "cmake_build") + def test_raises_build_error_if_cmake_invocation_fails(self, subprocess_run): + subprocess_run.side_effect = (None, subprocess.CalledProcessError(1, "")) - with self.assertRaises(MbedBuildError): - build_project(nonexistent_build_dir) + with pytest.raises(MbedBuildError, match="CMake invocation failed"): + build_project(build_dir="cmake_build") -@mock.patch("mbed_tools.build.build._cmake_wrapper") -class TestConfigureProject(TestCase): - def test_invokes_cmake_with_correct_args(self, cmake_wrapper): +class TestConfigureProject: + def test_invokes_cmake_with_correct_args(self, subprocess_run): source_dir = "source_dir" build_dir = "cmake_build" profile = "debug" generate_build_system(source_dir, build_dir, profile) - cmake_wrapper.assert_called_once_with( - "-S", source_dir, "-B", build_dir, "-GNinja", f"-DCMAKE_BUILD_TYPE={profile}" + subprocess_run.assert_called_with( + ["cmake", "-S", source_dir, "-B", build_dir, "-GNinja", f"-DCMAKE_BUILD_TYPE={profile}"], check=True ) + + def test_raises_when_ninja_cannot_be_found(self, subprocess_run): + subprocess_run.side_effect = FileNotFoundError + + with pytest.raises(MbedBuildError, match="Ninja"): + generate_build_system("", "", "") + + def test_raises_when_cmake_cannot_be_found(self, subprocess_run): + subprocess_run.side_effect = (None, FileNotFoundError) + + with pytest.raises(MbedBuildError, match="Could not find CMake"): + generate_build_system("", "", "")
README.md: missing CMake and Ninja information ### Description Hi, this morning I spent some time on a new PC installing this new mbed tool. It was not working, and I got several unfriendly messages... until I remembered that I hadn't installed CMake yet. So my request would be: - update the tools to print a friendly message such as "please install cmake" when CMake is not installed - same for Ninja - update README.md with information on how to install CMake and Ninja Thx @0xc0170 @MarceloSalazar @JeanMarcR ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [x] Enhancement - [ ] Bug
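A short sketch of the pre-flight check the fix introduces, assuming one only wants to probe for the tools before invoking the build; the exception and helper names here are illustrative, the real change lives in mbed_tools/build/build.py as shown in the patch above:

```python
import subprocess


class MbedBuildError(Exception):
    """Raised when a required build tool cannot be invoked."""


def check_tool_available(tool: str) -> None:
    # Probing `<tool> --version` turns a bare FileNotFoundError traceback
    # into a friendly, actionable message.
    try:
        subprocess.run([tool, "--version"], check=True,
                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    except FileNotFoundError:
        raise MbedBuildError(
            f"Could not find '{tool}'. Please ensure it is installed and added to PATH."
        ) from None


for required_tool in ("cmake", "ninja"):
    try:
        check_tool_available(required_tool)
    except MbedBuildError as error:
        print(error)
```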
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/test_build.py::TestBuildProject::test_raises_build_error_if_cmake_invocation_fails", "tests/build/test_build.py::TestConfigureProject::test_raises_when_ninja_cannot_be_found", "tests/build/test_build.py::TestConfigureProject::test_raises_when_cmake_cannot_be_found" ]
[ "tests/build/test_build.py::TestBuildProject::test_invokes_cmake_with_correct_args", "tests/build/test_build.py::TestBuildProject::test_invokes_cmake_with_correct_args_if_no_target_passed", "tests/build/test_build.py::TestConfigureProject::test_invokes_cmake_with_correct_args" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-12-10T13:15:11Z"
apache-2.0
ARMmbed__mbed-tools-190
diff --git a/news/169.bugfix b/news/169.bugfix new file mode 100644 index 0000000..78b6135 --- /dev/null +++ b/news/169.bugfix @@ -0,0 +1,1 @@ +Support use of user@host:directory syntax with the import subcommand. diff --git a/src/mbed_tools/project/mbed_program.py b/src/mbed_tools/project/mbed_program.py index d095e5b..c3a9536 100644 --- a/src/mbed_tools/project/mbed_program.py +++ b/src/mbed_tools/project/mbed_program.py @@ -113,6 +113,9 @@ def parse_url(name_or_url: str) -> Dict[str, str]: url_obj = urlparse(name_or_url) if url_obj.hostname: url = url_obj.geturl() + elif ":" in name_or_url.split("/", maxsplit=1)[0]: + # If non-standard and no slashes before first colon, git will recognize as scp ssh syntax + url = name_or_url else: url = f"https://github.com/armmbed/{url_obj.path}" # We need to create a valid directory name from the url path section.
ARMmbed/mbed-tools
d4dd48ce58952851f9cb2a9e98b0f788a61a23a3
diff --git a/tests/project/test_mbed_program.py b/tests/project/test_mbed_program.py index 7f700f0..be83aa9 100644 --- a/tests/project/test_mbed_program.py +++ b/tests/project/test_mbed_program.py @@ -127,6 +127,12 @@ class TestParseURL(TestCase): self.assertEqual(data["url"], url) self.assertEqual(data["dst_path"], "mbed-os-example-numskull") + def test_creates_valid_dst_dir_from_ssh_url(self): + url = "git@superversioncontrol:superorg/mbed-os-example-numskull" + data = parse_url(url) + self.assertEqual(data["url"], url) + self.assertEqual(data["dst_path"], "mbed-os-example-numskull") + class TestFindProgramRoot(TestCase): @patchfs
mbed-tools import fails to import an example with ssh url ### Description <!-- A detailed description of what is being reported. Please include steps to reproduce the problem. Things to consider sharing: - What version of the package is being used (pip show mbed-tools)? - What is the host platform and version (e.g. macOS 10.15.2, Windows 10, Ubuntu 18.04 LTS)? --> mbed-tools version: **5.0.0** Command: `mbed-tools -vv import git@github.com:ARMmbed/mbed-os-example-blinky.git` Expected: mbed-os-example-blinky example cloned onto a local machine. Output: ``` Cloning Mbed program 'git@github.com:ARMmbed/mbed-os-example-blinky.git' Resolving program library dependencies. ERROR: Cloning git repository from url 'https://github.com/armmbed/git@github.com:ARMmbed/mbed-os-example-blinky.git' failed. Error from VCS: Cmd('git') failed due to: exit code(128) cmdline: git clone --progress -v https://github.com/armmbed/git@github.com:ARMmbed/mbed-os-example-blinky.git mbed-os-example-blinky.git More information may be available by using the command line option '-vvv'. ``` ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [ ] Enhancement - [x] Bug
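A hedged sketch of the URL handling the fix adds to parse_url: anything with no slash before its first colon is treated as scp-style SSH syntax rather than a bare example name. The function name is illustrative:

```python
from urllib.parse import urlparse


def resolve_clone_url(name_or_url: str) -> str:
    """Return a cloneable URL for a bare example name, a full URL, or scp-style SSH syntax."""
    parsed = urlparse(name_or_url)
    if parsed.hostname:
        # Standard URLs such as https://... or ssh://... pass through untouched.
        return parsed.geturl()
    if ":" in name_or_url.split("/", maxsplit=1)[0]:
        # No slash before the first colon: git treats user@host:path as SSH.
        return name_or_url
    # Otherwise assume a bare example name hosted under the ARMmbed organisation.
    return f"https://github.com/armmbed/{parsed.path}"


assert resolve_clone_url("git@github.com:ARMmbed/mbed-os-example-blinky.git") == (
    "git@github.com:ARMmbed/mbed-os-example-blinky.git"
)
assert resolve_clone_url("mbed-os-example-blinky") == (
    "https://github.com/armmbed/mbed-os-example-blinky"
)
```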
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/project/test_mbed_program.py::TestParseURL::test_creates_valid_dst_dir_from_ssh_url" ]
[ "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_raises_if_no_mbed_os_dir_found_and_check_mbed_os_is_true", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_raises_if_path_is_not_a_program", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_returns_valid_program", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_with_mbed_os_path_returns_valid_program", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_generates_valid_program_creating_directory", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_generates_valid_program_creating_directory_in_cwd", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_generates_valid_program_existing_directory", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_raises_if_path_is_existing_program", "tests/project/test_mbed_program.py::TestParseURL::test_creates_url_and_dst_dir_from_name", "tests/project/test_mbed_program.py::TestParseURL::test_creates_valid_dst_dir_from_url", "tests/project/test_mbed_program.py::TestFindProgramRoot::test_finds_program_at_current_path", "tests/project/test_mbed_program.py::TestFindProgramRoot::test_finds_program_higher_in_dir_tree", "tests/project/test_mbed_program.py::TestFindProgramRoot::test_raises_if_no_program_found" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2021-02-15T13:43:30Z"
apache-2.0
ARMmbed__mbed-tools-196
diff --git a/news/20210218112043.bugfix b/news/20210218112043.bugfix new file mode 100644 index 0000000..f595eac --- /dev/null +++ b/news/20210218112043.bugfix @@ -0,0 +1,1 @@ +Raise a nicer error from mbed-tools detect when running on an unrecognised OS. diff --git a/src/mbed_tools/devices/_internal/detect_candidate_devices.py b/src/mbed_tools/devices/_internal/detect_candidate_devices.py index 7078c4f..418cbc2 100644 --- a/src/mbed_tools/devices/_internal/detect_candidate_devices.py +++ b/src/mbed_tools/devices/_internal/detect_candidate_devices.py @@ -8,6 +8,7 @@ from typing import Iterable from mbed_tools.devices._internal.candidate_device import CandidateDevice from mbed_tools.devices._internal.base_detector import DeviceDetector +from mbed_tools.devices.exceptions import UnknownOSError def detect_candidate_devices() -> Iterable[CandidateDevice]: @@ -26,7 +27,12 @@ def _get_detector_for_current_os() -> DeviceDetector: from mbed_tools.devices._internal.linux.device_detector import LinuxDeviceDetector return LinuxDeviceDetector() - else: + if platform.system() == "Darwin": from mbed_tools.devices._internal.darwin.device_detector import DarwinDeviceDetector return DarwinDeviceDetector() + + raise UnknownOSError( + f"We have detected the OS you are running is '{platform.system()}'. " + "Unfortunately we haven't implemented device detection support for this OS yet. Sorry!" + ) diff --git a/src/mbed_tools/devices/exceptions.py b/src/mbed_tools/devices/exceptions.py index 4763b88..570941d 100644 --- a/src/mbed_tools/devices/exceptions.py +++ b/src/mbed_tools/devices/exceptions.py @@ -16,3 +16,7 @@ class DeviceLookupFailed(MbedDevicesError): class NoDevicesFound(MbedDevicesError): """No Mbed Enabled devices were found.""" + + +class UnknownOSError(MbedDevicesError): + """The current OS is not supported."""
ARMmbed/mbed-tools
fe56531239ba0a1cbe2ce696f00f9b58889f05bc
diff --git a/tests/devices/_internal/test_detect_candidate_devices.py b/tests/devices/_internal/test_detect_candidate_devices.py index 75c5032..74137b3 100644 --- a/tests/devices/_internal/test_detect_candidate_devices.py +++ b/tests/devices/_internal/test_detect_candidate_devices.py @@ -2,39 +2,49 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -from unittest import TestCase, mock +import pytest +from unittest import mock from tests.devices.markers import windows_only, darwin_only, linux_only from mbed_tools.devices._internal.base_detector import DeviceDetector +from mbed_tools.devices.exceptions import UnknownOSError from mbed_tools.devices._internal.detect_candidate_devices import ( detect_candidate_devices, _get_detector_for_current_os, ) -class TestDetectCandidateDevices(TestCase): +class TestDetectCandidateDevices: @mock.patch("mbed_tools.devices._internal.detect_candidate_devices._get_detector_for_current_os") def test_returns_candidates_using_os_specific_detector(self, _get_detector_for_current_os): detector = mock.Mock(spec_set=DeviceDetector) _get_detector_for_current_os.return_value = detector - self.assertEqual(detect_candidate_devices(), detector.find_candidates.return_value) + assert detect_candidate_devices() == detector.find_candidates.return_value -class TestGetDetectorForCurrentOS(TestCase): +class TestGetDetectorForCurrentOS: @windows_only def test_windows_uses_correct_module(self): from mbed_tools.devices._internal.windows.device_detector import WindowsDeviceDetector - self.assertIsInstance(_get_detector_for_current_os(), WindowsDeviceDetector) + assert isinstance(_get_detector_for_current_os(), WindowsDeviceDetector) @darwin_only def test_darwin_uses_correct_module(self): from mbed_tools.devices._internal.darwin.device_detector import DarwinDeviceDetector - self.assertIsInstance(_get_detector_for_current_os(), DarwinDeviceDetector) + assert isinstance(_get_detector_for_current_os(), DarwinDeviceDetector) @linux_only def test_linux_uses_correct_module(self): from mbed_tools.devices._internal.linux.device_detector import LinuxDeviceDetector - self.assertIsInstance(_get_detector_for_current_os(), LinuxDeviceDetector) + assert isinstance(_get_detector_for_current_os(), LinuxDeviceDetector) + + @mock.patch("platform.system") + def test_raises_when_os_is_unknown(self, platform_system): + os_name = "SomethingNobodyUses" + platform_system.return_value = os_name + + with pytest.raises(UnknownOSError): + _get_detector_for_current_os()
BSD: `mbed-tools detect` causes Python stack trace from mbed_tools/devices ### Description Desired behavior: - `mbed-tools detect` lists out USB-connected boards, or a friendly message if it can't Actual behavior: - `mbed-tools detect` causes a Python stack trace to be printed ``` [mbedtools] patater@example.com:~ % mbed-tools detect Traceback (most recent call last): File "/home/patater/venvs/mbedtools/bin/mbed-tools", line 8, in <module> sys.exit(cli()) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/cli/main.py", line 38, in invoke super().invoke(context) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/cli/list_connected_devices.py", line 29, in list_connected_devices connected_devices = get_connected_devices() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/devices.py", line 32, in get_connected_devices for candidate_device in detect_candidate_devices(): File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/detect_candidate_devices.py", line 16, in detect_candidate_devices return detector.find_candidates() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/darwin/device_detector.py", line 40, in find_candidates usb_devices_data = system_profiler.get_end_usb_devices_data() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/darwin/system_profiler.py", line 42, in get_end_usb_devices_data data = get_all_usb_devices_data() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/darwin/system_profiler.py", line 34, in get_all_usb_devices_data output = subprocess.check_output(["system_profiler", "-xml", "SPUSBDataType"], stderr=subprocess.DEVNULL) File "/usr/local/lib/python3.7/subprocess.py", line 411, in check_output **kwargs).stdout File "/usr/local/lib/python3.7/subprocess.py", line 488, in run with Popen(*popenargs, **kwargs) as process: File "/usr/local/lib/python3.7/subprocess.py", line 800, in __init__ restore_signals, start_new_session) File "/usr/local/lib/python3.7/subprocess.py", line 1551, in _execute_child raise child_exception_type(errno_num, err_msg, err_filename) FileNotFoundError: [Errno 2] No such file or directory: 'system_profiler': 'system_profiler' ``` It looks like something called `darwin/device_detector.py` is being used, but my host OS is not Darwin or macOS. It's just your friendly, neighborhood FreeBSD. I'd guess the OS detection is not very sophisticated. ``` [mbedtools] patater@example.com:~ % mbed-tools --version 5.4.0 ``` ### Issue request type - [ ] Enhancement - [X] Bug
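A sketch of the shape of the fix — fail fast with a clear exception when platform.system() reports an OS with no detector, instead of falling through to the Darwin code path. The detector-name registry is hypothetical and stands in for the real per-OS detector classes:

```python
import platform


class UnknownOSError(Exception):
    """Raised when device detection is not implemented for the host OS."""


# Hypothetical stand-in for the real WindowsDeviceDetector / LinuxDeviceDetector /
# DarwinDeviceDetector classes; only the dispatch-and-fail-fast shape matters here.
_DETECTORS = {
    "Windows": "WindowsDeviceDetector",
    "Linux": "LinuxDeviceDetector",
    "Darwin": "DarwinDeviceDetector",
}


def detector_name_for_current_os() -> str:
    system = platform.system()  # e.g. "FreeBSD" on the reporter's host
    try:
        return _DETECTORS[system]
    except KeyError:
        raise UnknownOSError(
            f"Detected OS '{system}'. Device detection support for this OS is not implemented yet."
        ) from None
```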
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/devices/_internal/test_detect_candidate_devices.py::TestDetectCandidateDevices::test_returns_candidates_using_os_specific_detector", "tests/devices/_internal/test_detect_candidate_devices.py::TestGetDetectorForCurrentOS::test_linux_uses_correct_module", "tests/devices/_internal/test_detect_candidate_devices.py::TestGetDetectorForCurrentOS::test_raises_when_os_is_unknown" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-02-18T11:23:15Z"
apache-2.0
ARMmbed__mbed-tools-270
diff --git a/news/222.bugfix b/news/222.bugfix new file mode 100644 index 0000000..4bb5df6 --- /dev/null +++ b/news/222.bugfix @@ -0,0 +1,1 @@ +Add support for MBED_ROM_START, MBED_ROM_SIZE, MBED_RAM_START and MBED_RAM_SIZE in config system. diff --git a/src/mbed_tools/build/_internal/config/config.py b/src/mbed_tools/build/_internal/config/config.py index d93cfe4..bb493f2 100644 --- a/src/mbed_tools/build/_internal/config/config.py +++ b/src/mbed_tools/build/_internal/config/config.py @@ -8,7 +8,7 @@ import logging from collections import UserDict from typing import Any, Iterable, Hashable, Callable, List -from mbed_tools.build._internal.config.source import Override, ConfigSetting +from mbed_tools.build._internal.config.source import Memory, Override, ConfigSetting logger = logging.getLogger(__name__) @@ -18,13 +18,15 @@ class Config(UserDict): This object understands how to populate the different 'config sections' which all have different rules for how the settings are collected. - Applies overrides, appends macros and updates config settings. + Applies overrides, appends macros, updates memories, and updates config settings. """ def __setitem__(self, key: Hashable, item: Any) -> None: """Set an item based on its key.""" if key == CONFIG_SECTION: self._update_config_section(item) + elif key == MEMORIES_SECTION: + self._update_memories_section(item) elif key == OVERRIDES_SECTION: self._handle_overrides(item) elif key == MACROS_SECTION: @@ -67,6 +69,20 @@ class Config(UserDict): self.data[CONFIG_SECTION] = self.data.get(CONFIG_SECTION, []) + config_settings + def _update_memories_section(self, memories: List[Memory]) -> None: + defined_memories = self.data.get(MEMORIES_SECTION, []) + for memory in memories: + logger.debug(f"Adding memory settings `{memory.name}: start={memory.start} size={memory.size}`") + prev_defined = next((mem for mem in defined_memories if mem.name == memory.name), None) + if prev_defined is None: + defined_memories.append(memory) + else: + logger.warning( + f"You are attempting to redefine `{memory.name}` from {prev_defined.namespace}.\n" + f"The values from `{memory.namespace}` will be ignored" + ) + self.data[MEMORIES_SECTION] = defined_memories + def _find_first_config_setting(self, predicate: Callable) -> Any: """Find first config setting based on `predicate`. @@ -89,6 +105,7 @@ class Config(UserDict): CONFIG_SECTION = "config" MACROS_SECTION = "macros" +MEMORIES_SECTION = "memories" OVERRIDES_SECTION = "overrides" diff --git a/src/mbed_tools/build/_internal/config/source.py b/src/mbed_tools/build/_internal/config/source.py index 4ad7e37..59d01df 100644 --- a/src/mbed_tools/build/_internal/config/source.py +++ b/src/mbed_tools/build/_internal/config/source.py @@ -28,8 +28,8 @@ def prepare( ) -> dict: """Prepare a config source for entry into the Config object. - Extracts config and override settings from the source. Flattens these nested dictionaries out into lists of - objects which are namespaced in the way the Mbed config system expects. + Extracts memory, config and override settings from the source. Flattens these nested dictionaries out into + lists of objects which are namespaced in the way the Mbed config system expects. Args: input_data: The raw config JSON object parsed from the config file. 
@@ -46,6 +46,11 @@ def prepare( for key in data: data[key] = _sanitise_value(data[key]) + memories = _extract_memories(namespace, data) + + if memories: + data["memories"] = memories + if "config" in data: data["config"] = _extract_config_settings(namespace, data["config"]) @@ -78,6 +83,31 @@ class ConfigSetting: self.value = _sanitise_value(self.value) +@dataclass +class Memory: + """Representation of a defined RAM/ROM region.""" + + name: str + namespace: str + start: str + size: str + + def __post_init__(self) -> None: + """Convert start and size to hex format strings.""" + try: + self.start = hex(int(self.start, 0)) + except ValueError: + raise ValueError( + f"Value of MBED_{self.name}_START in {self.namespace}, {self.start} is invalid: must be an integer" + ) + try: + self.size = hex(int(self.size, 0)) + except ValueError: + raise ValueError( + f"Value of MBED_{self.name}_SIZE in {self.namespace}, {self.size} is invalid: must be an integer" + ) + + @dataclass class Override: """Representation of a config override. @@ -128,6 +158,27 @@ def _extract_config_settings(namespace: str, config_data: dict) -> List[ConfigSe return settings +def _extract_memories(namespace: str, data: dict) -> List[Memory]: + memories = [] + for mem in ["rom", "ram"]: + start_attr = f"mbed_{mem}_start" + size_attr = f"mbed_{mem}_size" + start = data.get(start_attr) + size = data.get(size_attr) + + if size is not None and start is not None: + logger.debug(f"Extracting MBED_{mem.upper()} definitions in {namespace}: _START={start}, _SIZE={size}.") + + memory = Memory(mem.upper(), namespace, start, size) + memories.append(memory) + elif start is not None or size is not None: + raise ValueError( + f"{size_attr.upper()} and {start_attr.upper()} must be defined together. Only " + f"{'START' if start is not None else 'SIZE'} is defined in the lib {namespace}." + ) + return memories + + def _extract_target_overrides( namespace: str, override_data: dict, allowed_target_labels: Iterable[str] ) -> List[Override]: diff --git a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl index 8fb2119..7fadeb1 100644 --- a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl +++ b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl @@ -75,6 +75,10 @@ set(MBED_CONFIG_DEFINITIONS "-D{{setting_name}}={{value}}" {% endif -%} {%- endfor -%} +{% for memory in memories %} + "-DMBED_{{memory.name}}_START={{memory.start}}" + "-DMBED_{{memory.name}}_SIZE={{memory.size}}" +{%- endfor -%} {% for macro in macros %} "{{macro|replace("\"", "\\\"")}}" {%- endfor %}
ARMmbed/mbed-tools
73fc6ed6fd728beea588e100c2de83c439c29228
diff --git a/tests/build/_internal/config/test_config.py b/tests/build/_internal/config/test_config.py index 980ed4d..c7e2e35 100644 --- a/tests/build/_internal/config/test_config.py +++ b/tests/build/_internal/config/test_config.py @@ -2,10 +2,11 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # +import logging import pytest from mbed_tools.build._internal.config.config import Config -from mbed_tools.build._internal.config.source import prepare, ConfigSetting, Override +from mbed_tools.build._internal.config.source import prepare, ConfigSetting, Memory, Override class TestConfig: @@ -24,6 +25,17 @@ class TestConfig: with pytest.raises(ValueError, match="lib.param already defined"): conf.update(prepare({"config": {"param": {"value": 0}}}, source_name="lib")) + def test_logs_ignore_mbed_ram_repeated(self, caplog): + caplog.set_level(logging.DEBUG) + input_dict = {"mbed_ram_size": "0x80000", "mbed_ram_start": "0x24000000"} + input_dict2 = {"mbed_ram_size": "0x78000", "mbed_ram_start": "0x24200000"} + + conf = Config(prepare(input_dict, source_name="lib1")) + conf.update(prepare(input_dict2, source_name="lib2")) + + assert "values from `lib2` will be ignored" in caplog.text + assert conf["memories"] == [Memory("RAM", "lib1", "0x24000000", "0x80000")] + def test_target_overrides_handled(self): conf = Config( { diff --git a/tests/build/_internal/config/test_source.py b/tests/build/_internal/config/test_source.py index 962315a..b7f4a2a 100644 --- a/tests/build/_internal/config/test_source.py +++ b/tests/build/_internal/config/test_source.py @@ -2,8 +2,10 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # +import pytest + from mbed_tools.build._internal.config import source -from mbed_tools.build._internal.config.source import Override +from mbed_tools.build._internal.config.source import Memory, Override class TestPrepareSource: @@ -118,3 +120,48 @@ class TestPrepareSource: assert conf["config"][0].value == {"ETHERNET", "WIFI"} assert conf["sectors"] == {0, 2048} assert conf["header_info"] == {0, 2048, "bobbins", "magic"} + + def test_memory_attr_extracted(self): + lib = { + "mbed_ram_size": "0x80000", + "mbed_ram_start": "0x24000000", + "mbed_rom_size": "0x200000", + "mbed_rom_start": "0x08000000", + } + + conf = source.prepare(lib, "lib") + + assert Memory("RAM", "lib", "0x24000000", "0x80000") in conf["memories"] + assert Memory("ROM", "lib", "0x8000000", "0x200000") in conf["memories"] + + def test_memory_attr_converted_as_hex(self): + input_dict = {"mbed_ram_size": "1024", "mbed_ram_start": "0x24000000"} + + conf = source.prepare(input_dict, source_name="lib") + + memory, *_ = conf["memories"] + assert memory.size == "0x400" + + def test_raises_memory_size_not_integer(self): + input_dict = {"mbed_ram_size": "NOT INT", "mbed_ram_start": "0x24000000"} + + with pytest.raises(ValueError, match="_SIZE in lib, NOT INT is invalid: must be an integer"): + source.prepare(input_dict, "lib") + + def test_raises_memory_start_not_integer(self): + input_dict = {"mbed_ram_size": "0x80000", "mbed_ram_start": "NOT INT"} + + with pytest.raises(ValueError, match="_START in lib, NOT INT is invalid: must be an integer"): + source.prepare(input_dict, "lib") + + def test_raises_memory_size_defined_not_start(self): + input_dict = {"mbed_ram_size": "0x80000"} + + with pytest.raises(ValueError, match="Only SIZE is defined"): + source.prepare(input_dict) + + def 
test_raises_memory_start_defined_not_size(self): + input_dict = {"mbed_ram_start": "0x24000000"} + + with pytest.raises(ValueError, match="Only START is defined"): + source.prepare(input_dict)
MBED_ROM_START and friends unavailable on Mbed CLI2 ### Description <!-- A detailed description of what is being reported. Please include steps to reproduce the problem. Things to consider sharing: - What version of the package is being used (pip show mbed-tools)? - What is the host platform and version (e.g. macOS 10.15.2, Windows 10, Ubuntu 18.04 LTS)? --> On Mbed CLI, the following symbols are generated and passed to compiler, linker, or both: ```sh mbed compile -m NUMAKER_IOT_M487 -t ARM ``` **BUILD/NUMAKER_IOT_M487/ARM/.profile.c**: ``` { "flags": [ ...... "-DMBED_RAM_SIZE=0x28000", "-DMBED_RAM_START=0x20000000", "-DMBED_ROM_SIZE=0x80000", "-DMBED_ROM_START=0x0", ...... ``` **BUILD/NUMAKER_IOT_M487/ARM/.profile.ld**: ``` { "flags": [ ...... "--predefine=\"-DMBED_BOOT_STACK_SIZE=1024\"", "--predefine=\"-DMBED_RAM_SIZE=0x28000\"", "--predefine=\"-DMBED_RAM_START=0x20000000\"", "--predefine=\"-DMBED_ROM_SIZE=0x80000\"", "--predefine=\"-DMBED_ROM_START=0x0\"", ...... ``` But on Mbed CLI2, they are unavailable in `cmake_build/NUMAKER_IOT_M487/develop/ARM/mbed_config.cmake` or elsewhere. ```sh mbed-tools compile -m NUMAKER_IOT_M487 -t ARM ``` ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [ ] Enhancement - [x] Bug ### Mbed/Tool version **mbed-os**: 6.8.0 **mbed-cli**: 1.10.5 **mbed-tools**:: 7.2.1
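A minimal sketch, assuming per-target mbed_ram/mbed_rom start and size attributes as handled in the patch above, of how those values can be turned into the -DMBED_*_START/-DMBED_*_SIZE flags that Mbed CLI 1 emitted; the function name is illustrative:

```python
from typing import List


def extract_memory_flags(namespace: str, data: dict) -> List[str]:
    """Collect MBED_{ROM,RAM}_{START,SIZE} definitions from one config source."""
    flags = []
    for region in ("rom", "ram"):
        start = data.get(f"mbed_{region}_start")
        size = data.get(f"mbed_{region}_size")
        if start is None and size is None:
            continue
        if start is None or size is None:
            raise ValueError(
                f"mbed_{region}_start and mbed_{region}_size must be defined together in {namespace}"
            )
        # Accept decimal or hex strings and normalise them to hex literals.
        flags.append(f"-DMBED_{region.upper()}_START={hex(int(start, 0))}")
        flags.append(f"-DMBED_{region.upper()}_SIZE={hex(int(size, 0))}")
    return flags


print(extract_memory_flags(
    "targets.NUMAKER_IOT_M487",
    {"mbed_rom_start": "0x0", "mbed_rom_size": "0x80000"},
))
# ['-DMBED_ROM_START=0x0', '-DMBED_ROM_SIZE=0x80000']
```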
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/_internal/config/test_config.py::TestConfig::test_target_overrides_separate_namespace", "tests/build/_internal/config/test_config.py::TestConfig::test_target_overrides_handled", "tests/build/_internal/config/test_config.py::TestConfig::test_macros_are_appended_to", "tests/build/_internal/config/test_config.py::TestConfig::test_raises_when_trying_to_add_duplicate_config_setting", "tests/build/_internal/config/test_config.py::TestConfig::test_warns_and_skips_override_for_undefined_config_parameter", "tests/build/_internal/config/test_config.py::TestConfig::test_config_updated", "tests/build/_internal/config/test_config.py::TestConfig::test_logs_ignore_mbed_ram_repeated", "tests/build/_internal/config/test_config.py::TestConfig::test_lib_overrides_handled", "tests/build/_internal/config/test_config.py::TestConfig::test_cumulative_fields_can_be_modified", "tests/build/_internal/config/test_config.py::TestConfig::test_ignores_present_option", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_size_defined_not_start", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_start_defined_not_size", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_memory_attr_extracted", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_override_fields_from_lib_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_start_not_integer", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_override_fields_from_target_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_target_overrides_only_collected_for_valid_targets", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_memory_attr_converted_as_hex", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_cumulative_fields_parsed", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_size_not_integer", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_config_fields_from_lib_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_config_fields_from_target_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_converts_config_setting_value_lists_to_sets" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2021-04-13T11:28:54Z"
apache-2.0
ARMmbed__mbed-tools-284
diff --git a/CHANGELOG.md b/CHANGELOG.md index e61e039..29a1296 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,15 @@ beta releases are not included in this history. For a full list of all releases, [//]: # (begin_release_notes) +7.16.0 (2021-05-26) +=================== + +Features +-------- + +- Targets modified: MultiTech mDot. (#20210526050235) + + 7.15.0 (2021-05-15) =================== diff --git a/news/20210524113403.bugfix b/news/20210524113403.bugfix new file mode 100644 index 0000000..2f178f5 --- /dev/null +++ b/news/20210524113403.bugfix @@ -0,0 +1,1 @@ +Fix issue with memory region overrides being ignored. diff --git a/src/mbed_tools/build/_internal/cmake_file.py b/src/mbed_tools/build/_internal/cmake_file.py index 09d507c..d6b550b 100644 --- a/src/mbed_tools/build/_internal/cmake_file.py +++ b/src/mbed_tools/build/_internal/cmake_file.py @@ -5,6 +5,8 @@ """Module in charge of CMake file generation.""" import pathlib +from typing import Any + import jinja2 from mbed_tools.build._internal.config.config import Config @@ -25,7 +27,13 @@ def render_mbed_config_cmake_template(config: Config, toolchain_name: str, targe The rendered mbed_config template. """ env = jinja2.Environment(loader=jinja2.PackageLoader("mbed_tools.build", str(TEMPLATES_DIRECTORY)),) + env.filters["to_hex"] = to_hex template = env.get_template(TEMPLATE_NAME) config["supported_c_libs"] = [x for x in config["supported_c_libs"][toolchain_name.lower()]] context = {"target_name": target_name, "toolchain_name": toolchain_name, **config} return template.render(context) + + +def to_hex(s: Any) -> str: + """Filter to convert integers to hex.""" + return hex(int(s, 0)) diff --git a/src/mbed_tools/build/_internal/config/config.py b/src/mbed_tools/build/_internal/config/config.py index bb493f2..7f96862 100644 --- a/src/mbed_tools/build/_internal/config/config.py +++ b/src/mbed_tools/build/_internal/config/config.py @@ -8,7 +8,7 @@ import logging from collections import UserDict from typing import Any, Iterable, Hashable, Callable, List -from mbed_tools.build._internal.config.source import Memory, Override, ConfigSetting +from mbed_tools.build._internal.config.source import Override, ConfigSetting logger = logging.getLogger(__name__) @@ -18,15 +18,13 @@ class Config(UserDict): This object understands how to populate the different 'config sections' which all have different rules for how the settings are collected. - Applies overrides, appends macros, updates memories, and updates config settings. + Applies overrides, appends macros, and updates config settings. 
""" def __setitem__(self, key: Hashable, item: Any) -> None: """Set an item based on its key.""" if key == CONFIG_SECTION: self._update_config_section(item) - elif key == MEMORIES_SECTION: - self._update_memories_section(item) elif key == OVERRIDES_SECTION: self._handle_overrides(item) elif key == MACROS_SECTION: @@ -69,20 +67,6 @@ class Config(UserDict): self.data[CONFIG_SECTION] = self.data.get(CONFIG_SECTION, []) + config_settings - def _update_memories_section(self, memories: List[Memory]) -> None: - defined_memories = self.data.get(MEMORIES_SECTION, []) - for memory in memories: - logger.debug(f"Adding memory settings `{memory.name}: start={memory.start} size={memory.size}`") - prev_defined = next((mem for mem in defined_memories if mem.name == memory.name), None) - if prev_defined is None: - defined_memories.append(memory) - else: - logger.warning( - f"You are attempting to redefine `{memory.name}` from {prev_defined.namespace}.\n" - f"The values from `{memory.namespace}` will be ignored" - ) - self.data[MEMORIES_SECTION] = defined_memories - def _find_first_config_setting(self, predicate: Callable) -> Any: """Find first config setting based on `predicate`. @@ -105,7 +89,6 @@ class Config(UserDict): CONFIG_SECTION = "config" MACROS_SECTION = "macros" -MEMORIES_SECTION = "memories" OVERRIDES_SECTION = "overrides" diff --git a/src/mbed_tools/build/_internal/config/source.py b/src/mbed_tools/build/_internal/config/source.py index 59d01df..54008bc 100644 --- a/src/mbed_tools/build/_internal/config/source.py +++ b/src/mbed_tools/build/_internal/config/source.py @@ -28,7 +28,7 @@ def prepare( ) -> dict: """Prepare a config source for entry into the Config object. - Extracts memory, config and override settings from the source. Flattens these nested dictionaries out into + Extracts config and override settings from the source. Flattens these nested dictionaries out into lists of objects which are namespaced in the way the Mbed config system expects. Args: @@ -46,11 +46,6 @@ def prepare( for key in data: data[key] = _sanitise_value(data[key]) - memories = _extract_memories(namespace, data) - - if memories: - data["memories"] = memories - if "config" in data: data["config"] = _extract_config_settings(namespace, data["config"]) @@ -83,31 +78,6 @@ class ConfigSetting: self.value = _sanitise_value(self.value) -@dataclass -class Memory: - """Representation of a defined RAM/ROM region.""" - - name: str - namespace: str - start: str - size: str - - def __post_init__(self) -> None: - """Convert start and size to hex format strings.""" - try: - self.start = hex(int(self.start, 0)) - except ValueError: - raise ValueError( - f"Value of MBED_{self.name}_START in {self.namespace}, {self.start} is invalid: must be an integer" - ) - try: - self.size = hex(int(self.size, 0)) - except ValueError: - raise ValueError( - f"Value of MBED_{self.name}_SIZE in {self.namespace}, {self.size} is invalid: must be an integer" - ) - - @dataclass class Override: """Representation of a config override. 
@@ -158,27 +128,6 @@ def _extract_config_settings(namespace: str, config_data: dict) -> List[ConfigSe return settings -def _extract_memories(namespace: str, data: dict) -> List[Memory]: - memories = [] - for mem in ["rom", "ram"]: - start_attr = f"mbed_{mem}_start" - size_attr = f"mbed_{mem}_size" - start = data.get(start_attr) - size = data.get(size_attr) - - if size is not None and start is not None: - logger.debug(f"Extracting MBED_{mem.upper()} definitions in {namespace}: _START={start}, _SIZE={size}.") - - memory = Memory(mem.upper(), namespace, start, size) - memories.append(memory) - elif start is not None or size is not None: - raise ValueError( - f"{size_attr.upper()} and {start_attr.upper()} must be defined together. Only " - f"{'START' if start is not None else 'SIZE'} is defined in the lib {namespace}." - ) - return memories - - def _extract_target_overrides( namespace: str, override_data: dict, allowed_target_labels: Iterable[str] ) -> List[Override]: diff --git a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl index 7fadeb1..89308ac 100644 --- a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl +++ b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl @@ -54,6 +54,18 @@ set(MBED_TARGET_DEFINITIONS{% for component in components %} {% for form_factor in supported_form_factors %} TARGET_FF_{{form_factor}} {%- endfor %} +{% if mbed_rom_start is defined %} + MBED_ROM_START={{ mbed_rom_start | to_hex }} +{%- endif %} +{% if mbed_rom_size is defined %} + MBED_ROM_SIZE={{ mbed_rom_size | to_hex }} +{%- endif %} +{% if mbed_ram_start is defined %} + MBED_RAM_START={{ mbed_ram_start | to_hex }} +{%- endif %} +{% if mbed_ram_size is defined %} + MBED_RAM_SIZE={{ mbed_ram_size | to_hex }} +{%- endif %} TARGET_LIKE_MBED __MBED__=1 ) @@ -75,10 +87,6 @@ set(MBED_CONFIG_DEFINITIONS "-D{{setting_name}}={{value}}" {% endif -%} {%- endfor -%} -{% for memory in memories %} - "-DMBED_{{memory.name}}_START={{memory.start}}" - "-DMBED_{{memory.name}}_SIZE={{memory.size}}" -{%- endfor -%} {% for macro in macros %} "{{macro|replace("\"", "\\\"")}}" {%- endfor %} diff --git a/src/mbed_tools/targets/_internal/data/board_database_snapshot.json b/src/mbed_tools/targets/_internal/data/board_database_snapshot.json index 6b81247..6ef01a2 100644 --- a/src/mbed_tools/targets/_internal/data/board_database_snapshot.json +++ b/src/mbed_tools/targets/_internal/data/board_database_snapshot.json @@ -4921,7 +4921,6 @@ "slug": "MTS-mDot-F411", "build_variant": [], "mbed_os_support": [ - "Mbed OS 2", "Mbed OS 5.10", "Mbed OS 5.11", "Mbed OS 5.12", @@ -4935,7 +4934,14 @@ "Mbed OS 5.8", "Mbed OS 5.9", "Mbed OS 6.0", - "Mbed OS 6.1" + "Mbed OS 6.1", + "Mbed OS 6.2", + "Mbed OS 6.3", + "Mbed OS 6.4", + "Mbed OS 6.5", + "Mbed OS 6.6", + "Mbed OS 6.7", + "Mbed OS 6.8" ], "mbed_enabled": [ "Baseline"
ARMmbed/mbed-tools
71e9707b908c393691a4e509ced90ce608e68b81
diff --git a/tests/build/_internal/config/test_config.py b/tests/build/_internal/config/test_config.py index c7e2e35..980ed4d 100644 --- a/tests/build/_internal/config/test_config.py +++ b/tests/build/_internal/config/test_config.py @@ -2,11 +2,10 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -import logging import pytest from mbed_tools.build._internal.config.config import Config -from mbed_tools.build._internal.config.source import prepare, ConfigSetting, Memory, Override +from mbed_tools.build._internal.config.source import prepare, ConfigSetting, Override class TestConfig: @@ -25,17 +24,6 @@ class TestConfig: with pytest.raises(ValueError, match="lib.param already defined"): conf.update(prepare({"config": {"param": {"value": 0}}}, source_name="lib")) - def test_logs_ignore_mbed_ram_repeated(self, caplog): - caplog.set_level(logging.DEBUG) - input_dict = {"mbed_ram_size": "0x80000", "mbed_ram_start": "0x24000000"} - input_dict2 = {"mbed_ram_size": "0x78000", "mbed_ram_start": "0x24200000"} - - conf = Config(prepare(input_dict, source_name="lib1")) - conf.update(prepare(input_dict2, source_name="lib2")) - - assert "values from `lib2` will be ignored" in caplog.text - assert conf["memories"] == [Memory("RAM", "lib1", "0x24000000", "0x80000")] - def test_target_overrides_handled(self): conf = Config( { diff --git a/tests/build/_internal/config/test_source.py b/tests/build/_internal/config/test_source.py index b7f4a2a..962315a 100644 --- a/tests/build/_internal/config/test_source.py +++ b/tests/build/_internal/config/test_source.py @@ -2,10 +2,8 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -import pytest - from mbed_tools.build._internal.config import source -from mbed_tools.build._internal.config.source import Memory, Override +from mbed_tools.build._internal.config.source import Override class TestPrepareSource: @@ -120,48 +118,3 @@ class TestPrepareSource: assert conf["config"][0].value == {"ETHERNET", "WIFI"} assert conf["sectors"] == {0, 2048} assert conf["header_info"] == {0, 2048, "bobbins", "magic"} - - def test_memory_attr_extracted(self): - lib = { - "mbed_ram_size": "0x80000", - "mbed_ram_start": "0x24000000", - "mbed_rom_size": "0x200000", - "mbed_rom_start": "0x08000000", - } - - conf = source.prepare(lib, "lib") - - assert Memory("RAM", "lib", "0x24000000", "0x80000") in conf["memories"] - assert Memory("ROM", "lib", "0x8000000", "0x200000") in conf["memories"] - - def test_memory_attr_converted_as_hex(self): - input_dict = {"mbed_ram_size": "1024", "mbed_ram_start": "0x24000000"} - - conf = source.prepare(input_dict, source_name="lib") - - memory, *_ = conf["memories"] - assert memory.size == "0x400" - - def test_raises_memory_size_not_integer(self): - input_dict = {"mbed_ram_size": "NOT INT", "mbed_ram_start": "0x24000000"} - - with pytest.raises(ValueError, match="_SIZE in lib, NOT INT is invalid: must be an integer"): - source.prepare(input_dict, "lib") - - def test_raises_memory_start_not_integer(self): - input_dict = {"mbed_ram_size": "0x80000", "mbed_ram_start": "NOT INT"} - - with pytest.raises(ValueError, match="_START in lib, NOT INT is invalid: must be an integer"): - source.prepare(input_dict, "lib") - - def test_raises_memory_size_defined_not_start(self): - input_dict = {"mbed_ram_size": "0x80000"} - - with pytest.raises(ValueError, match="Only SIZE is defined"): - source.prepare(input_dict) - - def 
test_raises_memory_start_defined_not_size(self): - input_dict = {"mbed_ram_start": "0x24000000"} - - with pytest.raises(ValueError, match="Only START is defined"): - source.prepare(input_dict) diff --git a/tests/build/test_generate_config.py b/tests/build/test_generate_config.py index b18bb2b..6605f5b 100644 --- a/tests/build/test_generate_config.py +++ b/tests/build/test_generate_config.py @@ -48,6 +48,10 @@ TARGET_DATA = { "supported_toolchains": ["ARM", "GCC_ARM", "IAR"], "trustzone": False, "OUTPUT_EXT": "hex", + "mbed_ram_start": "0", + "mbed_ram_size": "0", + "mbed_rom_start": "0", + "mbed_rom_size": "0", } @@ -289,6 +293,10 @@ def test_overrides_target_config_param_from_app(matching_target_and_filter, prog ("target.macros", ["DEFINE"], "DEFINE"), ("target.device_has", ["NOTHING"], "DEVICE_NOTHING"), ("target.features", ["ELECTRICITY"], "FEATURE_ELECTRICITY"), + ("target.mbed_rom_start", "99", "MBED_ROM_START=0x63"), + ("target.mbed_rom_size", "1010", "MBED_ROM_SIZE=0x3f2"), + ("target.mbed_ram_start", "99", "MBED_RAM_START=0x63"), + ("target.mbed_ram_size", "1010", "MBED_RAM_SIZE=0x3f2"), ("OUTPUT_EXT", "hex", 'MBED_OUTPUT_EXT "hex"'), ], )
mbed_rom_size values from mbed_app.json are ignored **Describe the bug** #270 implemented mbed_rom_size support in the targets.json file, but it seems that if the value is overridden in the local mbed_app.json file, the override is ignored @wernerlewis **To Reproduce** Steps to reproduce the behavior: - choose a target with "mbed_rom_size" defined - change the value in mbed_app.json via "target.mbed_rom_size" - check the generated mbed_config.cmake
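A small sketch of the rendering path the fix takes: memory values flow into the template context like any other target attribute (so app-level overrides apply), and a custom Jinja2 filter normalises them to hex. Only the to_hex filter is taken from the patch above; the template string is illustrative:

```python
import jinja2


def to_hex(value) -> str:
    # Accepts "1010", "0x3f2" or an int and renders a canonical hex literal.
    return hex(int(str(value), 0))


env = jinja2.Environment()
env.filters["to_hex"] = to_hex
template = env.from_string("MBED_ROM_SIZE={{ mbed_rom_size | to_hex }}")

# An app-level override such as "target.mbed_rom_size": "1010" ends up here:
print(template.render(mbed_rom_size="1010"))  # MBED_ROM_SIZE=0x3f2
```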
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/test_generate_config.py::test_overrides_target_non_config_params_from_app[target:" ]
[ "tests/build/_internal/config/test_config.py::TestConfig::test_config_updated", "tests/build/_internal/config/test_config.py::TestConfig::test_raises_when_trying_to_add_duplicate_config_setting", "tests/build/_internal/config/test_config.py::TestConfig::test_target_overrides_handled", "tests/build/_internal/config/test_config.py::TestConfig::test_target_overrides_separate_namespace", "tests/build/_internal/config/test_config.py::TestConfig::test_lib_overrides_handled", "tests/build/_internal/config/test_config.py::TestConfig::test_cumulative_fields_can_be_modified", "tests/build/_internal/config/test_config.py::TestConfig::test_macros_are_appended_to", "tests/build/_internal/config/test_config.py::TestConfig::test_warns_and_skips_override_for_undefined_config_parameter", "tests/build/_internal/config/test_config.py::TestConfig::test_ignores_present_option", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_config_fields_from_target_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_override_fields_from_target_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_config_fields_from_lib_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_override_fields_from_lib_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_target_overrides_only_collected_for_valid_targets", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_cumulative_fields_parsed", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_converts_config_setting_value_lists_to_sets", "tests/build/test_generate_config.py::test_target_and_toolchain_collected", "tests/build/test_generate_config.py::test_custom_targets_data_found", "tests/build/test_generate_config.py::test_raises_error_when_attempting_to_customize_existing_target", "tests/build/test_generate_config.py::test_config_param_from_lib_processed_with_default_name_mangling", "tests/build/test_generate_config.py::test_config_param_from_lib_processed_with_user_set_name", "tests/build/test_generate_config.py::test_config_param_from_app_processed_with_default_name_mangling", "tests/build/test_generate_config.py::test_config_param_from_target_processed_with_default_name_mangling", "tests/build/test_generate_config.py::test_macros_from_lib_collected[single]", "tests/build/test_generate_config.py::test_macros_from_lib_collected[multiple]", "tests/build/test_generate_config.py::test_macros_from_app_collected[single]", "tests/build/test_generate_config.py::test_macros_from_app_collected[multiple]", "tests/build/test_generate_config.py::test_macros_from_target_collected", "tests/build/test_generate_config.py::test_target_labels_collected_as_defines", "tests/build/test_generate_config.py::test_overrides_lib_config_param_from_app[target:", "tests/build/test_generate_config.py::test_overrides_target_config_param_from_app[target:", "tests/build/test_generate_config.py::test_overrides_target_config_param_from_lib[target:", "tests/build/test_generate_config.py::test_overrides_lib_config_param_from_same_lib[target:", "tests/build/test_generate_config.py::test_raises_when_attempting_to_override_lib_config_param_from_other_lib[target:", "tests/build/test_generate_config.py::test_target_list_params_can_be_added_to[target:", "tests/build/test_generate_config.py::test_target_list_params_can_be_removed[target:", 
"tests/build/test_generate_config.py::test_warns_when_attempting_to_override_nonexistent_param[target:", "tests/build/test_generate_config.py::test_settings_from_multiple_libs_included[target:", "tests/build/test_generate_config.py::test_requires_config_option", "tests/build/test_generate_config.py::test_target_requires_config_option", "tests/build/test_generate_config.py::test_config_parsed_when_mbed_os_outside_project_root[target:" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2021-05-24T11:45:52Z"
apache-2.0
ARMmbed__mbed-tools-285
diff --git a/news/20210524175020.bugfix b/news/20210524175020.bugfix new file mode 100644 index 0000000..e9b7b63 --- /dev/null +++ b/news/20210524175020.bugfix @@ -0,0 +1,1 @@ +Avoid searching config file paths twice when mbed-os-path is used and it is a subdirectory of the project path. diff --git a/src/mbed_tools/build/_internal/config/assemble_build_config.py b/src/mbed_tools/build/_internal/config/assemble_build_config.py index 676bc4a..e61cd9c 100644 --- a/src/mbed_tools/build/_internal/config/assemble_build_config.py +++ b/src/mbed_tools/build/_internal/config/assemble_build_config.py @@ -33,7 +33,11 @@ def assemble_config(target_attributes: dict, search_paths: Iterable[Path], mbed_ mbed_app_file: The path to mbed_app.json. This can be None. """ mbed_lib_files = list( - set(itertools.chain.from_iterable(find_files("mbed_lib.json", path) for path in search_paths)) + set( + itertools.chain.from_iterable( + find_files("mbed_lib.json", path.absolute().resolve()) for path in search_paths + ) + ) ) return _assemble_config_from_sources(target_attributes, mbed_lib_files, mbed_app_file) diff --git a/src/mbed_tools/build/_internal/find_files.py b/src/mbed_tools/build/_internal/find_files.py index 9f663bb..1dba384 100644 --- a/src/mbed_tools/build/_internal/find_files.py +++ b/src/mbed_tools/build/_internal/find_files.py @@ -52,6 +52,9 @@ def _find_files(filename: str, directory: Path, filters: Optional[List[Callable] filtered_children = filter_files(children, filters) for child in filtered_children: + if child.is_symlink(): + child = child.absolute().resolve() + if child.is_dir(): # If processed child is a directory, recurse with current set of filters result += _find_files(filename, child, filters)
ARMmbed/mbed-tools
ff2da40abec773902b6fda86d36de154d83a7d9f
diff --git a/tests/build/_internal/config/test_assemble_build_config.py b/tests/build/_internal/config/test_assemble_build_config.py index 79acb8d..47fcc5f 100644 --- a/tests/build/_internal/config/test_assemble_build_config.py +++ b/tests/build/_internal/config/test_assemble_build_config.py @@ -6,7 +6,7 @@ import json from pathlib import Path from tempfile import TemporaryDirectory -from mbed_tools.build._internal.config.assemble_build_config import _assemble_config_from_sources +from mbed_tools.build._internal.config.assemble_build_config import _assemble_config_from_sources, assemble_config from mbed_tools.build._internal.config.config import Config from mbed_tools.build._internal.find_files import find_files from mbed_tools.build._internal.config.source import prepare @@ -157,3 +157,47 @@ class TestAssembleConfigFromSourcesAndLibFiles: assert config["extra_labels"] == {"EXTRA_HOT"} assert config["labels"] == {"A", "PICKLE"} assert config["macros"] == {"TICKER", "RED_MACRO"} + + def test_ignores_duplicate_paths_to_lib_files(self, tmp_path, monkeypatch): + target = { + "labels": {"A"}, + } + mbed_lib_files = [ + { + "path": Path("mbed-os", "TARGET_A", "mbed_lib.json"), + "json_contents": {"name": "a", "config": {"a": {"value": 4}}}, + }, + ] + _ = create_files(tmp_path, mbed_lib_files) + monkeypatch.chdir(tmp_path) + + config = assemble_config(target, [tmp_path, Path("mbed-os")], None) + + assert config["config"][0].name == "a" + assert config["config"][0].value == 4 + + def test_does_not_search_symlinks_in_proj_dir_twice(self, tmp_path, monkeypatch): + target = { + "labels": {"A"}, + } + mbed_lib_files = [ + { + "path": Path("mbed-os", "TARGET_A", "mbed_lib.json"), + "json_contents": {"name": "a", "config": {"a": {"value": 4}}}, + }, + ] + project_dir = tmp_path / "project" + project_dir.mkdir() + + mbed_os_dir = tmp_path / "other" / "mbed-os" + mbed_os_dir.mkdir(parents=True) + _ = create_files(mbed_os_dir, mbed_lib_files) + + monkeypatch.chdir(project_dir) + mbed_symlink = Path("mbed-os") + mbed_symlink.symlink_to(mbed_os_dir, target_is_directory=True) + + config = assemble_config(target, [project_dir, mbed_symlink], None) + + assert config["config"][0].name == "a" + assert config["config"][0].value == 4
Does not compile with --mbed-os-path option **Describe the bug** As in title. **To Reproduce** Steps to reproduce the behavior: 1. Import an example project. 1. mbed-tools compile -m K64F -t GCC_ARM --mbed-os-path mbed-os Error: `ValueError: Setting storage_filesystem.rbp_internal_size already defined. You cannot duplicate config settings!` The setting name changes every time the command is executed. **Expected behavior** Should work. **Screenshots** If applicable, add screenshots to help explain your problem. **Desktop (please complete the following information):** - OS: Windows - Version: 10 **Mbed (please complete the following information):** - Device: any - Mbed OS Version: 6.9.0 - Mbed CLI 2 Version: 7.8.0 **Additional context** I assume this has something to do with the dependencies. Here is my pipdeptree output: ``` mbed-tools==7.8.0 - Click [required: >=7.1,<8, installed: 7.1] - GitPython [required: Any, installed: 3.1.14] - gitdb [required: >=4.0.1,<5, installed: 4.0.5] - smmap [required: >=3.0.1,<4, installed: 3.0.5] - Jinja2 [required: Any, installed: 2.11.3] - MarkupSafe [required: >=0.23, installed: 1.1.1] - pdoc3 [required: Any, installed: 0.9.2] - mako [required: Any, installed: 1.1.4] - MarkupSafe [required: >=0.9.2, installed: 1.1.1] - markdown [required: >=3.0, installed: 3.3.4] - pyserial [required: Any, installed: 3.5] - python-dotenv [required: Any, installed: 0.15.0] - pywin32 [required: Any, installed: 300] - requests [required: >=2.20, installed: 2.25.1] - certifi [required: >=2017.4.17, installed: 2020.12.5] - chardet [required: >=3.0.2,<5, installed: 4.0.0] - idna [required: >=2.5,<3, installed: 2.10] - urllib3 [required: >=1.21.1,<1.27, installed: 1.26.4] - tabulate [required: Any, installed: 0.8.9] - tqdm [required: Any, installed: 4.59.0] - typing-extensions [required: Any, installed: 3.7.4.3] ```
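The accompanying patch addresses the duplicate-setting error by resolving every search path (and any symlinked child) before collecting `mbed_lib.json` files, so the same file reached via two path spellings deduplicates. A minimal sketch of that idea, assuming nothing about the real `find_files` machinery; the helper name and the `rglob` walk are illustrative only.

```python
from itertools import chain
from pathlib import Path

def find_lib_files(search_paths):
    # Resolve symlinks and relative segments first; "mbed-os" given on the
    # command line and the same directory reached from the project root then
    # collapse to one canonical Path, so set() can actually deduplicate.
    roots = [Path(p).absolute().resolve() for p in search_paths]
    found = chain.from_iterable(root.rglob("mbed_lib.json") for root in roots)
    return sorted(set(found))
```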
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/_internal/config/test_assemble_build_config.py::TestAssembleConfigFromSourcesAndLibFiles::test_does_not_search_symlinks_in_proj_dir_twice", "tests/build/_internal/config/test_assemble_build_config.py::TestAssembleConfigFromSourcesAndLibFiles::test_ignores_duplicate_paths_to_lib_files" ]
[ "tests/build/_internal/config/test_assemble_build_config.py::TestAssembleConfigFromSourcesAndLibFiles::test_updates_target_labels_from_config", "tests/build/_internal/config/test_assemble_build_config.py::TestAssembleConfigFromSourcesAndLibFiles::test_assembles_config_using_all_relevant_files" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-05-24T18:08:57Z"
apache-2.0
ARMmbed__mbed-tools-288
diff --git a/news/286.bugfix b/news/286.bugfix new file mode 100644 index 0000000..745bfb1 --- /dev/null +++ b/news/286.bugfix @@ -0,0 +1,1 @@ +Properly handle --custom-targets-json in configure sub command diff --git a/news/287.bugfix b/news/287.bugfix new file mode 100644 index 0000000..586d532 --- /dev/null +++ b/news/287.bugfix @@ -0,0 +1,1 @@ +Allow choosing output directory in configure sub command diff --git a/src/mbed_tools/cli/configure.py b/src/mbed_tools/cli/configure.py index 71e2c79..553266e 100644 --- a/src/mbed_tools/cli/configure.py +++ b/src/mbed_tools/cli/configure.py @@ -14,6 +14,9 @@ from mbed_tools.build import generate_config @click.command( help="Generate an Mbed OS config CMake file and write it to a .mbedbuild folder in the program directory." ) +@click.option( + "--custom-targets-json", type=click.Path(), default=None, help="Path to custom_targets.json.", +) @click.option( "-t", "--toolchain", @@ -22,6 +25,7 @@ from mbed_tools.build import generate_config help="The toolchain you are using to build your app.", ) @click.option("-m", "--mbed-target", required=True, help="A build target for an Mbed-enabled device, eg. K64F") +@click.option("-o", "--output-dir", type=click.Path(), default=None, help="Path to output directory.") @click.option( "-p", "--program-path", @@ -32,7 +36,9 @@ from mbed_tools.build import generate_config @click.option( "--mbed-os-path", type=click.Path(), default=None, help="Path to local Mbed OS directory.", ) -def configure(toolchain: str, mbed_target: str, program_path: str, mbed_os_path: str) -> None: +def configure( + toolchain: str, mbed_target: str, program_path: str, mbed_os_path: str, output_dir: str, custom_targets_json: str +) -> None: """Exports a mbed_config.cmake file to build directory in the program root. The parameters set in the CMake file will be dependent on the combination of @@ -43,16 +49,23 @@ def configure(toolchain: str, mbed_target: str, program_path: str, mbed_os_path: exist. Args: + custom_targets_json: the path to custom_targets.json toolchain: the toolchain you are using (eg. GCC_ARM, ARM) mbed_target: the target you are building for (eg. K64F) program_path: the path to the local Mbed program mbed_os_path: the path to the local Mbed OS directory + output_dir: the path to the output directory """ cmake_build_subdir = pathlib.Path(mbed_target.upper(), "develop", toolchain.upper()) if mbed_os_path is None: program = MbedProgram.from_existing(pathlib.Path(program_path), cmake_build_subdir) else: program = MbedProgram.from_existing(pathlib.Path(program_path), cmake_build_subdir, pathlib.Path(mbed_os_path)) + if custom_targets_json is not None: + program.files.custom_targets_json = pathlib.Path(custom_targets_json) + if output_dir is not None: + program.files.cmake_build_dir = pathlib.Path(output_dir) + mbed_target = mbed_target.upper() output_path = generate_config(mbed_target, toolchain, program) click.echo(f"mbed_config.cmake has been generated and written to '{str(output_path.resolve())}'")
ARMmbed/mbed-tools
673552826ac7e1e60477e8a212a522412e45ef7e
diff --git a/tests/cli/test_configure.py b/tests/cli/test_configure.py index edb2341..a0c61fd 100644 --- a/tests/cli/test_configure.py +++ b/tests/cli/test_configure.py @@ -2,6 +2,8 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # +import pathlib + from unittest import TestCase, mock from click.testing import CliRunner @@ -23,3 +25,25 @@ class TestConfigureCommand(TestCase): CliRunner().invoke(configure, ["-m", "k64f", "-t", "gcc_arm", "--mbed-os-path", "./extern/mbed-os"]) generate_config.assert_called_once_with("K64F", "GCC_ARM", program.from_existing()) + + @mock.patch("mbed_tools.cli.configure.generate_config") + @mock.patch("mbed_tools.cli.configure.MbedProgram") + def test_custom_targets_location_used_when_passed(self, program, generate_config): + program = program.from_existing() + custom_targets_json_path = pathlib.Path("custom", "custom_targets.json") + CliRunner().invoke( + configure, ["-t", "gcc_arm", "-m", "k64f", "--custom-targets-json", custom_targets_json_path] + ) + + generate_config.assert_called_once_with("K64F", "GCC_ARM", program) + self.assertEqual(program.files.custom_targets_json, custom_targets_json_path) + + @mock.patch("mbed_tools.cli.configure.generate_config") + @mock.patch("mbed_tools.cli.configure.MbedProgram") + def test_custom_output_directory_used_when_passed(self, program, generate_config): + program = program.from_existing() + output_dir = pathlib.Path("build") + CliRunner().invoke(configure, ["-t", "gcc_arm", "-m", "k64f", "-o", output_dir]) + + generate_config.assert_called_once_with("K64F", "GCC_ARM", program) + self.assertEqual(program.files.cmake_build_dir, output_dir)
Allow setting custom output directory in "configure" sub command **Is your feature request related to a problem? Please describe.** I use a custom build tool (that calls cmake under the hood) to build packages in my project. The fact that mbed-tools uses such an unusual build directory makes things awkward to integrate. **Describe the solution you'd like** It would be great if mbed-tools allowed passing an "-o/--output-dir" option to configure so we could customize where mbed_config.cmake is written. **Describe alternatives you've considered** Heuristically trying to find out where mbed_config.cmake was generated and moving it to an appropriate location. This solution is suboptimal, at best.
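For reference, the core of the patch is an optional `click` path option that overrides the destination only when supplied. A stripped-down sketch under that assumption; the command body and the `cmake_build` fallback are placeholders, not the tool's real logic.

```python
import pathlib
import click

@click.command()
@click.option("-o", "--output-dir", type=click.Path(), default=None,
              help="Directory to write mbed_config.cmake into.")
def configure(output_dir):
    # Only override the destination when the user actually passed the option.
    build_dir = pathlib.Path(output_dir) if output_dir else pathlib.Path("cmake_build")
    click.echo(f"mbed_config.cmake goes to {build_dir.resolve()}")

if __name__ == "__main__":
    configure()
```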
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/cli/test_configure.py::TestConfigureCommand::test_custom_output_directory_used_when_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_custom_targets_location_used_when_passed" ]
[ "tests/cli/test_configure.py::TestConfigureCommand::test_generate_config_called_with_correct_arguments", "tests/cli/test_configure.py::TestConfigureCommand::test_generate_config_called_with_mbed_os_path" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-05-25T13:50:10Z"
apache-2.0
ARMmbed__mbed-tools-292
diff --git a/news/291.bugfix b/news/291.bugfix new file mode 100644 index 0000000..c7487e4 --- /dev/null +++ b/news/291.bugfix @@ -0,0 +1,1 @@ +Add an option `--app-config` to `configure` and `build` commands to allow users to specify an application configuration file. diff --git a/src/mbed_tools/cli/build.py b/src/mbed_tools/cli/build.py index f548d60..60d2e06 100644 --- a/src/mbed_tools/cli/build.py +++ b/src/mbed_tools/cli/build.py @@ -22,9 +22,10 @@ from mbed_tools.sterm import terminal "-t", "--toolchain", type=click.Choice(["ARM", "GCC_ARM"], case_sensitive=False), + required=True, help="The toolchain you are using to build your app.", ) -@click.option("-m", "--mbed-target", help="A build target for an Mbed-enabled device, e.g. K64F.") +@click.option("-m", "--mbed-target", required=True, help="A build target for an Mbed-enabled device, e.g. K64F.") @click.option("-b", "--profile", default="develop", help="The build type (release, develop or debug).") @click.option("-c", "--clean", is_flag=True, default=False, help="Perform a clean build.") @click.option( @@ -39,6 +40,9 @@ from mbed_tools.sterm import terminal @click.option( "--custom-targets-json", type=click.Path(), default=None, help="Path to custom_targets.json.", ) +@click.option( + "--app-config", type=click.Path(), default=None, help="Path to application configuration file.", +) @click.option( "-f", "--flash", is_flag=True, default=False, help="Flash the binary onto a device", ) @@ -54,14 +58,15 @@ from mbed_tools.sterm import terminal def build( program_path: str, profile: str, - toolchain: str = "", - mbed_target: str = "", - clean: bool = False, - flash: bool = False, - sterm: bool = False, - baudrate: int = 9600, - mbed_os_path: str = None, - custom_targets_json: str = None, + toolchain: str, + mbed_target: str, + clean: bool, + flash: bool, + sterm: bool, + baudrate: int, + mbed_os_path: str, + custom_targets_json: str, + app_config: str, ) -> None: """Configure and build an Mbed project using CMake and Ninja. @@ -75,12 +80,12 @@ def build( custom_targets_json: Path to custom_targets.json. toolchain: The toolchain to use for the build. mbed_target: The name of the Mbed target to build for. + app_config: the path to the application configuration file clean: Perform a clean build. flash: Flash the binary onto a device. sterm: Open a serial terminal to the connected target. baudrate: Change the serial baud rate (ignored unless --sterm is also given). """ - _validate_target_and_toolchain_args(mbed_target, toolchain) mbed_target, target_id = _get_target_id(mbed_target) cmake_build_subdir = pathlib.Path(mbed_target.upper(), profile.lower(), toolchain.upper()) @@ -95,6 +100,8 @@ def build( click.echo("Configuring project and generating build system...") if custom_targets_json is not None: program.files.custom_targets_json = pathlib.Path(custom_targets_json) + if app_config is not None: + program.files.app_config_file = pathlib.Path(app_config) config, _ = generate_config(mbed_target.upper(), toolchain, program) generate_build_system(program.root, build_tree, profile) @@ -124,13 +131,6 @@ def build( terminal.run(dev.serial_port, baudrate) -def _validate_target_and_toolchain_args(target: str, toolchain: str) -> None: - if not all([toolchain, target]): - raise click.UsageError( - "Both --toolchain and --mbed-target arguments are required when using the compile subcommand." 
- ) - - def _get_target_id(target: str) -> Tuple[str, Optional[int]]: if "[" in target: target_name, target_id = target.replace("]", "").split("[", maxsplit=1) diff --git a/src/mbed_tools/cli/configure.py b/src/mbed_tools/cli/configure.py index e7279d6..360c389 100644 --- a/src/mbed_tools/cli/configure.py +++ b/src/mbed_tools/cli/configure.py @@ -36,8 +36,17 @@ from mbed_tools.build import generate_config @click.option( "--mbed-os-path", type=click.Path(), default=None, help="Path to local Mbed OS directory.", ) +@click.option( + "--app-config", type=click.Path(), default=None, help="Path to application configuration file.", +) def configure( - toolchain: str, mbed_target: str, program_path: str, mbed_os_path: str, output_dir: str, custom_targets_json: str + toolchain: str, + mbed_target: str, + program_path: str, + mbed_os_path: str, + output_dir: str, + custom_targets_json: str, + app_config: str ) -> None: """Exports a mbed_config.cmake file to build directory in the program root. @@ -55,6 +64,7 @@ def configure( program_path: the path to the local Mbed program mbed_os_path: the path to the local Mbed OS directory output_dir: the path to the output directory + app_config: the path to the application configuration file """ cmake_build_subdir = pathlib.Path(mbed_target.upper(), "develop", toolchain.upper()) if mbed_os_path is None: @@ -65,6 +75,8 @@ def configure( program.files.custom_targets_json = pathlib.Path(custom_targets_json) if output_dir is not None: program.files.cmake_build_dir = pathlib.Path(output_dir) + if app_config is not None: + program.files.app_config_file = pathlib.Path(app_config) mbed_target = mbed_target.upper() _, output_path = generate_config(mbed_target, toolchain, program)
ARMmbed/mbed-tools
f55d2eb5f6aec73e33a85331c82d0d3d71cc09b4
diff --git a/tests/cli/test_build.py b/tests/cli/test_build.py index 860d275..d680ee9 100644 --- a/tests/cli/test_build.py +++ b/tests/cli/test_build.py @@ -116,18 +116,6 @@ class TestBuildCommand(TestCase): self.assertIsNotNone(result.exception) self.assertRegex(result.output, "--mbed-target") - def test_raises_if_gen_config_target_toolchain_not_passed( - self, generate_config, mbed_program, build_project, generate_build_system - ): - program = mbed_program.from_existing() - with mock_project_directory(program): - runner = CliRunner() - result = runner.invoke(build) - - self.assertIsNotNone(result.exception) - self.assertRegex(result.output, "--mbed-target") - self.assertRegex(result.output, "--toolchain") - def test_raises_if_target_identifier_not_int( self, generate_config, mbed_program, build_project, generate_build_system ): @@ -183,6 +171,21 @@ class TestBuildCommand(TestCase): generate_config.assert_called_once_with(target.upper(), toolchain.upper(), program) self.assertEqual(program.files.custom_targets_json, custom_targets_json_path) + def test_app_config_used_when_passed( + self, generate_config, mbed_program, build_project, generate_build_system + ): + program = mbed_program.from_existing() + with mock_project_directory(program, mbed_config_exists=True, build_tree_exists=True): + toolchain = "gcc_arm" + target = "k64f" + app_config_path = pathlib.Path("alternative_config.json") + + runner = CliRunner() + runner.invoke(build, ["-t", toolchain, "-m", target, "--app-config", app_config_path]) + + generate_config.assert_called_once_with(target.upper(), toolchain.upper(), program) + self.assertEqual(program.files.app_config_file, app_config_path) + def test_build_folder_removed_when_clean_flag_passed( self, generate_config, mbed_program, build_project, generate_build_system ): diff --git a/tests/cli/test_configure.py b/tests/cli/test_configure.py index a0c61fd..2ae90b1 100644 --- a/tests/cli/test_configure.py +++ b/tests/cli/test_configure.py @@ -47,3 +47,15 @@ class TestConfigureCommand(TestCase): generate_config.assert_called_once_with("K64F", "GCC_ARM", program) self.assertEqual(program.files.cmake_build_dir, output_dir) + + @mock.patch("mbed_tools.cli.configure.generate_config") + @mock.patch("mbed_tools.cli.configure.MbedProgram") + def test_app_config_used_when_passed(self, program, generate_config): + program = program.from_existing() + app_config_path = pathlib.Path("alternative_config.json") + CliRunner().invoke( + configure, ["-t", "gcc_arm", "-m", "k64f", "--app-config", app_config_path] + ) + + generate_config.assert_called_once_with("K64F", "GCC_ARM", program) + self.assertEqual(program.files.app_config_file, app_config_path)
Missing `--app-config` option **Describe the bug** Mbed CLI 1 offers an `--app-config` option to let users specify the application configuration JSON instead of assuming `mbed_app.json`. This is not currently provided by mbed-tools, but useful when * one application provides multiple configurations for different use cases * different applications or tests share one common configuration (e.g. [experimental.json](https://github.com/ARMmbed/mbed-os/blob/master/TESTS/configs/experimental.json)) **To Reproduce** Try to build an application (e.g. blinky) with `--app-config`, for example ``` mbed-tools compile -t GCC_ARM -m K64F --app-config mbed-os/TESTS/configs/experimental.json ``` and the error is ``` Error: no such option: --app-config ``` **Expected behavior** An option `--app-config` is available to both `mbed-tools configure` and `mbed-tools compile`. The specified JSON is used for generating `mbed_config.cmake`. **Screenshots** N/A **Desktop (please complete the following information):** - OS: Any - Version: Any **Mbed (please complete the following information):** - Device: Any - Mbed OS Version: 6.11.0 - Mbed CLI 2 Version: 7.23.0 **Additional context** N/A
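The patch follows the same optional-override pattern as the other new CLI flags: only when `--app-config` is given does it repoint the program's `app_config_file`. A hedged sketch of that step, with a `SimpleNamespace` standing in for the real `MbedProgram`.

```python
import pathlib
from types import SimpleNamespace

def apply_app_config(program, app_config=None):
    # Leave the default mbed_app.json in place unless an override was given.
    if app_config is not None:
        program.files.app_config_file = pathlib.Path(app_config)
    return program

program = SimpleNamespace(files=SimpleNamespace(app_config_file=pathlib.Path("mbed_app.json")))
apply_app_config(program, "mbed-os/TESTS/configs/experimental.json")
print(program.files.app_config_file)
```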
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/cli/test_build.py::TestBuildCommand::test_app_config_used_when_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_app_config_used_when_passed" ]
[ "tests/cli/test_build.py::TestBuildCommand::test_build_flash_both_two_devices", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_only_identifier_device", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_options_bin_target", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_options_hex_target", "tests/cli/test_build.py::TestBuildCommand::test_build_folder_removed_when_clean_flag_passed", "tests/cli/test_build.py::TestBuildCommand::test_build_system_regenerated_when_mbed_os_path_passed", "tests/cli/test_build.py::TestBuildCommand::test_calls_generate_build_system_if_build_tree_nonexistent", "tests/cli/test_build.py::TestBuildCommand::test_custom_targets_location_used_when_passed", "tests/cli/test_build.py::TestBuildCommand::test_generate_config_called_if_config_script_nonexistent", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_device_does_not_have_serial_port_and_sterm_flag_given", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_gen_config_target_not_passed_when_required", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_gen_config_toolchain_not_passed_when_required", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_target_identifier_negative", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_target_identifier_not_int", "tests/cli/test_build.py::TestBuildCommand::test_searches_for_mbed_program_at_default_project_path", "tests/cli/test_build.py::TestBuildCommand::test_searches_for_mbed_program_at_user_defined_project_root", "tests/cli/test_build.py::TestBuildCommand::test_sterm_is_started_when_flag_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_custom_output_directory_used_when_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_custom_targets_location_used_when_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_generate_config_called_with_correct_arguments", "tests/cli/test_configure.py::TestConfigureCommand::test_generate_config_called_with_mbed_os_path" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-06-22T11:53:33Z"
apache-2.0
ARMmbed__mbed-tools-293
diff --git a/news/262.bugfix b/news/262.bugfix new file mode 100644 index 0000000..a438a13 --- /dev/null +++ b/news/262.bugfix @@ -0,0 +1,1 @@ +Add `-b`, `--profile` to the `configure` subcommand for specifying build profile. diff --git a/src/mbed_tools/cli/configure.py b/src/mbed_tools/cli/configure.py index 360c389..24b49d8 100644 --- a/src/mbed_tools/cli/configure.py +++ b/src/mbed_tools/cli/configure.py @@ -25,6 +25,7 @@ from mbed_tools.build import generate_config help="The toolchain you are using to build your app.", ) @click.option("-m", "--mbed-target", required=True, help="A build target for an Mbed-enabled device, eg. K64F") +@click.option("-b", "--profile", default="develop", help="The build type (release, develop or debug).") @click.option("-o", "--output-dir", type=click.Path(), default=None, help="Path to output directory.") @click.option( "-p", @@ -42,6 +43,7 @@ from mbed_tools.build import generate_config def configure( toolchain: str, mbed_target: str, + profile: str, program_path: str, mbed_os_path: str, output_dir: str, @@ -61,12 +63,13 @@ def configure( custom_targets_json: the path to custom_targets.json toolchain: the toolchain you are using (eg. GCC_ARM, ARM) mbed_target: the target you are building for (eg. K64F) + profile: The Mbed build profile (debug, develop or release). program_path: the path to the local Mbed program mbed_os_path: the path to the local Mbed OS directory output_dir: the path to the output directory app_config: the path to the application configuration file """ - cmake_build_subdir = pathlib.Path(mbed_target.upper(), "develop", toolchain.upper()) + cmake_build_subdir = pathlib.Path(mbed_target.upper(), profile.lower(), toolchain.upper()) if mbed_os_path is None: program = MbedProgram.from_existing(pathlib.Path(program_path), cmake_build_subdir) else:
ARMmbed/mbed-tools
a97be74fef509c90c820c9a96961377e14412c92
diff --git a/tests/cli/test_build.py b/tests/cli/test_build.py index d680ee9..42e5852 100644 --- a/tests/cli/test_build.py +++ b/tests/cli/test_build.py @@ -186,6 +186,29 @@ class TestBuildCommand(TestCase): generate_config.assert_called_once_with(target.upper(), toolchain.upper(), program) self.assertEqual(program.files.app_config_file, app_config_path) + def test_profile_used_when_passed( + self, generate_config, mbed_program, build_project, generate_build_system + ): + program = mbed_program.from_existing() + mbed_program.reset_mock() # clear call count from previous line + + with mock_project_directory(program, mbed_config_exists=True, build_tree_exists=True): + generate_config.return_value = [mock.MagicMock(), mock.MagicMock()] + + toolchain = "gcc_arm" + target = "k64f" + profile = "release" + + runner = CliRunner() + runner.invoke(build, ["-t", toolchain, "-m", target, "--profile", profile]) + + mbed_program.from_existing.assert_called_once_with( + pathlib.Path(os.getcwd()), + pathlib.Path(target.upper(), profile, toolchain.upper()) + ) + generate_config.assert_called_once_with(target.upper(), toolchain.upper(), program) + generate_build_system.assert_called_once_with(program.root, program.files.cmake_build_dir, profile) + def test_build_folder_removed_when_clean_flag_passed( self, generate_config, mbed_program, build_project, generate_build_system ): diff --git a/tests/cli/test_configure.py b/tests/cli/test_configure.py index 2ae90b1..0483e99 100644 --- a/tests/cli/test_configure.py +++ b/tests/cli/test_configure.py @@ -59,3 +59,23 @@ class TestConfigureCommand(TestCase): generate_config.assert_called_once_with("K64F", "GCC_ARM", program) self.assertEqual(program.files.app_config_file, app_config_path) + + @mock.patch("mbed_tools.cli.configure.generate_config") + @mock.patch("mbed_tools.cli.configure.MbedProgram") + def test_profile_used_when_passed(self, program, generate_config): + test_program = program.from_existing() + program.reset_mock() # clear call count from previous line + + toolchain = "gcc_arm" + target = "k64f" + profile = "release" + + CliRunner().invoke( + configure, ["-t", toolchain, "-m", target, "--profile", profile] + ) + + program.from_existing.assert_called_once_with( + pathlib.Path("."), + pathlib.Path(target.upper(), profile, toolchain.upper()) + ) + generate_config.assert_called_once_with("K64F", "GCC_ARM", test_program)
`mbed-tools configure` generates `mbed_config.cmake` under `develop`, and doesn't have a `-b` option The PR description and commit message say that configs and builds should now live under `cmake_build/TARGET/PROFILE/TOOLCHAIN`, but this line makes `mbed-tools configure` always generate `mbed_config.cmake` under `cmake_build/TARGET/develop/TOOLCHAIN`. Why is this? _Originally posted by @wmmc88 in https://github.com/ARMmbed/mbed-tools/pull/175#discussion_r606712097_
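Both subcommands derive the build tree from target, profile and toolchain; the bug is that `configure` hard-codes `develop` for the middle segment. A small sketch of the directory convention the patch makes configurable (the function name is mine, not part of the tool).

```python
import pathlib

def cmake_build_subdir(mbed_target: str, profile: str, toolchain: str) -> pathlib.Path:
    # Layout under cmake_build/: <TARGET>/<profile>/<TOOLCHAIN>, e.g. K64F/release/GCC_ARM
    return pathlib.Path(mbed_target.upper(), profile.lower(), toolchain.upper())

print(cmake_build_subdir("k64f", "Release", "gcc_arm"))
```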
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/cli/test_configure.py::TestConfigureCommand::test_profile_used_when_passed" ]
[ "tests/cli/test_build.py::TestBuildCommand::test_raises_if_target_identifier_not_int", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_options_bin_target", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_only_identifier_device", "tests/cli/test_build.py::TestBuildCommand::test_calls_generate_build_system_if_build_tree_nonexistent", "tests/cli/test_build.py::TestBuildCommand::test_build_system_regenerated_when_mbed_os_path_passed", "tests/cli/test_build.py::TestBuildCommand::test_sterm_is_started_when_flag_passed", "tests/cli/test_build.py::TestBuildCommand::test_profile_used_when_passed", "tests/cli/test_build.py::TestBuildCommand::test_searches_for_mbed_program_at_user_defined_project_root", "tests/cli/test_build.py::TestBuildCommand::test_build_folder_removed_when_clean_flag_passed", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_gen_config_toolchain_not_passed_when_required", "tests/cli/test_build.py::TestBuildCommand::test_app_config_used_when_passed", "tests/cli/test_build.py::TestBuildCommand::test_searches_for_mbed_program_at_default_project_path", "tests/cli/test_build.py::TestBuildCommand::test_generate_config_called_if_config_script_nonexistent", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_options_hex_target", "tests/cli/test_build.py::TestBuildCommand::test_custom_targets_location_used_when_passed", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_gen_config_target_not_passed_when_required", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_target_identifier_negative", "tests/cli/test_build.py::TestBuildCommand::test_raises_if_device_does_not_have_serial_port_and_sterm_flag_given", "tests/cli/test_build.py::TestBuildCommand::test_build_flash_both_two_devices", "tests/cli/test_configure.py::TestConfigureCommand::test_generate_config_called_with_correct_arguments", "tests/cli/test_configure.py::TestConfigureCommand::test_generate_config_called_with_mbed_os_path", "tests/cli/test_configure.py::TestConfigureCommand::test_custom_output_directory_used_when_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_custom_targets_location_used_when_passed", "tests/cli/test_configure.py::TestConfigureCommand::test_app_config_used_when_passed" ]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2021-06-30T14:52:52Z"
apache-2.0
ARMmbed__mbed-tools-309
diff --git a/news/233.bugfix b/news/233.bugfix new file mode 100644 index 0000000..dc0a11e --- /dev/null +++ b/news/233.bugfix @@ -0,0 +1,1 @@ +Generate .mbedignore in a project's build directory, to prevent Mbed CLI 1 from picking up CMake build files. diff --git a/src/mbed_tools/build/config.py b/src/mbed_tools/build/config.py index bea354e..e408564 100644 --- a/src/mbed_tools/build/config.py +++ b/src/mbed_tools/build/config.py @@ -16,6 +16,7 @@ from mbed_tools.build._internal.write_files import write_file from mbed_tools.build.exceptions import MbedBuildError CMAKE_CONFIG_FILE = "mbed_config.cmake" +MBEDIGNORE_FILE = ".mbedignore" def generate_config(target_name: str, toolchain: str, program: MbedProgram) -> Tuple[Config, pathlib.Path]: @@ -40,6 +41,8 @@ def generate_config(target_name: str, toolchain: str, program: MbedProgram) -> T ) cmake_config_file_path = program.files.cmake_build_dir / CMAKE_CONFIG_FILE write_file(cmake_config_file_path, cmake_file_contents) + mbedignore_path = program.files.cmake_build_dir / MBEDIGNORE_FILE + write_file(mbedignore_path, "*") return config, cmake_config_file_path
ARMmbed/mbed-tools
ec83fe02e2ce63938f6262c69b6f95019e5d7d03
diff --git a/tests/build/test_generate_config.py b/tests/build/test_generate_config.py index e7d7eb7..9e94993 100644 --- a/tests/build/test_generate_config.py +++ b/tests/build/test_generate_config.py @@ -4,11 +4,12 @@ # import json +import os import pytest from mbed_tools.project import MbedProgram from mbed_tools.build import generate_config -from mbed_tools.build.config import CMAKE_CONFIG_FILE +from mbed_tools.build.config import CMAKE_CONFIG_FILE, MBEDIGNORE_FILE from mbed_tools.lib.exceptions import ToolsError @@ -96,6 +97,17 @@ def matching_target_and_filter(request): return request.param +def test_mbedignore_generated(program): + target = "K64F" + toolchain = "GCC_ARM" + + generate_config(target, toolchain, program) + + mbedignore_file = (program.files.cmake_build_dir / MBEDIGNORE_FILE) + + assert os.path.isfile(mbedignore_file) + + def test_target_and_toolchain_collected(program): target = "K64F" toolchain = "GCC_ARM"
Users are building with cli1 and cli2 within the same app **Describe the bug** I've received two questions on the same topic within the last 24 hours - "my build is failing with a weird error" - a user first ran cli2 and then, another day, switched to cli1 and received an error. The question is: should we fix this and avoid surprises for users who use either cli1 or cli2? One suggestion many of us arrived at was adding a .mbedignore that lists the `cmake_build` directory. Is there a better way to make this use case work without errors? **To Reproduce** Steps to reproduce the behavior: build blinky with cli2 first, then build with cli1. **Expected behavior** Both builds should work, since they work on the same codebase in separate build directories. As we support both for the time being, we should allow users to build with both, or at least warn them if they are doing this (if we do not want to support the use case). **Mbed (please complete the following information):** mbed-tools, mbed-cli and Mbed OS - any version
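The suggestion the patch adopts is to drop a catch-all `.mbedignore` into the CMake build directory so Mbed CLI 1 never descends into it. A standalone sketch of roughly that step; the helper name is illustrative and the real code routes through `write_file`.

```python
from pathlib import Path

def write_mbedignore(build_dir: Path) -> Path:
    # A single "*" pattern makes Mbed CLI 1 skip everything under the CMake
    # build tree, so the two CLIs can share one project without clashing.
    build_dir.mkdir(parents=True, exist_ok=True)
    ignore_file = build_dir / ".mbedignore"
    ignore_file.write_text("*")
    return ignore_file
```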
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/test_generate_config.py::test_overrides_target_non_config_params_from_app[target:", "tests/build/test_generate_config.py::test_macros_from_lib_collected[single]", "tests/build/test_generate_config.py::test_target_list_params_can_be_added_to[target:", "tests/build/test_generate_config.py::test_target_and_toolchain_collected", "tests/build/test_generate_config.py::test_forced_reset_timeout_unspecified", "tests/build/test_generate_config.py::test_output_ext_hex", "tests/build/test_generate_config.py::test_config_parsed_when_mbed_os_outside_project_root[target:", "tests/build/test_generate_config.py::test_overrides_target_config_param_from_lib[target:", "tests/build/test_generate_config.py::test_config_param_from_lib_processed_with_default_name_mangling", "tests/build/test_generate_config.py::test_target_labels_collected_as_defines", "tests/build/test_generate_config.py::test_raises_when_attempting_to_override_lib_config_param_from_other_lib[target:", "tests/build/test_generate_config.py::test_target_list_params_can_be_removed[target:", "tests/build/test_generate_config.py::test_custom_targets_data_found", "tests/build/test_generate_config.py::test_overrides_lib_config_param_from_same_lib[target:", "tests/build/test_generate_config.py::test_warns_when_attempting_to_override_nonexistent_param[target:", "tests/build/test_generate_config.py::test_config_param_from_target_processed_with_default_name_mangling", "tests/build/test_generate_config.py::test_settings_from_multiple_libs_included[target:", "tests/build/test_generate_config.py::test_macros_from_target_collected", "tests/build/test_generate_config.py::test_macros_from_lib_collected[multiple]", "tests/build/test_generate_config.py::test_macros_from_app_collected[single]", "tests/build/test_generate_config.py::test_overrides_lib_config_param_from_app[target:", "tests/build/test_generate_config.py::test_config_param_from_app_processed_with_default_name_mangling", "tests/build/test_generate_config.py::test_raises_error_when_attempting_to_customize_existing_target", "tests/build/test_generate_config.py::test_requires_config_option", "tests/build/test_generate_config.py::test_macros_from_app_collected[multiple]", "tests/build/test_generate_config.py::test_output_ext_unspecified", "tests/build/test_generate_config.py::test_output_ext_bin", "tests/build/test_generate_config.py::test_forced_reset_timeout_set", "tests/build/test_generate_config.py::test_mbedignore_generated", "tests/build/test_generate_config.py::test_overrides_target_config_param_from_app[target:", "tests/build/test_generate_config.py::test_config_param_from_lib_processed_with_user_set_name" ]
[]
{ "failed_lite_validators": [ "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2021-08-11T11:32:09Z"
apache-2.0
ASFHyP3__hyp3-sdk-152
diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c340f0..55972fc 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,14 @@ and this project adheres to [PEP 440](https://www.python.org/dev/peps/pep-0440/) and uses [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [1.4.1](https://github.com/ASFHyP3/hyp3-sdk/compare/v1.4.1...v1.4.1) + +### Fixed +- Slicing a `Batch` object will now return a new `Batch` instead of `list` of jobs +- `Batch` equality now compares the contained jobs and not object identity + ## [1.4.0](https://github.com/ASFHyP3/hyp3-sdk/compare/v1.3.2...v1.4.0) + ### Added - Exposed new `include_displacement_maps` parameter for `HyP3.prepare_insar_job` and `HyP3.submit_insar_job`, which will cause both a line-of-sight displacement and a vertical displacement GeoTIFF to be included in the product. diff --git a/hyp3_sdk/jobs.py b/hyp3_sdk/jobs.py index fbe31e8..dbcfb4d 100644 --- a/hyp3_sdk/jobs.py +++ b/hyp3_sdk/jobs.py @@ -170,21 +170,22 @@ class Batch: def __contains__(self, job: Job): return job in self.jobs - def __delitem__(self, job: Job): + def __eq__(self, other: 'Batch'): + return self.jobs == other.jobs + + def __delitem__(self, job: int): self.jobs.pop(job) return self def __getitem__(self, index: int): + if isinstance(index, slice): + return Batch(self.jobs[index]) return self.jobs[index] def __setitem__(self, index: int, job: Job): self.jobs[index] = job return self - def __reverse__(self): - for job in self.jobs[::-1]: - yield job - def __repr__(self): reprs = ", ".join([job.__repr__() for job in self.jobs]) return f'Batch([{reprs}])' diff --git a/hyp3_sdk/util.py b/hyp3_sdk/util.py index 94ab7a7..cae1eac 100644 --- a/hyp3_sdk/util.py +++ b/hyp3_sdk/util.py @@ -109,8 +109,8 @@ def download_file(url: str, filepath: Union[Path, str], chunk_size=None, retries session.mount('https://', HTTPAdapter(max_retries=retry_strategy)) session.mount('http://', HTTPAdapter(max_retries=retry_strategy)) - - with session.get(url, stream=True) as s: + stream = False if chunk_size is None else True + with session.get(url, stream=stream) as s: s.raise_for_status() tqdm = get_tqdm_progress_bar() with tqdm.wrapattr(open(filepath, "wb"), 'write', miniters=1, desc=filepath.name,
ASFHyP3/hyp3-sdk
b3e64fdef9d76d7abb6bd762ae1b8429ebd1e3f5
diff --git a/tests/test_jobs.py b/tests/test_jobs.py index 35c409d..cdef7e5 100644 --- a/tests/test_jobs.py +++ b/tests/test_jobs.py @@ -228,60 +228,66 @@ def test_contains(get_mock_job): def test_delitem(): - j1 = Job.from_dict(SUCCEEDED_JOB) - j2 = Job.from_dict(FAILED_JOB) - batch = Batch([j1, j2]) + j0 = Job.from_dict(SUCCEEDED_JOB) + j1 = Job.from_dict(FAILED_JOB) + batch = Batch([j0, j1]) + assert j0 in batch assert j1 in batch - assert j2 in batch del batch[1] - assert j1 in batch - assert j2 not in batch + assert j0 in batch + assert j1 not in batch - batch += j2 + batch += j1 del batch[0] - assert j1 not in batch - assert j2 in batch + assert j0 not in batch + assert j1 in batch -def test_getitem(): - j1 = Job.from_dict(SUCCEEDED_JOB) - j2 = Job.from_dict(FAILED_JOB) - batch = Batch([j1, j2]) +def test_getitem(get_mock_job): + unexpired_time = (datetime.now(tz=tz.UTC) + timedelta(days=7)).isoformat(timespec='seconds') + j0 = Job.from_dict(SUCCEEDED_JOB) + j1 = Job.from_dict(FAILED_JOB) + j2 = get_mock_job(status_code='SUCCEEDED', expiration_time=unexpired_time, + files=[{'url': 'https://foo.com/file', 'size': 0, 'filename': 'file'}]) + batch = Batch([j0, j1, j2]) + + assert j0 == batch[0] + assert j1 == batch[1] + assert j2 == batch[2] - assert j1 == batch[0] - assert j2 == batch[1] + assert Batch([j1, j2]) == batch[1:] def test_setitem(get_mock_job): unexpired_time = (datetime.now(tz=tz.UTC) + timedelta(days=7)).isoformat(timespec='seconds') - j1 = Job.from_dict(SUCCEEDED_JOB) - j2 = Job.from_dict(FAILED_JOB) - j3 = get_mock_job(status_code='SUCCEEDED', expiration_time=unexpired_time, + j0 = Job.from_dict(SUCCEEDED_JOB) + j1 = Job.from_dict(FAILED_JOB) + j2 = get_mock_job(status_code='SUCCEEDED', expiration_time=unexpired_time, files=[{'url': 'https://foo.com/file', 'size': 0, 'filename': 'file'}]) - batch = Batch([j1, j2]) + batch = Batch([j0, j1]) - batch[1] = j3 - assert batch[1] == j3 + assert batch[1] == j1 + batch[1] = j2 + assert batch[1] == j2 def test_reverse(get_mock_job): unexpired_time = (datetime.now(tz=tz.UTC) + timedelta(days=7)).isoformat(timespec='seconds') - j1 = Job.from_dict(SUCCEEDED_JOB) - j2 = Job.from_dict(FAILED_JOB) - j3 = get_mock_job(status_code='SUCCEEDED', expiration_time=unexpired_time, + j0 = Job.from_dict(SUCCEEDED_JOB) + j1 = Job.from_dict(FAILED_JOB) + j2 = get_mock_job(status_code='SUCCEEDED', expiration_time=unexpired_time, files=[{'url': 'https://foo.com/file', 'size': 0, 'filename': 'file'}]) - batch = Batch([j1, j2, j3]) - - batch_reversed = list(reversed(batch)) + batch = Batch([j0, j1, j2]) - assert batch_reversed[0] == j3 - assert batch_reversed[1] == j2 - assert batch_reversed[2] == j1 + batch_reversed = reversed(batch) + assert next(batch_reversed) == j2 + assert next(batch_reversed) == j1 + assert next(batch_reversed) == j0 def test_batch_complete_succeeded(): diff --git a/tests/test_util.py b/tests/test_util.py index 7330f42..ec2b768 100644 --- a/tests/test_util.py +++ b/tests/test_util.py @@ -14,13 +14,24 @@ def test_download_file(tmp_path): assert result_path == (tmp_path / 'file') assert result_path.read_text() == 'foobar' + +@responses.activate +def test_download_file_string_format(tmp_path): responses.add(responses.GET, 'https://foo.com/file2', body='foobar2') - result_path = util.download_file('https://foo.com/file', str(tmp_path / 'file')) - assert result_path == (tmp_path / 'file') - assert result_path.read_text() == 'foobar' + result_path = util.download_file('https://foo.com/file2', str(tmp_path / 'file2')) + assert 
result_path == (tmp_path / 'file2') + assert result_path.read_text() == 'foobar2' assert isinstance(result_path, Path) +@responses.activate +def test_download_file_chunked_response(tmp_path): + responses.add(responses.GET, 'https://foo.com/file3', body='foobar3') + result_path = util.download_file('https://foo.com/file3', tmp_path / 'file3', chunk_size=3) + assert result_path == (tmp_path / 'file3') + assert result_path.read_text() == 'foobar3' + + def test_chunk(): items = list(range(1234)) chunks = list(util.chunk(items))
slicing a Batch returns a list Should return a Batch instead. ``` >>> import hyp3_sdk >>> hyp3 = hyp3_sdk.HyP3() >>> jobs = hyp3.find_jobs() >>> type(jobs) <class 'hyp3_sdk.jobs.Batch'> >>> len(jobs) 955 >>> type(jobs[3:10]) <class 'list'> ```
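The patch resolves this inside `Batch.__getitem__`: slice indices wrap the sliced job list in a new `Batch`, while integer indices still return a single job. A self-contained sketch reduced to the methods that matter here, with strings standing in for `Job` objects.

```python
class Batch:
    def __init__(self, jobs=None):
        self.jobs = list(jobs) if jobs else []

    def __getitem__(self, index):
        # Slices stay Batches; plain indices yield the underlying job.
        if isinstance(index, slice):
            return Batch(self.jobs[index])
        return self.jobs[index]

jobs = Batch(["job0", "job1", "job2"])
print(type(jobs[1:]).__name__)  # Batch
print(jobs[0])                  # job0
```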
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_jobs.py::test_getitem" ]
[ "tests/test_jobs.py::test_batch_iter", "tests/test_jobs.py::test_contains", "tests/test_jobs.py::test_batch_len", "tests/test_jobs.py::test_delitem", "tests/test_jobs.py::test_job_expired", "tests/test_jobs.py::test_batch_add", "tests/test_jobs.py::test_batch_filter_jobs", "tests/test_jobs.py::test_batch_download", "tests/test_jobs.py::test_setitem", "tests/test_jobs.py::test_job_download_files_create_dirs", "tests/test_jobs.py::test_batch_complete_succeeded", "tests/test_jobs.py::test_job_attributes", "tests/test_jobs.py::test_job_complete_succeeded_failed_running", "tests/test_jobs.py::test_job_download_files_expired", "tests/test_jobs.py::test_batch_download_expired", "tests/test_jobs.py::test_job_dict_transforms", "tests/test_jobs.py::test_reverse", "tests/test_jobs.py::test_batch_any_expired", "tests/test_jobs.py::test_batch_iadd", "tests/test_jobs.py::test_job_download_files", "tests/test_util.py::test_download_file_chunked_response", "tests/test_util.py::test_chunk", "tests/test_util.py::test_download_file_string_format", "tests/test_util.py::test_extract_zipped_product", "tests/test_util.py::test_download_file" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-11-19T18:14:31Z"
bsd-3-clause
ASFHyP3__hyp3-sdk-51
diff --git a/hyp3_sdk/hyp3.py b/hyp3_sdk/hyp3.py index 7d90095..baf69f4 100644 --- a/hyp3_sdk/hyp3.py +++ b/hyp3_sdk/hyp3.py @@ -6,6 +6,7 @@ from urllib.parse import urljoin from requests.exceptions import HTTPError, RequestException +import hyp3_sdk from hyp3_sdk.exceptions import HyP3Error, ValidationError from hyp3_sdk.jobs import Batch, Job from hyp3_sdk.util import get_authenticated_session @@ -28,6 +29,7 @@ class HyP3: """ self.url = api_url self.session = get_authenticated_session(username, password) + self.session.headers.update({'User-Agent': f'{hyp3_sdk.__name__}/{hyp3_sdk.__version__}'}) def find_jobs(self, start: Optional[datetime] = None, end: Optional[datetime] = None, status: Optional[str] = None, name: Optional[str] = None) -> Batch:
ASFHyP3/hyp3-sdk
67e33235f7dc3b98241fe34d97a4fae58873590c
diff --git a/tests/test_hyp3.py b/tests/test_hyp3.py index 626ee05..9aa05e9 100644 --- a/tests/test_hyp3.py +++ b/tests/test_hyp3.py @@ -1,4 +1,3 @@ -import json from datetime import datetime, timedelta from urllib.parse import urljoin @@ -10,6 +9,18 @@ from hyp3_sdk import HyP3, Job hyp3_sdk.TESTING = True +@responses.activate +def test_session_headers(): + api = HyP3() + responses.add(responses.GET, urljoin(api.url, '/user'), json={'foo': 'bar'}) + + api.session.get(urljoin(api.url, '/user')) + assert responses.calls[0].request.headers['User-Agent'] == f'hyp3_sdk/{hyp3_sdk.__version__}' + + api.my_info() + assert responses.calls[1].request.headers['User-Agent'] == f'hyp3_sdk/{hyp3_sdk.__version__}' + + @responses.activate def test_find_jobs(get_mock_job): api_response_mock = { @@ -23,7 +34,7 @@ def test_find_jobs(get_mock_job): ] } api = HyP3() - responses.add(responses.GET, urljoin(api.url, '/jobs'), body=json.dumps(api_response_mock)) + responses.add(responses.GET, urljoin(api.url, '/jobs'), json=api_response_mock) response = api.find_jobs() assert len(response) == 3 @@ -32,7 +43,7 @@ def test_find_jobs(get_mock_job): def test_get_job_by_id(get_mock_job): job = get_mock_job() api = HyP3() - responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), body=json.dumps(job.to_dict())) + responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), json=job.to_dict()) response = api._get_job_by_id(job.job_id) assert response == job @@ -45,9 +56,9 @@ def test_watch(get_mock_job): api = HyP3() for ii in range(3): responses.add(responses.GET, urljoin(api.url, f'/jobs/{incomplete_job.job_id}'), - body=json.dumps(incomplete_job.to_dict())) + json=incomplete_job.to_dict()) responses.add(responses.GET, urljoin(api.url, f'/jobs/{incomplete_job.job_id}'), - body=json.dumps(complete_job.to_dict())) + json=complete_job.to_dict()) response = api.watch(incomplete_job, interval=0.05) assert response == complete_job responses.assert_call_count(urljoin(api.url, f'/jobs/{incomplete_job.job_id}'), 4) @@ -60,7 +71,7 @@ def test_refresh(get_mock_job): new_job.status_code = 'SUCCEEDED' api = HyP3() - responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), body=json.dumps(new_job.to_dict())) + responses.add(responses.GET, urljoin(api.url, f'/jobs/{job.job_id}'), json=new_job.to_dict()) response = api.refresh(job) assert response == new_job @@ -74,7 +85,7 @@ def test_submit_job_dict(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, '/jobs'), json=api_response) response = api.submit_job_dict(job.to_dict(for_resubmit=True)) assert response == job @@ -88,7 +99,7 @@ def test_submit_autorift_job(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, '/jobs'), json=api_response) response = api.submit_autorift_job('g1', 'g2') assert response == job @@ -102,7 +113,7 @@ def test_submit_rtc_job(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, urljoin(api.url, '/jobs'), json=api_response) response = api.submit_rtc_job('g1') assert response == job @@ -116,7 +127,7 @@ def test_submit_insar_job(get_mock_job): ] } api = HyP3() - responses.add(responses.POST, urljoin(api.url, '/jobs'), body=json.dumps(api_response)) + responses.add(responses.POST, 
urljoin(api.url, '/jobs'), json=api_response) response = api.submit_insar_job('g1', 'g2') assert response == job @@ -135,7 +146,7 @@ def test_my_info(): 'user_id': 'someUser' } api = HyP3() - responses.add(responses.GET, urljoin(api.url, '/user'), body=json.dumps(api_response)) + responses.add(responses.GET, urljoin(api.url, '/user'), json=api_response) response = api.my_info() assert response == api_response @@ -154,6 +165,6 @@ def test_check_quota(): 'user_id': 'someUser' } api = HyP3() - responses.add(responses.GET, urljoin(api.url, '/user'), body=json.dumps(api_response)) + responses.add(responses.GET, urljoin(api.url, '/user'), json=api_response) response = api.check_quota() assert response == api_response['quota']['remaining']
Add custom User Agent header to hyp3 api session e.g. `User-Agent: hyp3-sdk v0.1.2` so we can identify SDK-generated requests in the API access logs, separate from other requests made via `requests`.
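The patch does this by updating the authenticated `requests` session's default headers right after construction. The two relevant lines, extracted for clarity (they assume the `hyp3_sdk` package is importable):

```python
import requests
import hyp3_sdk

session = requests.Session()
# Every request made through this session now carries the tag, which makes
# SDK traffic easy to separate in the API access logs.
session.headers.update({'User-Agent': f'{hyp3_sdk.__name__}/{hyp3_sdk.__version__}'})
```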
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_hyp3.py::test_session_headers" ]
[ "tests/test_hyp3.py::test_submit_insar_job", "tests/test_hyp3.py::test_find_jobs", "tests/test_hyp3.py::test_watch", "tests/test_hyp3.py::test_submit_autorift_job", "tests/test_hyp3.py::test_get_job_by_id", "tests/test_hyp3.py::test_submit_job_dict", "tests/test_hyp3.py::test_refresh", "tests/test_hyp3.py::test_check_quota", "tests/test_hyp3.py::test_my_info", "tests/test_hyp3.py::test_submit_rtc_job" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2020-12-07T19:19:41Z"
bsd-3-clause
ASFHyP3__hyp3-sdk-53
diff --git a/CHANGELOG.md b/CHANGELOG.md index 8905268..ddcacaa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,14 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [PEP 440](https://www.python.org/dev/peps/pep-0440/) and uses [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [0.3.3](https://github.com/ASFHyP3/hyp3-sdk/compare/v0.3.2...v0.3.3) +### Added +- SDK will attach a `User-Agent` statement like `hyp3_sdk/VERSION` to all API interactions + +### Changed +- Providing a job list to `Batch.__init__()` is now optional; an empty batch will + be created if the job list is not provided +- `Batch.__init__()` no longer issues a warning when creating an empty batch ## [0.3.2](https://github.com/ASFHyP3/hyp3-sdk/compare/v0.3.1...v0.3.2) ### Changed diff --git a/hyp3_sdk/jobs.py b/hyp3_sdk/jobs.py index fbe8837..7866439 100644 --- a/hyp3_sdk/jobs.py +++ b/hyp3_sdk/jobs.py @@ -1,4 +1,3 @@ -import warnings from datetime import datetime from pathlib import Path from typing import List, Optional, Union @@ -124,10 +123,9 @@ class Job: class Batch: - def __init__(self, jobs: List[Job]): - if len(jobs) == 0: - warnings.warn('Jobs list is empty; creating an empty Batch', UserWarning) - + def __init__(self, jobs: Optional[List[Job]] = None): + if jobs is None: + jobs = [] self.jobs = jobs def __len__(self):
ASFHyP3/hyp3-sdk
56cfb700341a0de44ee0f2f3548d5ed6c534d659
diff --git a/tests/test_jobs.py b/tests/test_jobs.py index 6d25cec..400f2d8 100644 --- a/tests/test_jobs.py +++ b/tests/test_jobs.py @@ -119,8 +119,10 @@ def test_job_download_files(tmp_path, get_mock_job): def test_batch_len(): - with pytest.warns(UserWarning): - batch = Batch([]) + batch = Batch() + assert len(batch) == 0 + + batch = Batch([]) assert len(batch) == 0 batch = Batch([Job.from_dict(SUCCEEDED_JOB), Job.from_dict(FAILED_JOB)])
Batch constructor should create an empty batch by default Currently, calling `jobs = Batch()` raises `TypeError: __init__() missing 1 required positional argument: 'jobs'`. To construct an empty batch, the user has to write `jobs = Batch([])`. It would be more intuitive if this were the default behavior without having to explicitly provide an empty list.
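The fix is the usual optional-mutable-argument idiom: default `jobs` to `None` and substitute a fresh list inside `__init__`, which keeps `Batch()` valid without sharing one list across instances. A reduced sketch:

```python
from typing import List, Optional

class Batch:
    def __init__(self, jobs: Optional[List] = None):
        # A None default (rather than jobs=[]) keeps each Batch's list private
        # while still letting Batch() mean "empty batch".
        self.jobs = jobs if jobs is not None else []

print(len(Batch().jobs))        # 0
print(len(Batch([1, 2]).jobs))  # 2
```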
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_jobs.py::test_batch_len" ]
[ "tests/test_jobs.py::test_batch_download", "tests/test_jobs.py::test_batch_complete_succeeded", "tests/test_jobs.py::test_job_complete_succeeded_failed_running", "tests/test_jobs.py::test_batch_filter_jobs", "tests/test_jobs.py::test_job_dict_transforms", "tests/test_jobs.py::test_batch_add", "tests/test_jobs.py::test_job_download_files", "tests/test_jobs.py::test_batch_any_expired", "tests/test_jobs.py::test_job_expired" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2020-12-08T01:14:31Z"
bsd-3-clause
ASFHyP3__hyp3-sdk-71
diff --git a/CHANGELOG.md b/CHANGELOG.md index 620eb3f..38529ae 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,7 @@ and uses [Semantic Versioning](https://semver.org/spec/v2.0.0.html). - `HyP3.prepare_insar_job` ### Changed +- HyP3 `Batch` objects are now iterable - HyP3 submit methods will always return a `Batch` containing the submitted job(s) - `HyP3.submit_job_dict` has been renamed to `HyP3.submit_prepared_jobs` and can submit one or more prepared job dictionaries. diff --git a/hyp3_sdk/jobs.py b/hyp3_sdk/jobs.py index 9167d02..38054fa 100644 --- a/hyp3_sdk/jobs.py +++ b/hyp3_sdk/jobs.py @@ -129,9 +129,6 @@ class Batch: jobs = [] self.jobs = jobs - def __len__(self): - return len(self.jobs) - def __add__(self, other: Union[Job, 'Batch']): if isinstance(other, Batch): return Batch(self.jobs + other.jobs) @@ -140,6 +137,12 @@ class Batch: else: raise TypeError(f"unsupported operand type(s) for +: '{type(self)}' and '{type(other)}'") + def __iter__(self): + return iter(self.jobs) + + def __len__(self): + return len(self.jobs) + def __repr__(self): return str([job.to_dict() for job in self.jobs])
ASFHyP3/hyp3-sdk
b8011c957ce5759bd64007c2116d202fdb5a6dae
diff --git a/tests/test_jobs.py b/tests/test_jobs.py index 400f2d8..dab034b 100644 --- a/tests/test_jobs.py +++ b/tests/test_jobs.py @@ -118,17 +118,6 @@ def test_job_download_files(tmp_path, get_mock_job): assert contents == 'foobar2' -def test_batch_len(): - batch = Batch() - assert len(batch) == 0 - - batch = Batch([]) - assert len(batch) == 0 - - batch = Batch([Job.from_dict(SUCCEEDED_JOB), Job.from_dict(FAILED_JOB)]) - assert len(batch) == 2 - - def test_batch_add(): a = Batch([Job.from_dict(SUCCEEDED_JOB)]) b = Batch([Job.from_dict(FAILED_JOB)]) @@ -147,6 +136,24 @@ def test_batch_add(): assert d.jobs[2].running() +def test_batch_iter(): + defined_jobs = [Job.from_dict(SUCCEEDED_JOB), Job.from_dict(FAILED_JOB)] + batch = Batch(defined_jobs) + for batch_job, defined_job in zip(batch, defined_jobs): + assert batch_job == defined_job + + +def test_batch_len(): + batch = Batch() + assert len(batch) == 0 + + batch = Batch([]) + assert len(batch) == 0 + + batch = Batch([Job.from_dict(SUCCEEDED_JOB), Job.from_dict(FAILED_JOB)]) + assert len(batch) == 2 + + def test_batch_complete_succeeded(): batch = Batch([Job.from_dict(SUCCEEDED_JOB), Job.from_dict(SUCCEEDED_JOB)]) assert batch.complete()
Batch should be iterable

Attempting to iterate over a Batch object currently fails with `TypeError: 'Batch' object is not iterable`.

```
> import hyp3_sdk
> api = hyp3_sdk.HyP3()
> jobs = api.find_jobs(name='refactor')
> sizes = [job['files'][0]['size'] for job in jobs]
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
TypeError: 'Batch' object is not iterable
```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_jobs.py::test_batch_iter" ]
[ "tests/test_jobs.py::test_job_download_files", "tests/test_jobs.py::test_batch_complete_succeeded", "tests/test_jobs.py::test_job_expired", "tests/test_jobs.py::test_job_complete_succeeded_failed_running", "tests/test_jobs.py::test_batch_add", "tests/test_jobs.py::test_batch_len", "tests/test_jobs.py::test_batch_download", "tests/test_jobs.py::test_batch_filter_jobs", "tests/test_jobs.py::test_batch_any_expired", "tests/test_jobs.py::test_job_dict_transforms" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-02-16T00:37:00Z"
bsd-3-clause
ASPP__pelita-412
diff --git a/pelita/player/__init__.py b/pelita/player/__init__.py
index cf429bca..bedaae24 100644
--- a/pelita/player/__init__.py
+++ b/pelita/player/__init__.py
@@ -1,7 +1,7 @@
 from .base import AbstractTeam, SimpleTeam, AbstractPlayer
 
-from .base import (StoppingPlayer, TestPlayer, SpeakingPlayer,
+from .base import (StoppingPlayer, SteppingPlayer, SpeakingPlayer,
                    RoundBasedPlayer, MoveExceptionPlayer, InitialExceptionPlayer,
                    DebuggablePlayer)
 
 
diff --git a/pelita/player/base.py b/pelita/player/base.py
index f07bba65..0e578f2f 100644
--- a/pelita/player/base.py
+++ b/pelita/player/base.py
@@ -516,7 +516,7 @@ class SpeakingPlayer(AbstractPlayer):
         self.say("Going %r." % (move,))
         return move
 
-class TestPlayer(AbstractPlayer):
+class SteppingPlayer(AbstractPlayer):
     """ A Player with predetermined set of moves.
 
     Parameters
ASPP/pelita
002ae9e325b1608a324d02749205cd70b4f6da2b
diff --git a/test/test_game_master.py b/test/test_game_master.py index 2b164441..e943d70b 100644 --- a/test/test_game_master.py +++ b/test/test_game_master.py @@ -5,7 +5,7 @@ import collections from pelita.datamodel import CTFUniverse from pelita.game_master import GameMaster, ManhattanNoiser, PlayerTimeout -from pelita.player import AbstractPlayer, SimpleTeam, StoppingPlayer, TestPlayer +from pelita.player import AbstractPlayer, SimpleTeam, StoppingPlayer, SteppingPlayer from pelita.viewer import AbstractViewer @@ -18,8 +18,8 @@ class TestGameMaster: # . # . .#3# ################## """) - team_1 = SimpleTeam("team1", TestPlayer([]), TestPlayer([])) - team_2 = SimpleTeam("team2", TestPlayer([]), TestPlayer([])) + team_1 = SimpleTeam("team1", SteppingPlayer([]), SteppingPlayer([])) + team_2 = SimpleTeam("team2", SteppingPlayer([]), SteppingPlayer([])) game_master = GameMaster(test_layout, [team_1, team_2], 4, 200) assert game_master.game_state["team_name"][0] == "" @@ -48,8 +48,8 @@ class TestGameMaster: # . # . .#3# ################## """) - team_1 = SimpleTeam('team1', TestPlayer([]), TestPlayer([])) - team_2 = SimpleTeam('team2', TestPlayer([]), TestPlayer([])) + team_1 = SimpleTeam('team1', SteppingPlayer([]), SteppingPlayer([])) + team_2 = SimpleTeam('team2', SteppingPlayer([]), SteppingPlayer([])) game_master = GameMaster(test_layout, [team_1, team_2], 4, 200) game_master.set_initial() @@ -64,7 +64,7 @@ class TestGameMaster: #2##### #####1# # . # . .#3# ################## """) - team_1 = SimpleTeam(TestPlayer([]), TestPlayer([])) + team_1 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([])) with pytest.raises(ValueError): GameMaster(test_layout_4, [team_1], 4, 200) @@ -76,9 +76,9 @@ class TestGameMaster: # . # . .#3# ################## """) - team_1 = SimpleTeam(TestPlayer([]), TestPlayer([])) - team_2 = SimpleTeam(TestPlayer([]), TestPlayer([])) - team_3 = SimpleTeam(TestPlayer([]), TestPlayer([])) + team_1 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([])) + team_2 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([])) + team_3 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([])) with pytest.raises(ValueError): GameMaster(test_layout_4, [team_1, team_2, team_3], 4, 200) @@ -259,7 +259,7 @@ class TestGame: return universe - teams = [SimpleTeam(TestPlayer('>-v>>>')), SimpleTeam(TestPlayer('<<-<<<'))] + teams = [SimpleTeam(SteppingPlayer('>-v>>>')), SimpleTeam(SteppingPlayer('<<-<<<'))] gm = GameMaster(test_start, teams, number_bots, 200) gm.set_initial() @@ -317,7 +317,7 @@ class TestGame: assert create_TestUniverse(test_sixth_round, black_score=gm.universe.KILLPOINTS, white_score=gm.universe.KILLPOINTS) == gm.universe - teams = [SimpleTeam(TestPlayer('>-v>>>')), SimpleTeam(TestPlayer('<<-<<<'))] + teams = [SimpleTeam(SteppingPlayer('>-v>>>')), SimpleTeam(SteppingPlayer('<<-<<<'))] # now play the full game gm = GameMaster(test_start, teams, number_bots, 200) gm.play() @@ -380,7 +380,7 @@ class TestGame: #0 . # #.. 
1# ###### """) - teams = [SimpleTeam(FailingPlayer()), SimpleTeam(TestPlayer("^"))] + teams = [SimpleTeam(FailingPlayer()), SimpleTeam(SteppingPlayer("^"))] gm = GameMaster(test_layout, teams, 2, 1) @@ -409,8 +409,8 @@ class TestGame: number_bots = 2 teams = [ - SimpleTeam(TestPlayer([(0,0)])), - SimpleTeam(TestPlayer([(0,0)])) + SimpleTeam(SteppingPlayer([(0,0)])), + SimpleTeam(SteppingPlayer([(0,0)])) ] gm = GameMaster(test_start, teams, number_bots, 200) @@ -439,7 +439,7 @@ class TestGame: NUM_ROUNDS = 2 # bot 1 moves east twice to eat the single food teams = [ - SimpleTeam(TestPlayer('>>')), + SimpleTeam(SteppingPlayer('>>')), SimpleTeam(StoppingPlayer()) ] gm = GameMaster(test_start, teams, 2, game_time=NUM_ROUNDS) @@ -473,7 +473,7 @@ class TestGame: teams = [ SimpleTeam(StoppingPlayer()), - SimpleTeam(TestPlayer('<<')) # bot 1 moves west twice to eat the single food + SimpleTeam(SteppingPlayer('<<')) # bot 1 moves west twice to eat the single food ] gm = GameMaster(test_start, teams, 2, game_time=NUM_ROUNDS) @@ -533,7 +533,7 @@ class TestGame: ) teams = [ SimpleTeam(StoppingPlayer()), - SimpleTeam(TestPlayer('<<<')) + SimpleTeam(SteppingPlayer('<<<')) ] # bot 1 eats all the food and the game stops gm = GameMaster(test_start, teams, 2, 100) @@ -566,7 +566,7 @@ class TestGame: ) teams = [ SimpleTeam(StoppingPlayer()), - SimpleTeam(TestPlayer('<<<')) + SimpleTeam(SteppingPlayer('<<<')) ] # bot 1 eats all the food and the game stops gm = GameMaster(test_start, teams, 2, 100) @@ -710,8 +710,8 @@ class TestGame: teams = [ - SimpleTeam(TestPlayer('>>>>')), - SimpleTeam(TestPlayer('<<<<')) + SimpleTeam(SteppingPlayer('>>>>')), + SimpleTeam(SteppingPlayer('<<<<')) ] gm = GameMaster(test_start, teams, number_bots, 4) @@ -806,8 +806,8 @@ class TestGame: # the game lasts two rounds, enough time for bot 1 to eat food NUM_ROUNDS = 5 teams = [ - SimpleTeam(TestPlayer('>--->')), - SimpleTeam(TestPlayer('<<<<<')) # bot 1 moves west twice to eat the single food + SimpleTeam(SteppingPlayer('>--->')), + SimpleTeam(SteppingPlayer('<<<<<')) # bot 1 moves west twice to eat the single food ] gm = GameMaster(test_start, teams, 2, game_time=NUM_ROUNDS) diff --git a/test/test_player_base.py b/test/test_player_base.py index 96998f8d..75fdadae 100644 --- a/test/test_player_base.py +++ b/test/test_player_base.py @@ -8,7 +8,7 @@ from pelita import datamodel from pelita.datamodel import CTFUniverse, east, stop, west from pelita.game_master import GameMaster from pelita.player import (AbstractPlayer, SimpleTeam, - RandomPlayer, StoppingPlayer, TestPlayer, + RandomPlayer, StoppingPlayer, SteppingPlayer, RoundBasedPlayer, SpeakingPlayer) @@ -29,7 +29,7 @@ class TestAbstractPlayer: ################## """) player_0 = StoppingPlayer() - player_1 = TestPlayer('^<') + player_1 = SteppingPlayer('^<') player_2 = StoppingPlayer() player_3 = StoppingPlayer() teams = [ @@ -277,8 +277,8 @@ class TestAbstractPlayer: assert set(sim_uni.enemy_food(p1._index)) == {(4, 3), (4, 2)} -class TestTestPlayer: - def test_test_players(self): +class TestSteppingPlayer: + def test_stepping_players(self): test_layout = ( """ ############ #0 . . 
1# @@ -287,8 +287,8 @@ class TestTestPlayer: movements_0 = [east, east] movements_1 = [west, west] teams = [ - SimpleTeam(TestPlayer(movements_0), TestPlayer(movements_0)), - SimpleTeam(TestPlayer(movements_1), TestPlayer(movements_1)) + SimpleTeam(SteppingPlayer(movements_0), SteppingPlayer(movements_0)), + SimpleTeam(SteppingPlayer(movements_1), SteppingPlayer(movements_1)) ] gm = GameMaster(test_layout, teams, 4, 2) @@ -311,8 +311,8 @@ class TestTestPlayer: ############ """) num_rounds = 5 teams = [ - SimpleTeam(TestPlayer('>v<^-)')), - SimpleTeam(TestPlayer('<^>v-)')) + SimpleTeam(SteppingPlayer('>v<^-)')), + SimpleTeam(SteppingPlayer('<^>v-)')) ] gm = GameMaster(test_layout, teams, 2, num_rounds) player0_expected_positions = [(1,1), (2,1), (2,2), (1,2), (1,1)] @@ -334,8 +334,8 @@ class TestTestPlayer: movements_0 = [east, east] movements_1 = [west, west] teams = [ - SimpleTeam(TestPlayer(movements_0), TestPlayer(movements_0)), - SimpleTeam(TestPlayer(movements_1), TestPlayer(movements_1)) + SimpleTeam(SteppingPlayer(movements_0), SteppingPlayer(movements_0)), + SimpleTeam(SteppingPlayer(movements_1), SteppingPlayer(movements_1)) ] gm = GameMaster(test_layout, teams, 4, 3) @@ -512,19 +512,19 @@ class TestSimpleTeam: assert team0.team_name == "my team" assert len(team0._players) == 0 - team1 = SimpleTeam("my team", TestPlayer([])) + team1 = SimpleTeam("my team", SteppingPlayer([])) assert team1.team_name == "my team" assert len(team1._players) == 1 - team2 = SimpleTeam("my other team", TestPlayer([]), TestPlayer([])) + team2 = SimpleTeam("my other team", SteppingPlayer([]), SteppingPlayer([])) assert team2.team_name == "my other team" assert len(team2._players) == 2 - team3 = SimpleTeam(TestPlayer([])) + team3 = SimpleTeam(SteppingPlayer([])) assert team3.team_name == "" assert len(team3._players) == 1 - team4 = SimpleTeam(TestPlayer([]), TestPlayer([])) + team4 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([])) assert team4.team_name == "" assert len(team4._players) == 2 @@ -535,7 +535,7 @@ class TestSimpleTeam: ###### """ ) dummy_universe = CTFUniverse.create(layout, 4) - team1 = SimpleTeam(TestPlayer('^')) + team1 = SimpleTeam(SteppingPlayer('^')) with pytest.raises(ValueError): team1.set_initial(0, dummy_universe, {}) diff --git a/test/test_simplesetup.py b/test/test_simplesetup.py index 1a1cb830..fafe8c43 100644 --- a/test/test_simplesetup.py +++ b/test/test_simplesetup.py @@ -5,7 +5,7 @@ import uuid import zmq import pelita -from pelita.player import AbstractPlayer, SimpleTeam, TestPlayer +from pelita.player import AbstractPlayer, SimpleTeam, SteppingPlayer from pelita.simplesetup import SimpleClient, SimpleServer, bind_socket, extract_port_range from pelita.player import RandomPlayer @@ -61,8 +61,8 @@ class TestSimpleSetup: client1_address = server.bind_addresses[0].replace("*", "localhost") client2_address = server.bind_addresses[1].replace("*", "localhost") - client1 = SimpleClient(SimpleTeam("team1", TestPlayer("^>>v<")), address=client1_address) - client2 = SimpleClient(SimpleTeam("team2", TestPlayer("^<<v>")), address=client2_address) + client1 = SimpleClient(SimpleTeam("team1", SteppingPlayer("^>>v<")), address=client1_address) + client2 = SimpleClient(SimpleTeam("team2", SteppingPlayer("^<<v>")), address=client2_address) client1.autoplay_process() client2.autoplay_process() @@ -92,7 +92,7 @@ class TestSimpleSetup: def _get_move(self, universe, game_state): pass - client1 = SimpleClient(SimpleTeam("team1", TestPlayer("^>>v<")), address=client1_address) + client1 = 
SimpleClient(SimpleTeam("team1", SteppingPlayer("^>>v<")), address=client1_address) client2 = SimpleClient(SimpleTeam("team2", FailingPlayer()), address=client2_address) client1.autoplay_process()
pytest warns about our TestPlayer

    WC1 /tmp/group1/test/test_drunk_player.py cannot collect test class 'TestPlayer' because it has a __init__ constructor

Maybe rename it?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_game_master.py::TestAbstracts::test_AbstractViewer", "test/test_game_master.py::TestAbstracts::test_BrokenViewer", "test/test_game_master.py::TestGame::test_play_step", "test/test_game_master.py::TestGame::test_kill_count", "test/test_game_master.py::TestGame::test_failing_player", "test/test_game_master.py::TestGame::test_viewer_may_change_gm", "test/test_game_master.py::TestGame::test_draw_on_timeout", "test/test_game_master.py::TestGame::test_win_on_timeout_team_0", "test/test_game_master.py::TestGame::test_must_not_move_after_last_timeout", "test/test_game_master.py::TestGame::test_game", "test/test_game_master.py::TestGame::test_win_on_eating_all", "test/test_game_master.py::TestGame::test_lose_on_eating_all", "test/test_game_master.py::TestGame::test_lose_5_timeouts", "test/test_game_master.py::TestGame::test_malicous_player", "test/test_game_master.py::TestGame::test_win_on_timeout_team_1", "test/test_game_master.py::TestUniverseNoiser::test_uniform_noise_manhattan", "test/test_game_master.py::TestUniverseNoiser::test_uniform_noise_4_bots_no_noise_manhattan", "test/test_game_master.py::TestUniverseNoiser::test_noise_manhattan_failure", "test/test_game_master.py::TestUniverseNoiser::test_uniform_noise_4_bots_manhattan", "test/test_game_master.py::TestGameMaster::test_team_names", "test/test_game_master.py::TestGameMaster::test_too_many_registered_teams", "test/test_game_master.py::TestGameMaster::test_team_names_in_simpleteam", "test/test_game_master.py::TestGameMaster::test_too_few_registered_teams", "test/test_simplesetup.py::TestSimpleSetup::test_simple_remote_game", "test/test_simplesetup.py::TestSimpleSetup::test_extract_port_range", "test/test_simplesetup.py::TestSimpleSetup::test_bind_socket", "test/test_simplesetup.py::TestSimpleSetup::test_failing_bots_do_not_crash_server_in_set_initial", "test/test_simplesetup.py::TestSimpleSetup::test_simple_game", "test/test_simplesetup.py::TestSimpleSetup::test_failing_bots_do_not_crash_server", "test/test_simplesetup.py::TestSimpleSetup::test_simple_failing_bots", "test/test_player_base.py::TestAbstractPlayer::test_time_spent", "test/test_player_base.py::TestAbstractPlayer::test_rnd", "test/test_player_base.py::TestAbstractPlayer::test_simulate_move", "test/test_player_base.py::TestAbstractPlayer::test_convenience", "test/test_player_base.py::TestSimpleTeam::test_player_api_methods", "test/test_player_base.py::TestSimpleTeam::test_too_few_players", "test/test_player_base.py::TestSimpleTeam::test_init", "test/test_player_base.py::TestRoundBasedPlayer::test_round_based_players", "test/test_player_base.py::TestSpeakingPlayer::test_demo_players", "test/test_player_base.py::TestRandomPlayerSeeds::test_random_seeds", "test/test_player_base.py::TestRandomPlayerSeeds::test_demo_players", "test/test_player_base.py::TestAbstracts::test_AbstractPlayer", "test/test_player_base.py::TestAbstracts::test_BrokenPlayer", "test/test_player_base.py::TestSteppingPlayer::test_shorthand", "test/test_player_base.py::TestSteppingPlayer::test_too_many_moves", "test/test_player_base.py::TestSteppingPlayer::test_stepping_players" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2017-08-29T08:38:58Z"
bsd-2-clause
ASPP__pelita-431
diff --git a/pelita/game_master.py b/pelita/game_master.py
index 42490344..e73049f5 100644
--- a/pelita/game_master.py
+++ b/pelita/game_master.py
@@ -4,6 +4,7 @@ import abc
 import random
 import sys
 import time
+from warnings import warn
 
 from . import datamodel
 from .datamodel import Bot, CTFUniverse
@@ -19,6 +20,10 @@ class PlayerTimeout(Exception):
 class PlayerDisconnected(Exception):
     pass
 
+class NoFoodWarning(Warning):
+    """ Warning about a layout with no food. """
+    pass
+
 class GameMaster:
     """ Controller of player moves and universe updates.
 
@@ -158,6 +163,12 @@ class GameMaster:
             "noise_sight_distance": self.noiser and self.noiser.sight_distance
         }
 
+        # Check that both teams have food, and raise a warning otherwise
+        for (team_id, food_count) in enumerate(self.game_state["food_to_eat"]):
+            if food_count == 0:
+                warn("Layout contains no food for team {}.".format(team_id),
+                     NoFoodWarning)
+
     @property
     def game_time(self):
         return self.game_state["game_time"]
ASPP/pelita
a412b067ac8ab680a3398f724ea6f97416de0ceb
diff --git a/test/test_game_master.py b/test/test_game_master.py
index e943d70b..69277e31 100644
--- a/test/test_game_master.py
+++ b/test/test_game_master.py
@@ -4,7 +4,7 @@ import unittest
 import collections
 
 from pelita.datamodel import CTFUniverse
-from pelita.game_master import GameMaster, ManhattanNoiser, PlayerTimeout
+from pelita.game_master import GameMaster, ManhattanNoiser, PlayerTimeout, NoFoodWarning
 from pelita.player import AbstractPlayer, SimpleTeam, StoppingPlayer, SteppingPlayer
 from pelita.viewer import AbstractViewer
 
@@ -83,6 +83,26 @@ class TestGameMaster:
         with pytest.raises(ValueError):
             GameMaster(test_layout_4, [team_1, team_2, team_3], 4, 200)
 
+    def test_no_food(self):
+        team_1 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([]))
+        team_2 = SimpleTeam(SteppingPlayer([]), SteppingPlayer([]))
+
+        both_starving_layout = (
+        """ ######
+            #0   #
+            #   1#
+            ###### """)
+        with pytest.warns(NoFoodWarning):
+            GameMaster(both_starving_layout, [team_1, team_2], 2, 1)
+
+        one_side_starving_layout = (
+        """ ######
+            #0  .#
+            #   1#
+            ###### """)
+        with pytest.warns(NoFoodWarning):
+            GameMaster(one_side_starving_layout, [team_1, team_2], 2, 1)
+
 class TestUniverseNoiser:
     def test_uniform_noise_manhattan(self):
         test_layout = (
@@ -106,7 +126,7 @@ class TestUniverseNoiser:
                      (4, 3), (5, 3), (6, 3), (7, 3), (7, 2), (6, 1), (5, 1), (4, 1), (3, 1) ]
 
         unittest.TestCase().assertCountEqual(position_bucket, expected, position_bucket)
-        
+
 
     def test_uniform_noise_4_bots_manhattan(self):
         test_layout = (
Give a warning, when a layout without food is defined?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_game_master.py::TestGame::test_lose_on_eating_all", "test/test_game_master.py::TestGame::test_lose_5_timeouts", "test/test_game_master.py::TestGame::test_draw_on_timeout", "test/test_game_master.py::TestGame::test_win_on_timeout_team_1", "test/test_game_master.py::TestGame::test_win_on_eating_all", "test/test_game_master.py::TestGame::test_must_not_move_after_last_timeout", "test/test_game_master.py::TestGame::test_viewer_may_change_gm", "test/test_game_master.py::TestGame::test_failing_player", "test/test_game_master.py::TestGame::test_play_step", "test/test_game_master.py::TestGame::test_malicous_player", "test/test_game_master.py::TestGame::test_kill_count", "test/test_game_master.py::TestGame::test_game", "test/test_game_master.py::TestGame::test_win_on_timeout_team_0", "test/test_game_master.py::TestUniverseNoiser::test_uniform_noise_manhattan", "test/test_game_master.py::TestUniverseNoiser::test_noise_manhattan_failure", "test/test_game_master.py::TestUniverseNoiser::test_uniform_noise_4_bots_no_noise_manhattan", "test/test_game_master.py::TestUniverseNoiser::test_uniform_noise_4_bots_manhattan", "test/test_game_master.py::TestAbstracts::test_AbstractViewer", "test/test_game_master.py::TestAbstracts::test_BrokenViewer", "test/test_game_master.py::TestGameMaster::test_too_many_registered_teams", "test/test_game_master.py::TestGameMaster::test_team_names_in_simpleteam", "test/test_game_master.py::TestGameMaster::test_no_food", "test/test_game_master.py::TestGameMaster::test_team_names", "test/test_game_master.py::TestGameMaster::test_too_few_registered_teams" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2017-10-26T20:25:33Z"
bsd-2-clause
ASPP__pelita-601
diff --git a/pelita/game.py b/pelita/game.py
index b42b52ae..1ecf0a00 100644
--- a/pelita/game.py
+++ b/pelita/game.py
@@ -13,7 +13,7 @@ from warnings import warn
 from . import layout
 from .exceptions import FatalException, NonFatalException, NoFoodWarning
 from .gamestate_filters import noiser
-from .layout import initial_positions, get_legal_moves
+from .layout import initial_positions, get_legal_positions
 from .libpelita import get_python_process, SimplePublisher
 from .network import bind_socket, setup_controller
 from .player.team import make_team
@@ -316,7 +316,7 @@ def setup_game(team_specs, *, layout_dict, max_rounds=300, layout_name="", seed=
         layout_name=None,
         team_names=[None] * 2,
         fatal_errors=[[], []],
-        errors=[[], []],
+        errors=[{}, {}],
         whowins=None,
         rnd=Random(seed),
         viewers=[],
@@ -519,6 +519,7 @@ def play_turn(game_state):
     game_state.update(update_round_counter(game_state))
 
     turn = game_state['turn']
+    round = game_state['round']
     team = turn % 2
     # request a new move from the current team
     try:
@@ -552,11 +553,9 @@ def play_turn(game_state):
         # are collected and added to team_errors
         exception_event = {
             'type': e.__class__.__name__,
-            'description': str(e),
-            'turn': game_state['turn'],
-            'round': game_state['round'],
+            'description': str(e)
         }
-        game_state['errors'][team].append(exception_event)
+        game_state['errors'][team][(round, turn)] = exception_event
         position = None
         game_print(turn, f"{type(e).__name__}: {e}")
 
@@ -624,25 +623,34 @@ def apply_move(gamestate, bot_position):
     # previous errors
     team_errors = gamestate["errors"][team]
 
-    # check is step is legal
-    legal_moves = get_legal_moves(walls, gamestate["bots"][gamestate["turn"]])
-    if bot_position not in legal_moves:
-        bad_bot_position = bot_position
-        bot_position = legal_moves[gamestate['rnd'].randint(0, len(legal_moves)-1)]
-        error_dict = {
-            "turn": turn,
-            "round": n_round,
-            "reason": 'illegal move',
-            "bot_position": bot_position
-        }
-        game_print(turn, f"Illegal move {bad_bot_position} not in {sorted(legal_moves)}. Choosing a random move instead: {bot_position}")
-        team_errors.append(error_dict)
+
+    # the allowed moves for the current bot
+    legal_positions = get_legal_positions(walls, gamestate["bots"][gamestate["turn"]])
+
+    # unless we have already made an error, check if we made a legal move
+    if not (n_round, turn) in team_errors:
+        if bot_position not in legal_positions:
+            error_dict = {
+                "reason": 'illegal move',
+                "bot_position": bot_position
+            }
+            # add the error to the team’s errors
+            game_print(turn, f"Illegal position. {bot_position} not in legal positions: {sorted(legal_positions)}.")
+            team_errors[(n_round, turn)] = error_dict
 
     # only execute move if errors not exceeded
     gamestate.update(check_gameover(gamestate))
     if gamestate['gameover']:
         return gamestate
 
+    # Now check if we must make a random move
+    if (n_round, turn) in team_errors:
+        # There was an error for this round and turn
+        # but the game is not over.
+        # We execute a random move
+        bot_position = gamestate['rnd'].choice(legal_positions)
+        game_print(turn, f"Setting a legal position at random: {bot_position}")
+
     # take step
     bots[turn] = bot_position
     _logger.info(f"Bot {turn} moves to {bot_position}.")
 
diff --git a/pelita/layout.py b/pelita/layout.py
index 14c96011..1ad10138 100644
--- a/pelita/layout.py
+++ b/pelita/layout.py
@@ -430,8 +430,9 @@ def initial_positions(walls):
     return [left[0], right[0], left[1], right[1]]
 
 
-def get_legal_moves(walls, bot_position):
-    """ Returns legal moves given a position.
+def get_legal_positions(walls, bot_position):
+    """ Returns all legal positions that a bot at `bot_position`
+    can go to.
 
     Parameters
     ----------
@@ -443,12 +444,12 @@ def get_legal_moves(walls, bot_position):
     Returns
     -------
     list
-        legal moves.
+        legal positions
 
     Raises
     ------
     ValueError
-        if position invalid or on wall
+        if bot_position invalid or on wall
     """
     width, height = wall_dimensions(walls)
     if not (0, 0) <= bot_position < (width, height):
ASPP/pelita
a670c668257a371587a2abfe83acf12043d16aa1
diff --git a/test/test_game.py b/test/test_game.py index bc19a73f..1476a615 100644 --- a/test/test_game.py +++ b/test/test_game.py @@ -12,7 +12,7 @@ from textwrap import dedent import numpy as np from pelita import game, layout -from pelita.game import initial_positions, get_legal_moves, apply_move, run_game, setup_game, play_turn +from pelita.game import initial_positions, get_legal_positions, apply_move, run_game, setup_game, play_turn from pelita.player import stepping_player @@ -154,23 +154,23 @@ def test_initial_positions_same_in_layout(layout_name): out = initial_positions(walls) assert out == exp -def test_get_legal_moves_basic(): +def test_get_legal_positions_basic(): """Check that the output of legal moves contains all legal moves for one example layout""" l = layout.get_layout_by_name(layout_name="layout_small_without_dead_ends_100") parsed_l = layout.parse_layout(l) - legal_moves = get_legal_moves(parsed_l["walls"], parsed_l["bots"][0]) + legal_positions = get_legal_positions(parsed_l["walls"], parsed_l["bots"][0]) exp = [(2, 5), (1, 6), (1, 5)] - assert legal_moves == exp + assert legal_positions == exp @pytest.mark.parametrize('layout_t', [layout.get_random_layout() for _ in range(50)]) @pytest.mark.parametrize('bot_idx', (0, 1, 2, 3)) -def test_get_legal_moves_random(layout_t, bot_idx): +def test_get_legal_positions_random(layout_t, bot_idx): """Check that the output of legal moves returns only moves that are 1 field away and not inside a wall""" layout_name, layout_string = layout_t # get_random_layout returns a tuple of name and string parsed_l = layout.parse_layout(layout_string) bot = parsed_l["bots"][bot_idx] - legal_moves = get_legal_moves(parsed_l["walls"], bot) - for move in legal_moves: + legal_positions = get_legal_positions(parsed_l["walls"], bot) + for move in legal_positions: assert move not in parsed_l["walls"] assert abs((move[0] - bot[0])+(move[1] - bot[1])) <= 1 @@ -180,21 +180,23 @@ def test_play_turn_apply_error(turn): """check that quits when there are too many errors""" game_state = setup_random_basic_gamestate() error_dict = { - "turn": 0, - "round": 0, "reason": 'illegal move', "bot_position": (1, 2) } game_state["turn"] = turn team = turn % 2 - game_state["errors"] = [[error_dict, error_dict, error_dict, error_dict], - [error_dict, error_dict, error_dict, error_dict]] - illegal_move = game_state["walls"][0] - game_state_new = apply_move(game_state, illegal_move) + game_state["errors"] = [{(r, t): error_dict for r in (1, 2) for t in (0, 1)}, + {(r, t): error_dict for r in (1, 2) for t in (0, 1)}] + # we pretend that two rounds have already been played + # so that the error dictionaries are sane + game_state["round"] = 3 + + illegal_position = game_state["walls"][0] + game_state_new = apply_move(game_state, illegal_position) assert game_state_new["gameover"] assert len(game_state_new["errors"][team]) == 5 assert game_state_new["whowins"] == int(not team) - assert set(game_state_new["errors"][team][4].keys()) == set(["turn", "round", "reason", "bot_position"]) + assert set(game_state_new["errors"][team][(3, turn)].keys()) == set(["reason", "bot_position"]) @pytest.mark.parametrize('turn', (0, 1, 2, 3)) def test_play_turn_fatal(turn): @@ -205,22 +207,22 @@ def test_play_turn_fatal(turn): fatal_list = [{}, {}] fatal_list[team] = {"error":True} game_state["fatal_errors"] = fatal_list - move = get_legal_moves(game_state["walls"], game_state["bots"][turn]) + move = get_legal_positions(game_state["walls"], game_state["bots"][turn]) game_state_new = 
apply_move(game_state, move[0]) assert game_state_new["gameover"] assert game_state_new["whowins"] == int(not team) @pytest.mark.parametrize('turn', (0, 1, 2, 3)) -def test_play_turn_illegal_move(turn): +def test_play_turn_illegal_position(turn): """check that illegal moves are added to error dict and bot still takes move""" game_state = setup_random_basic_gamestate() game_state["turn"] = turn team = turn % 2 - illegal_move = game_state["walls"][0] - game_state_new = apply_move(game_state, illegal_move) + illegal_position = game_state["walls"][0] + game_state_new = apply_move(game_state, illegal_position) assert len(game_state_new["errors"][team]) == 1 - assert set(game_state_new["errors"][team][0].keys()) == set(["turn", "round", "reason", "bot_position"]) - assert game_state_new["bots"][turn] in get_legal_moves(game_state["walls"], game_state["bots"][turn]) + assert game_state_new["errors"][team][(1, turn)].keys() == set(["reason", "bot_position"]) + assert game_state_new["bots"][turn] in get_legal_positions(game_state["walls"], game_state["bots"][turn]) @pytest.mark.parametrize('turn', (0, 1, 2, 3)) @pytest.mark.parametrize('which_food', (0, 1)) @@ -857,9 +859,9 @@ def test_play_turn_move(): "fatal_errors": [{}, {}], "rnd": random.Random() } - legal_moves = get_legal_moves(game_state["walls"], game_state["bots"][turn]) - game_state_new = apply_move(game_state, legal_moves[0]) - assert game_state_new["bots"][turn] == legal_moves[0] + legal_positions = get_legal_positions(game_state["walls"], game_state["bots"][turn]) + game_state_new = apply_move(game_state, legal_positions[0]) + assert game_state_new["bots"][turn] == legal_positions[0] diff --git a/test/test_game_master.py b/test/test_game_master.py index d8340847..619ec625 100644 --- a/test/test_game_master.py +++ b/test/test_game_master.py @@ -98,7 +98,7 @@ class TestGameMaster: assert state['bots'] == bot_pos state = run_game([stopping_player] * 2, layout_dict=parsed, max_rounds=5) assert state['fatal_errors'] == [[], []] - assert state['errors'] == [[], []] + assert state['errors'] == [{}, {}] else: with pytest.raises(ValueError): setup_game([stopping_player] * 2, layout_dict=parsed, max_rounds=300) diff --git a/test/test_layout.py b/test/test_layout.py index f2be16ee..dfca69dd 100644 --- a/test/test_layout.py +++ b/test/test_layout.py @@ -257,13 +257,13 @@ def test_equal_positions(): assert layout['bots'] == [(1, 1)]*4 -@pytest.mark.parametrize('pos, legal_moves', [ +@pytest.mark.parametrize('pos, legal_positions', [ ((2, 2), {(2, 1), (2, 3), (1, 2), (3, 2), (2, 2)}), ((1, 1), {(1, 2), (2, 1), (1, 1)}), ((4, 2), {(4, 2), (4, 1), (4, 3), (3, 2)}), ((4, 1), {(4, 2), (4, 1)}) ]) -def test_legal_moves(pos, legal_moves): +def test_legal_positions(pos, legal_positions): test_layout = ( """ ###### # # # @@ -271,7 +271,7 @@ def test_legal_moves(pos, legal_moves): # # ###### """) parsed = parse_layout(test_layout) - assert set(get_legal_moves(parsed['walls'], pos)) == legal_moves + assert set(get_legal_positions(parsed['walls'], pos)) == legal_positions @pytest.mark.parametrize('pos', [ @@ -281,7 +281,7 @@ def test_legal_moves(pos, legal_moves): (7, 7), (3, 1) ]) -def test_legal_moves_fail(pos): +def test_legal_positions_fail(pos): test_layout = ( """ ###### # # # @@ -290,4 +290,4 @@ def test_legal_moves_fail(pos): ###### """) parsed = parse_layout(test_layout) with pytest.raises(ValueError): - get_legal_moves(parsed['walls'], pos) + get_legal_positions(parsed['walls'], pos) diff --git a/test/test_players.py b/test/test_players.py index 
2f5bc47b..047ce4d7 100644 --- a/test/test_players.py +++ b/test/test_players.py @@ -61,5 +61,5 @@ def test_players(player): # ensure that all test players ran correctly assert state['fatal_errors'] == [[], []] # our test players should never return invalid moves - assert state['errors'] == [[], []] + assert state['errors'] == [{}, {}] diff --git a/test/test_remote_game.py b/test/test_remote_game.py index 22d95c85..2951864e 100644 --- a/test/test_remote_game.py +++ b/test/test_remote_game.py @@ -30,7 +30,7 @@ def test_remote_call_pelita(remote_teams): res, stdout, stderr = libpelita.call_pelita(remote_teams, rounds=30, filter='small', viewer='null', dump=None, seed=None) assert res['whowins'] == 1 assert res['fatal_errors'] == [[], []] - assert res['errors'] == [[], []] + assert res['errors'] == [{}, {}] def test_remote_run_game(remote_teams): @@ -44,4 +44,4 @@ def test_remote_run_game(remote_teams): state = pelita.game.run_game(remote_teams, max_rounds=30, layout_dict=pelita.layout.parse_layout(layout)) assert state['whowins'] == 1 assert state['fatal_errors'] == [[], []] - assert state['errors'] == [[], []] + assert state['errors'] == [{}, {}]
[discussion] Is it possible to have more than one error per (round, turn)?

If not, then I would suggest to change the error list to a dict with key (round, turn) as it would make checking for errors in the current round simpler. (And an error in the current round would always give you a random move.)

Right now, we have a bug that a timeout gives two errors. I would like to make it a rule that there can be at most one error per turn.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_remote_game.py::test_remote_run_game", "test/test_remote_game.py::test_remote_call_pelita", "test/test_game_master.py::TestGameMaster::test_setup_game_with_wrong_bots_in_layout[\\n", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots1]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots9]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_too_few_bots_in_layout[\\n", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots2]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots6]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots3]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots7]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots0]", "test/test_game_master.py::TestGameMaster::test_too_many_registered_teams", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots4]", "test/test_game_master.py::TestGameMaster::test_team_names", "test/test_game_master.py::TestGameMaster::test_no_food[\\n", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots5]", "test/test_game_master.py::TestGameMaster::test_too_few_registered_teams", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots10]", "test/test_game_master.py::TestGameMaster::test_setup_game_with_different_number_of_bots[bots8]", "test/test_players.py::test_players[smart_random_player]", "test/test_players.py::test_players[nq_random_player]", "test/test_players.py::test_players[smart_eating_player]", "test/test_players.py::test_players[random_explorer_player]", "test/test_players.py::test_players[food_eating_player]", "test/test_players.py::test_players[random_player]", "test/test_players.py::TestNQRandom_Player::test_path", "test/test_players.py::TestNQRandom_Player::test_demo_players", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_004]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_013]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_077]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_071]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_029]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t18]", "test/test_game.py::test_get_legal_positions_random[2-layout_t28]", "test/test_game.py::test_get_legal_positions_random[2-layout_t3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_021]", "test/test_game.py::test_get_legal_positions_random[1-layout_t9]", "test/test_game.py::test_get_legal_positions_random[1-layout_t16]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_089]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags14-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_063]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_100]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_051]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_096]", "test/test_game.py::test_get_legal_positions_random[0-layout_t46]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_031]", "test/test_game.py::test_get_legal_positions_random[2-layout_t31]", "test/test_game.py::test_get_legal_positions_random[1-layout_t18]", "test/test_game.py::test_get_legal_positions_random[2-layout_t46]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags7-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_055]", "test/test_game.py::test_error_finishes_game[team_errors7-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_095]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_046]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_028]", "test/test_game.py::test_get_legal_positions_random[2-layout_t38]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_065]", "test/test_game.py::test_play_turn_illegal_position[3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_024]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_055]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_017]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_071]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_015]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_099]", "test/test_game.py::test_get_legal_positions_random[2-layout_t19]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_026]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_057]", "test/test_game.py::test_play_turn_illegal_position[1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_100]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_045]", "test/test_game.py::test_initial_positions[\\n", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_045]", "test/test_game.py::test_play_turn_eating_enemy_food[1-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_035]", "test/test_game.py::test_get_legal_positions_random[0-layout_t8]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t27]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags5-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_096]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_083]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_087]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_038]", "test/test_game.py::test_error_finishes_game[team_errors6-False]", "test/test_game.py::test_get_legal_positions_random[3-layout_t30]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_074]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_098]", 
"test/test_game.py::test_initial_positions_same_in_layout_random[layout_t22]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_021]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_014]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_047]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_054]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_047]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_036]", "test/test_game.py::test_get_legal_positions_random[0-layout_t0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_033]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_063]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_034]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_088]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_094]", "test/test_game.py::test_get_legal_positions_random[3-layout_t49]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_092]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_041]", "test/test_game.py::test_get_legal_positions_random[0-layout_t13]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_057]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_048]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_064]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_050]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_007]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags7-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_064]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_051]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_089]", "test/test_game.py::test_get_legal_positions_random[0-layout_t19]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_026]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_053]", "test/test_game.py::test_cascade_kill_2", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags12-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_059]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_016]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_017]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_093]", "test/test_game.py::test_cascade_suicide", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_037]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_067]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_040]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_081]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_045]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_083]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_037]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_001]", "test/test_game.py::test_get_legal_positions_random[3-layout_t7]", "test/test_game.py::test_get_legal_positions_random[2-layout_t37]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_051]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_097]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_040]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_009]", "test/test_game.py::test_error_finishes_game[team_errors8-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_003]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_064]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_012]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_072]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_005]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_035]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags5-1]", "test/test_game.py::test_get_legal_positions_random[0-layout_t16]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_006]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_079]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_085]", "test/test_game.py::test_error_finishes_game[team_errors0-False]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t12]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_090]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_080]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_081]", "test/test_game.py::test_get_legal_positions_random[2-layout_t2]", "test/test_game.py::test_no_initial_positions_possible[\\n", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags9-3]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t24]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_020]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_052]", "test/test_game.py::test_get_legal_positions_random[0-layout_t10]", "test/test_game.py::test_play_turn_apply_error[0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_084]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_027]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_006]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_089]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags12-3]", 
"test/test_game.py::test_finished_when_no_food[2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_037]", "test/test_game.py::test_get_legal_positions_random[1-layout_t42]", "test/test_game.py::test_get_legal_positions_random[1-layout_t29]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_044]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_063]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags7-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_080]", "test/test_game.py::test_bot_does_not_eat_own_food", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_048]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t21]", "test/test_game.py::test_cascade_kill", "test/test_game.py::test_get_legal_positions_random[3-layout_t46]", "test/test_game.py::test_remote_errors", "test/test_game.py::test_cascade_kill_rescue_1", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_076]", "test/test_game.py::test_get_legal_positions_random[1-layout_t43]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_058]", "test/test_game.py::test_get_legal_positions_random[3-layout_t18]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_087]", "test/test_game.py::test_get_legal_positions_random[3-layout_t9]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags1-3]", "test/test_game.py::test_get_legal_positions_random[0-layout_t39]", "test/test_game.py::test_get_legal_positions_random[2-layout_t4]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_084]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_009]", "test/test_game.py::test_play_turn_fatal[3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_005]", "test/test_game.py::test_play_turn_fatal[1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_001]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags13-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_074]", "test/test_game.py::test_get_legal_positions_random[3-layout_t4]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_044]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_095]", "test/test_game.py::test_get_legal_positions_random[1-layout_t46]", "test/test_game.py::test_get_legal_positions_random[3-layout_t29]", "test/test_game.py::test_play_turn_eating_enemy_food[1-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_050]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_010]", "test/test_game.py::test_max_rounds", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_070]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_098]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_022]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_039]", "test/test_game.py::test_get_legal_positions_random[2-layout_t9]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_088]", "test/test_game.py::test_get_legal_positions_random[0-layout_t18]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t16]", "test/test_game.py::test_get_legal_positions_random[2-layout_t34]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_038]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_093]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_086]", "test/test_game.py::test_get_legal_positions_random[3-layout_t44]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_050]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_084]", "test/test_game.py::test_get_legal_positions_random[2-layout_t1]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags1-1]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags10-2]", "test/test_game.py::test_get_legal_positions_random[1-layout_t17]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_001]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_033]", "test/test_game.py::test_get_legal_positions_random[0-layout_t15]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_069]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_086]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_015]", "test/test_game.py::test_get_legal_positions_random[3-layout_t35]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_066]", "test/test_game.py::test_get_legal_positions_random[1-layout_t47]", "test/test_game.py::test_get_legal_positions_random[2-layout_t26]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_018]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_065]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_032]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_019]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_070]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags15-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_054]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_100]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags3-3]", "test/test_game.py::test_get_legal_positions_random[0-layout_t37]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_006]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_017]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_018]", "test/test_game.py::test_get_legal_positions_random[1-layout_t14]", "test/test_game.py::test_get_legal_positions_random[1-layout_t28]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_078]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_079]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_017]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_034]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_065]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_006]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_055]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_068]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_047]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_088]", "test/test_game.py::test_get_legal_positions_random[0-layout_t20]", "test/test_game.py::test_get_legal_positions_random[1-layout_t20]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_044]", "test/test_game.py::test_get_legal_positions_random[0-layout_t36]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_075]", "test/test_game.py::test_minimal_remote_game", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_049]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_070]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_073]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_065]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_027]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_075]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_039]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_039]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_061]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags9-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_090]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_007]", "test/test_game.py::test_get_legal_positions_random[1-layout_t45]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_095]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_023]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_093]", "test/test_game.py::test_get_legal_positions_random[0-layout_t24]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_088]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t28]", "test/test_game.py::test_get_legal_positions_random[2-layout_t18]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_063]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t4]", "test/test_game.py::test_get_legal_positions_random[1-layout_t31]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_094]", "test/test_game.py::test_get_legal_positions_random[0-layout_t28]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_020]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_065]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t5]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_047]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t14]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_037]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_092]", "test/test_game.py::test_get_legal_positions_random[2-layout_t36]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_077]", "test/test_game.py::test_play_turn_killing[0]", "test/test_game.py::test_bad_move_function[1]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t1]", "test/test_game.py::test_multiple_enemies_killing", "test/test_game.py::test_play_turn_move", "test/test_game.py::test_play_turn_maxrounds[score1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_055]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_030]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_081]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t17]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags3-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_012]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_075]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_043]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_026]", "test/test_game.py::test_get_legal_positions_random[0-layout_t29]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_044]", "test/test_game.py::test_minimal_game", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_062]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_060]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t8]", "test/test_game.py::test_get_legal_positions_random[0-layout_t40]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_002]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_069]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_017]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_083]", "test/test_game.py::test_get_legal_positions_random[2-layout_t45]", "test/test_game.py::test_get_legal_positions_random[3-layout_t16]", "test/test_game.py::test_get_legal_positions_random[3-layout_t19]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_032]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_040]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_068]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_014]", "test/test_game.py::test_get_legal_positions_random[1-layout_t23]", "test/test_game.py::test_get_legal_positions_random[3-layout_t34]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_080]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_029]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_076]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_025]", "test/test_game.py::test_error_finishes_game[team_errors13-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_049]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_072]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_051]", "test/test_game.py::test_get_legal_positions_random[3-layout_t11]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_090]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags8-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_090]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_024]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_060]", "test/test_game.py::test_get_legal_positions_random[0-layout_t1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_007]", "test/test_game.py::test_get_legal_positions_random[3-layout_t1]", "test/test_game.py::test_get_legal_positions_random[0-layout_t48]", "test/test_game.py::test_get_legal_positions_random[0-layout_t11]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_005]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t25]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_008]", "test/test_game.py::test_get_legal_positions_random[3-layout_t13]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_031]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_067]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_029]", "test/test_game.py::test_get_legal_positions_random[1-layout_t5]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_024]", "test/test_game.py::test_get_legal_positions_random[0-layout_t49]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_067]", "test/test_game.py::test_cascade_kill_rescue_2", "test/test_game.py::test_get_legal_positions_random[1-layout_t38]", "test/test_game.py::test_get_legal_positions_random[0-layout_t14]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_052]", "test/test_game.py::test_get_legal_positions_random[0-layout_t21]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_052]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_085]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_030]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags2-3]", "test/test_game.py::test_get_legal_positions_random[0-layout_t3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_008]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags5-2]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_026]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_056]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_093]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_012]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_058]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_078]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_059]", "test/test_game.py::test_get_legal_positions_random[3-layout_t6]", "test/test_game.py::test_get_legal_positions_random[1-layout_t19]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_028]", "test/test_game.py::test_get_legal_positions_random[0-layout_t25]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_096]", "test/test_game.py::test_bad_move_function[0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_056]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_090]", "test/test_game.py::test_get_legal_positions_random[1-layout_t22]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_036]", "test/test_game.py::test_play_turn_eating_enemy_food[0-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_036]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_064]", "test/test_game.py::test_get_legal_positions_random[1-layout_t8]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_070]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_061]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags11-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_033]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t29]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_033]", "test/test_game.py::test_get_legal_positions_random[0-layout_t47]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_069]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t6]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_028]", "test/test_game.py::test_get_legal_positions_random[1-layout_t33]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_056]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_016]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_003]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_030]", "test/test_game.py::test_get_legal_positions_basic", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_027]", "test/test_game.py::test_finished_when_no_food[0]", "test/test_game.py::test_error_finishes_game[team_errors3-False]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_079]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_002]", 
"test/test_game.py::test_initial_positions_same_in_layout_random[layout_t23]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_085]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_078]", "test/test_game.py::test_get_legal_positions_random[3-layout_t20]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_091]", "test/test_game.py::test_play_turn_eating_enemy_food[0-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_053]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_036]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_047]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_016]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_045]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_065]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_011]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags14-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_096]", "test/test_game.py::test_get_legal_positions_random[0-layout_t5]", "test/test_game.py::test_get_legal_positions_random[2-layout_t24]", "test/test_game.py::test_get_legal_positions_random[0-layout_t26]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_032]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_054]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags6-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_099]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_024]", "test/test_game.py::test_play_turn_friendly_fire[setups0]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags0-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_003]", "test/test_game.py::test_get_legal_positions_random[3-layout_t15]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t20]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_099]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags11-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_096]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_053]", "test/test_game.py::test_get_legal_positions_random[1-layout_t3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_072]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_086]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_035]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_025]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_057]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_021]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_022]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_089]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_040]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_016]", "test/test_game.py::test_play_turn_illegal_position[2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_097]", "test/test_game.py::test_get_legal_positions_random[1-layout_t35]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_012]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_066]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_050]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_100]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_048]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_062]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_061]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_057]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_061]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags4-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_006]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_014]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_072]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_021]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_098]", "test/test_game.py::test_get_legal_positions_random[1-layout_t37]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_098]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags10-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_068]", "test/test_game.py::test_play_turn_fatal[2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_094]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_059]", "test/test_game.py::test_get_legal_positions_random[3-layout_t36]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_027]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_042]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_037]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_082]", "test/test_game.py::test_play_turn_killing[1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_054]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_002]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_017]", "test/test_game.py::test_get_legal_positions_random[3-layout_t32]", "test/test_game.py::test_get_legal_positions_random[2-layout_t11]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_030]", "test/test_game.py::test_error_finishes_game[team_errors2-False]", 
"test/test_game.py::test_get_legal_positions_random[1-layout_t26]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_011]", "test/test_game.py::test_get_legal_positions_random[2-layout_t29]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_079]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_099]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_045]", "test/test_game.py::test_get_legal_positions_random[1-layout_t40]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_050]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_092]", "test/test_game.py::test_get_legal_positions_random[2-layout_t44]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_100]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_031]", "test/test_game.py::test_get_legal_positions_random[3-layout_t14]", "test/test_game.py::test_get_legal_positions_random[2-layout_t16]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_029]", "test/test_game.py::test_play_turn_apply_error[1]", "test/test_game.py::test_get_legal_positions_random[0-layout_t38]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_035]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags10-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_061]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags1-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_052]", "test/test_game.py::test_get_legal_positions_random[2-layout_t49]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_016]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_010]", "test/test_game.py::test_get_legal_positions_random[1-layout_t48]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_005]", "test/test_game.py::test_get_legal_positions_random[2-layout_t23]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_086]", "test/test_game.py::test_get_legal_positions_random[0-layout_t12]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_046]", "test/test_game.py::test_get_legal_positions_random[0-layout_t35]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_089]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_049]", "test/test_game.py::test_get_legal_positions_random[1-layout_t10]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_034]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_018]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_034]", "test/test_game.py::test_get_legal_positions_random[0-layout_t33]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_014]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_069]", "test/test_game.py::test_update_round_counter", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_046]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_094]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_023]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_032]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_079]", "test/test_game.py::test_get_legal_positions_random[3-layout_t39]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_045]", "test/test_game.py::test_get_legal_positions_random[0-layout_t27]", "test/test_game.py::test_get_legal_positions_random[1-layout_t24]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_082]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_059]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_009]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_031]", "test/test_game.py::test_get_legal_positions_random[1-layout_t27]", "test/test_game.py::test_get_legal_positions_random[3-layout_t10]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_070]", "test/test_game.py::test_get_legal_positions_random[1-layout_t36]", "test/test_game.py::test_get_legal_positions_random[1-layout_t41]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_067]", "test/test_game.py::test_get_legal_positions_random[3-layout_t38]", "test/test_game.py::test_get_legal_positions_random[0-layout_t7]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_013]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_055]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_072]", "test/test_game.py::test_get_legal_positions_random[1-layout_t21]", "test/test_game.py::test_play_turn_eating_enemy_food[0-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_097]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_042]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_038]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags12-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_057]", "test/test_game.py::test_get_legal_positions_random[0-layout_t4]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags10-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_041]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_067]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_063]", "test/test_game.py::test_get_legal_positions_random[3-layout_t27]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags0-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_083]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_041]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_046]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_097]", "test/test_game.py::test_get_legal_positions_random[2-layout_t22]", "test/test_game.py::test_get_legal_positions_random[3-layout_t43]", "test/test_game.py::test_play_turn_friendly_fire[setups3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_005]", "test/test_game.py::test_get_legal_positions_random[0-layout_t30]", "test/test_game.py::test_error_finishes_game[team_errors1-False]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_064]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_077]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_050]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_082]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_010]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_089]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_019]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_054]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_011]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_070]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_043]", "test/test_game.py::test_finished_when_no_food[3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_097]", "test/test_game.py::test_play_turn_eating_enemy_food[1-3]", "test/test_game.py::test_error_finishes_game[team_errors14-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_056]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags15-1]", "test/test_game.py::test_get_legal_positions_random[1-layout_t0]", "test/test_game.py::test_get_legal_positions_random[2-layout_t40]", "test/test_game.py::test_get_legal_positions_random[0-layout_t17]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_048]", "test/test_game.py::test_get_legal_positions_random[3-layout_t3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_073]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_002]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_022]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_097]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags8-2]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags2-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_076]", "test/test_game.py::test_get_legal_positions_random[0-layout_t22]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_021]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_076]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_014]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_075]", "test/test_game.py::test_get_legal_positions_random[1-layout_t44]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_037]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_049]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t26]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_042]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_086]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_043]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_073]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_046]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_088]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_081]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags2-2]", "test/test_game.py::test_get_legal_positions_random[3-layout_t12]", "test/test_game.py::test_get_legal_positions_random[3-layout_t23]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_100]", "test/test_game.py::test_get_legal_positions_random[3-layout_t24]", "test/test_game.py::test_get_legal_positions_random[0-layout_t41]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_068]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_076]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_043]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_031]", "test/test_game.py::test_get_legal_positions_random[2-layout_t12]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags15-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_080]", "test/test_game.py::test_get_legal_positions_random[0-layout_t31]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_008]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_039]", "test/test_game.py::test_initial_positions_basic", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_015]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_024]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags13-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_036]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_042]", "test/test_game.py::test_get_legal_positions_random[2-layout_t17]", "test/test_game.py::test_get_legal_positions_random[0-layout_t6]", "test/test_game.py::test_get_legal_positions_random[3-layout_t0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_023]", "test/test_game.py::test_get_legal_positions_random[2-layout_t10]", "test/test_game.py::test_get_legal_positions_random[2-layout_t0]", "test/test_game.py::test_error_finishes_game[team_errors11-0]", "test/test_game.py::test_suicide_win", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_025]", "test/test_game.py::test_get_legal_positions_random[1-layout_t11]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_013]", 
"test/test_game.py::test_get_legal_positions_random[3-layout_t31]", "test/test_game.py::test_get_legal_positions_random[3-layout_t5]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_084]", "test/test_game.py::test_get_legal_positions_random[3-layout_t40]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_095]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags5-3]", "test/test_game.py::test_get_legal_positions_random[1-layout_t12]", "test/test_game.py::test_error_finishes_game[team_errors15-0]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t7]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_098]", "test/test_game.py::test_last_round_check", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_039]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_015]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_082]", "test/test_game.py::test_get_legal_positions_random[2-layout_t43]", "test/test_game.py::test_error_finishes_game[team_errors9-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_042]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags11-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_082]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_008]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_090]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_022]", "test/test_game.py::test_get_legal_positions_random[2-layout_t6]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_057]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_028]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_077]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_087]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags15-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_009]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_035]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_068]", "test/test_game.py::test_play_turn_friendly_fire[setups1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_066]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_056]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_032]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_041]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_059]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_062]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_024]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_060]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags13-3]", 
"test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags8-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_004]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_062]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_025]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_010]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_071]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_094]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_087]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_028]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_058]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_048]", "test/test_game.py::test_get_legal_positions_random[2-layout_t8]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_053]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags9-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_069]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_096]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_036]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t15]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_053]", "test/test_game.py::test_get_legal_positions_random[2-layout_t35]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_087]", "test/test_game.py::test_get_legal_positions_random[1-layout_t34]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_019]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_007]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_009]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_022]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_056]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_060]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_078]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_073]", "test/test_game.py::test_finished_when_no_food[1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_055]", "test/test_game.py::test_get_legal_positions_random[2-layout_t42]", "test/test_game.py::test_get_legal_positions_random[2-layout_t25]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_083]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_061]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_004]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags6-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_011]", "test/test_game.py::test_get_legal_positions_random[3-layout_t47]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_020]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_015]", "test/test_game.py::test_get_legal_positions_random[1-layout_t30]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_013]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_074]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_002]", "test/test_game.py::test_get_legal_positions_random[3-layout_t8]", "test/test_game.py::test_play_turn_illegal_position[0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_026]", "test/test_game.py::test_double_suicide", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_004]", "test/test_game.py::test_get_legal_positions_random[0-layout_t43]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_054]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_075]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_064]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_062]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_001]", "test/test_game.py::test_minimal_losing_game_has_one_error", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_026]", "test/test_game.py::test_get_legal_positions_random[3-layout_t37]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_071]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_079]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t19]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_040]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags4-1]", "test/test_game.py::test_get_legal_positions_random[2-layout_t48]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_010]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_030]", "test/test_game.py::test_get_legal_positions_random[3-layout_t45]", "test/test_game.py::test_setup_game_run_game_have_same_args", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_092]", "test/test_game.py::test_get_legal_positions_random[2-layout_t47]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags11-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_076]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t13]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_020]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_019]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_084]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_018]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_004]", "test/test_game.py::test_get_legal_positions_random[0-layout_t23]", "test/test_game.py::test_get_legal_positions_random[1-layout_t13]", 
"test/test_game.py::test_get_legal_positions_random[2-layout_t20]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_051]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_049]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_015]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_044]", "test/test_game.py::test_play_turn_friendly_fire[setups2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_095]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_063]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_034]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_029]", "test/test_game.py::test_play_turn_maxrounds[score0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_051]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_027]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_066]", "test/test_game.py::test_play_turn_maxrounds[score2]", "test/test_game.py::test_get_legal_positions_random[2-layout_t39]", "test/test_game.py::test_error_finishes_game[team_errors12-2]", "test/test_game.py::test_play_turn_fatal[0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_099]", "test/test_game.py::test_get_legal_positions_random[2-layout_t41]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_058]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_088]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_078]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_044]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_022]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_046]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_077]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_019]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags4-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_078]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_028]", "test/test_game.py::test_get_legal_positions_random[1-layout_t1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_077]", "test/test_game.py::test_play_turn_eating_enemy_food[0-2]", "test/test_game.py::test_get_legal_positions_random[3-layout_t42]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_058]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags6-3]", "test/test_game.py::test_get_legal_positions_random[3-layout_t25]", "test/test_game.py::test_get_legal_positions_random[1-layout_t25]", "test/test_game.py::test_get_legal_positions_random[1-layout_t15]", "test/test_game.py::test_get_legal_positions_random[2-layout_t32]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_018]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_084]", "test/test_game.py::test_get_legal_positions_random[2-layout_t15]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_012]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_006]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_049]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_069]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_068]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_023]", "test/test_game.py::test_get_legal_positions_random[2-layout_t13]", "test/test_game.py::test_get_legal_positions_random[3-layout_t48]", "test/test_game.py::test_non_existing_file", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_080]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_001]", "test/test_game.py::test_get_legal_positions_random[3-layout_t22]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_012]", "test/test_game.py::test_play_turn_killing[3]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags13-2]", "test/test_game.py::test_get_legal_positions_random[1-layout_t7]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_002]", "test/test_game.py::test_get_legal_positions_random[2-layout_t7]", "test/test_game.py::test_suicide", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags2-0]", "test/test_game.py::test_get_legal_positions_random[0-layout_t9]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_066]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_052]", "test/test_game.py::test_get_legal_positions_random[3-layout_t26]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_086]", "test/test_game.py::test_get_legal_positions_random[0-layout_t45]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_041]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_052]", "test/test_game.py::test_get_legal_positions_random[1-layout_t32]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_005]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t11]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_098]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_093]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_062]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_035]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_071]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_021]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_008]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_041]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t2]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_059]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_029]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_020]", "test/test_game.py::test_get_legal_positions_random[3-layout_t28]", "test/test_game.py::test_get_legal_positions_random[3-layout_t2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_025]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_039]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_047]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_031]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_073]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_003]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_091]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags8-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_091]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_007]", "test/test_game.py::test_get_legal_positions_random[0-layout_t44]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_085]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags14-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_040]", "test/test_game.py::test_error_finishes_game[team_errors5-False]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_025]", "test/test_game.py::test_get_legal_positions_random[2-layout_t5]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_091]", "test/test_game.py::test_get_legal_positions_random[0-layout_t42]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_004]", "test/test_game.py::test_get_legal_positions_random[3-layout_t17]", "test/test_game.py::test_error_finishes_game[team_errors10-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_072]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_060]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_018]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_073]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_023]", "test/test_game.py::test_get_legal_positions_random[2-layout_t33]", "test/test_game.py::test_get_legal_positions_random[3-layout_t41]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags3-1]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_008]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_033]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_009]", "test/test_game.py::test_play_turn_apply_error[3]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags9-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_074]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_083]", "test/test_game.py::test_get_legal_positions_random[0-layout_t32]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_074]", "test/test_game.py::test_play_turn_eating_enemy_food[1-1]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags0-3]", "test/test_game.py::test_get_legal_positions_random[2-layout_t27]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags14-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_014]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_085]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_007]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_003]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_048]", "test/test_game.py::test_get_legal_positions_random[0-layout_t2]", "test/test_game.py::test_get_legal_positions_random[2-layout_t21]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_003]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_043]", "test/test_game.py::test_get_legal_positions_random[3-layout_t33]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_011]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_053]", "test/test_game.py::test_play_turn_apply_error[2]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags6-0]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags4-3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_013]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_081]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_058]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_011]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t3]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_043]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_082]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_023]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_091]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_060]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_091]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_066]", "test/test_game.py::test_get_legal_positions_random[0-layout_t34]", "test/test_game.py::test_get_legal_positions_random[1-layout_t49]", "test/test_game.py::test_get_legal_positions_random[2-layout_t30]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_030]", "test/test_game.py::test_play_turn_killing[2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_085]", "test/test_game.py::test_get_legal_positions_random[1-layout_t39]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags12-1]", 
"test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_019]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_001]", "test/test_game.py::test_get_legal_positions_random[1-layout_t4]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_092]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_067]", "test/test_game.py::test_get_legal_positions_random[1-layout_t6]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags7-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_034]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_099]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags3-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_095]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t10]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_038]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags0-2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_094]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_027]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_013]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_038]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_092]", "test/test_game.py::test_initial_positions_same_in_layout_random[layout_t9]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_038]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_010]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_075]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_042]", "test/test_game.py::test_get_legal_positions_random[1-layout_t2]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_016]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_080]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_with_dead_ends_020]", "test/test_game.py::test_error_finishes_game[team_errors4-False]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_071]", "test/test_game.py::test_get_legal_positions_random[3-layout_t21]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_with_dead_ends_081]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_without_dead_ends_032]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_093]", "test/test_game.py::test_initial_positions_same_in_layout[layout_big_without_dead_ends_074]", "test/test_game.py::test_initial_positions_same_in_layout[layout_small_with_dead_ends_087]", "test/test_game.py::test_prepare_bot_state_resets_respawned_flag[respawn_flags1-0]", "test/test_game.py::test_initial_positions_same_in_layout[layout_normal_without_dead_ends_033]", "test/test_game.py::test_get_legal_positions_random[2-layout_t14]", "test/test_layout.py::test_illegal_walls", "test/test_layout.py::test_legal_positions_fail[pos4]", 
"test/test_layout.py::test_get_random_layout_returns_correct_layout", "test/test_layout.py::test_legal_positions[pos0-legal_positions0]", "test/test_layout.py::test_equal_positions", "test/test_layout.py::test_legal_positions_fail[pos3]", "test/test_layout.py::test_illegal_width", "test/test_layout.py::test_legal_positions[pos3-legal_positions3]", "test/test_layout.py::test_legal_positions[pos2-legal_positions2]", "test/test_layout.py::test_illegal_character", "test/test_layout.py::test_legal_positions_fail[pos0]", "test/test_layout.py::test_legal_positions_fail[pos1]", "test/test_layout.py::test_roundtrip_overlapping", "test/test_layout.py::test_legal_positions_fail[pos2]", "test/test_layout.py::test_get_available_layouts", "test/test_layout.py::test_legal_positions[pos1-legal_positions1]", "test/test_layout.py::test_different_width", "test/test_layout.py::test_illegal_index", "test/test_layout.py::test_empty_lines", "test/test_layout.py::test_roundtrip", "test/test_layout.py::test_combined_layouts_broken_lines", "test/test_layout.py::test_not_enclosed_by_walls", "test/test_layout.py::test_get_random_layout", "test/test_layout.py::test_combined_layouts_empty_lines", "test/test_layout.py::test_combined_layouts", "test/test_layout.py::test_get_layout_by_name" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-06-18T13:26:16Z"
bsd-2-clause
ASPP__pelita-655
diff --git a/layouts/create_pelita_layouts.py b/layouts/create_pelita_layouts.py deleted file mode 100755 index 1dfb91da..00000000 --- a/layouts/create_pelita_layouts.py +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env python3 -# Use this script to update/regenerate the layouts strings in pelita.layouts.py - -import os -import zlib -import base64 -import pelita - -EXTENSION = '.layout' -OUTFILENAME = '__layouts.py' - -local_dir = os.path.dirname(os.path.realpath(__file__)) -pelita_path = os.path.dirname(pelita.__file__) -outfile = os.path.join(pelita_path, OUTFILENAME) - -layout_entry = '{name} = """{code}"""\n' - -content = '### This file is auto-generated. DO NOT EDIT! ###\n' -# loop through all layout files -for f in sorted(os.listdir(local_dir)): - flname, ext = os.path.splitext(f) - if ext != EXTENSION: - continue - with open(os.path.join(local_dir,f), 'rb') as bytemaze: - layout = bytemaze.read() - - layout_name = "layout_" + flname - # encode layout string - layout_code = base64.encodebytes(zlib.compress(layout)).decode() - - content += layout_entry.format(name=layout_name, code=layout_code) - -# write out file in pelita directory -with open(outfile, 'w') as out: - out.write(content) diff --git a/pelita/layout.py b/pelita/layout.py index 369da014..df604a43 100644 --- a/pelita/layout.py +++ b/pelita/layout.py @@ -117,8 +117,9 @@ def parse_layout(layout_str, allow_enemy_chars=False): In this case, bot '0' and bot '2' are on top of each other at position (1,1) If `allow_enemy_chars` is True, we additionally allow for the definition of - at most 2 enemy characters with the letter "E". The returned dict will then - additionally contain an entry "enemy" which contains these coordinates. + at most 2 enemy characters with the letters "E" and "?". The returned dict will + then additionally contain an entry "enemy" which contains these coordinates and + an entry "is_noisy" that specifies which of the given enemies is noisy. If only one enemy character is given, both will be assumed sitting on the same spot. 
""" @@ -161,6 +162,7 @@ def parse_layout(layout_str, allow_enemy_chars=False): bots = [None] * num_bots if allow_enemy_chars: enemy = [] + noisy_enemy = set() # iterate through all layouts for layout in layout_list: @@ -178,7 +180,10 @@ def parse_layout(layout_str, allow_enemy_chars=False): # add the enemy, removing duplicates if allow_enemy_chars: - enemy = list(set(enemy + items['enemy'])) + # enemy contains _all_ enemies + enemy = list(set(enemy + items['enemy'] + items['noisy_enemy'])) + # noisy_enemy contains only the noisy enemies + noisy_enemy.update(items['noisy_enemy']) # add the bots for bot_idx, bot_pos in enumerate(items['bots']): @@ -213,6 +218,7 @@ def parse_layout(layout_str, allow_enemy_chars=False): # sort the enemy characters # be careful, since it may contain None out['enemy'] = sorted(enemy, key=lambda x: () if x is None else x) + out['is_noisy'] = [e in noisy_enemy for e in out['enemy']] return out @@ -271,6 +277,7 @@ def parse_single_layout(layout_str, num_bots=4, allow_enemy_chars=False): bots = [None] * num_bots # enemy positions (only used for team-style layouts) enemy = [] + noisy_enemy = [] # iterate through the grid of characters for y, row in enumerate(rows): @@ -292,6 +299,12 @@ def parse_single_layout(layout_str, num_bots=4, allow_enemy_chars=False): enemy.append(coord) else: raise ValueError(f"Enemy character not allowed.") + elif char == '?': + # noisy_enemy + if allow_enemy_chars: + noisy_enemy.append(coord) + else: + raise ValueError(f"Enemy character not allowed.") else: # bot try: @@ -312,11 +325,11 @@ def parse_single_layout(layout_str, num_bots=4, allow_enemy_chars=False): food.sort() out = {'walls':walls, 'food':food, 'bots':bots} if allow_enemy_chars: - enemy.sort() - out['enemy'] = enemy + out['enemy'] = sorted(enemy) + out['noisy_enemy'] = sorted(noisy_enemy) return out -def layout_as_str(*, walls, food=None, bots=None, enemy=None): +def layout_as_str(*, walls, food=None, bots=None, enemy=None, is_noisy=None): """Given walls, food and bots return a string layout representation Returns a combined layout string. @@ -339,6 +352,15 @@ def layout_as_str(*, walls, food=None, bots=None, enemy=None): if enemy is None: enemy = [] + # if noisy is given, it must be of the same length as enemy + if is_noisy is None: + noisy_enemies = set() + elif len(is_noisy) != len(enemy): + raise ValueError("Parameter `noisy` must have same length as `enemy`.") + else: + # if an enemy is flagged as noisy, we put it into the set of noisy_enemies + noisy_enemies = {e for e, e_is_noisy in zip(enemy, is_noisy) if e_is_noisy} + # flag to check if we have overlapping objects # when need_combined is True, we force the printing of a combined layout @@ -374,7 +396,10 @@ def layout_as_str(*, walls, food=None, bots=None, enemy=None): if (x, y) in bots: out.write(str(bots.index((x, y)))) elif (x, y) in enemy: - out.write("E") + if (x, y) in noisy_enemies: + out.write("?") + else: + out.write("E") else: out.write(' ') else: @@ -403,7 +428,8 @@ def layout_as_str(*, walls, food=None, bots=None, enemy=None): # if an enemy coordinate is None # don't put the enemy in the layout continue - coord_bots[pos] = coord_bots.get(pos, []) + ["E"] + enemy_char = '?' 
if pos in noisy_enemies else 'E' + coord_bots[pos] = coord_bots.get(pos, []) + [enemy_char] # loop through the bot coordinates while coord_bots: diff --git a/pelita/player/team.py b/pelita/player/team.py index b936994f..5f8f638e 100644 --- a/pelita/player/team.py +++ b/pelita/player/team.py @@ -594,7 +594,7 @@ class Bot: header = ("{blue}{you_blue} vs {red}{you_red}.\n" + "Playing on {col} side. Current turn: {turn}. Round: {round}, score: {blue_score}:{red_score}. " + - "timeouts: {blue_timeouts}:{red_timeouts}").format( + "timeouts: {blue_timeouts}:{red_timeouts}\n").format( blue=blue.team_name, red=red.team_name, turn=bot.turn, @@ -614,7 +614,8 @@ class Bot: layout = layout_as_str(walls=bot.walls[:], food=bot.food + bot.enemy[0].food, bots=[b.position for b in bot._team], - enemy=[e.position for e in bot.enemy]) + enemy=[e.position for e in bot.enemy], + is_noisy=[e.is_noisy for e in bot.enemy]) out.write(str(layout)) return out.getvalue() @@ -681,7 +682,7 @@ def make_bots(*, walls, team, enemy, round, bot_turn, rng): return team_bots[bot_turn] -def create_layout(*layout_strings, food=None, bots=None, enemy=None): +def create_layout(*layout_strings, food=None, bots=None, enemy=None, is_noisy=None): """ Create a layout from layout strings with additional food, bots and enemy positions. Walls must be equal in all layout strings. Food positions will be collected. @@ -729,4 +730,12 @@ def create_layout(*layout_strings, food=None, bots=None, enemy=None): _check_valid_pos(e, "enemy") parsed_layout['enemy'][idx] = e + # override is_noisy if given + if is_noisy is not None: + if not len(is_noisy) == 2: + raise ValueError(f"is_noisy must be a list of 2 ({is_noisy})!") + for idx, e_is_noisy in enumerate(is_noisy): + if e_is_noisy is not None: + parsed_layout['is_noisy'][idx] = e_is_noisy + return parsed_layout diff --git a/pelita/utils.py b/pelita/utils.py index 813e74c1..b238f1ec 100644 --- a/pelita/utils.py +++ b/pelita/utils.py @@ -34,7 +34,7 @@ def load_builtin_layout(layout_name, *, is_blue=True): def setup_test_game(*, layout, game=None, is_blue=True, round=None, score=None, seed=None, - food=None, bots=None, enemy=None): + food=None, bots=None, enemy=None, is_noisy=None): """Returns the first bot object given a layout. The returned Bot instance can be passed to a move function to test its return value. @@ -45,7 +45,7 @@ def setup_test_game(*, layout, game=None, is_blue=True, round=None, score=None, if score is None: score = [0, 0] - layout = create_layout(layout, food=food, bots=bots, enemy=enemy) + layout = create_layout(layout, food=food, bots=bots, enemy=enemy, is_noisy=is_noisy) width = max(layout['walls'])[0] + 1 food = split_food(width, layout['food']) @@ -79,7 +79,7 @@ def setup_test_game(*, layout, game=None, is_blue=True, round=None, score=None, 'bot_was_killed': [False]*2, 'error_count': 0, 'food': food[enemy_index], - 'is_noisy': [False] * len(layout['enemy']), + 'is_noisy': layout['is_noisy'], 'name': "red" if is_blue else "blue" }
ASPP/pelita
1108fc71cdc9a7eeb4563149e9821255d6f56bf3
diff --git a/test/test_layout.py b/test/test_layout.py index 0039b6e2..6b15bf7d 100644 --- a/test/test_layout.py +++ b/test/test_layout.py @@ -402,6 +402,14 @@ def test_enemy_raises(): # # #### """, [None, None]), # this will set both to None + (""" + #### + # E# + #### + #### + #??# + #### + """, [(1, 1), (2, 1)]), # two enemies in two layouts with duplication and question marks ]) def test_enemy_positions(layout, enemy_pos): if enemy_pos is None: diff --git a/test/test_utils.py b/test/test_utils.py new file mode 100644 index 00000000..0262fb16 --- /dev/null +++ b/test/test_utils.py @@ -0,0 +1,50 @@ + +from pelita import utils + +import pytest + +@pytest.mark.parametrize('is_blue', [True, False]) +def test_setup_test_game(is_blue): + layout = utils.load_builtin_layout('small_without_dead_ends_001', is_blue=is_blue) + test_game = utils.setup_test_game(layout=layout, is_blue=is_blue) + + if is_blue: + assert test_game.position == (1, 5) + assert test_game.other.position == (1, 6) + assert test_game.enemy[0].position == (16, 1) + assert test_game.enemy[1].position == (16, 2) + else: + assert test_game.position == (16, 2) + assert test_game.other.position == (16, 1) + assert test_game.enemy[0].position == (1, 5) + assert test_game.enemy[1].position == (1, 6) + + # load_builtin_layout loads unnoised enemies + assert test_game.enemy[0].is_noisy is False + assert test_game.enemy[1].is_noisy is False + + +@pytest.mark.parametrize('is_blue', [True, False]) +def test_setup_test_game(is_blue): + # Test that is_noisy is set properly + layout = """ + ################## + #. ... .##. ?# + # # # . .### # # + # # ##. E . # + # . .## # # + #0# ###. . # # # + #1 .##. ... .# + ################## + """ + test_game = utils.setup_test_game(layout=layout, is_blue=is_blue) + + assert test_game.position == (1, 5) + assert test_game.other.position == (1, 6) + assert test_game.enemy[0].position == (8, 3) + assert test_game.enemy[1].position == (16, 1) + + # load_builtin_layout loads unnoised enemies + assert test_game.enemy[0].is_noisy is False + assert test_game.enemy[1].is_noisy is True +
print(bot) should show which enemies are noisy. This will hopefully avoid confusion. One remark: since we got rid of set_initial in the new-style API, the teams never see their enemies sitting unnoised on their initial positions, which has been a nice (and easy) starting point for filtering. Question: Do we want to be explicit about how the initial positions are fixed (i.e. add an example), or do we want them to figure it out themselves?
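To make the new '?' convention concrete, here is a small self-contained sketch of the marker scheme (an exact enemy prints as 'E', a noisy one as '?'). The render helper below is purely illustrative and is not pelita's parse_layout/layout_as_str; it only mirrors the convention added in the patch above.

```python
# Illustrative only: draw walls, own bots (by index) and enemies, using 'E'
# for an exact enemy position and '?' for a noisy one.
def render(width, height, walls, bots, enemy, is_noisy):
    grid = [[' '] * width for _ in range(height)]
    for (x, y) in walls:
        grid[y][x] = '#'
    for (x, y), noisy in zip(enemy, is_noisy):
        grid[y][x] = '?' if noisy else 'E'
    for idx, (x, y) in enumerate(bots):
        grid[y][x] = str(idx)
    return '\n'.join(''.join(row) for row in grid)

walls = ([(x, 0) for x in range(4)] + [(x, 3) for x in range(4)]
         + [(0, y) for y in range(4)] + [(3, y) for y in range(4)])
print(render(4, 4, walls, bots=[(1, 1)],
             enemy=[(2, 1), (2, 2)], is_noisy=[False, True]))
# ####
# #0E#
# # ?#
# ####
```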
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_layout.py::test_enemy_positions[\\n", "test/test_utils.py::test_setup_test_game[True]", "test/test_utils.py::test_setup_test_game[False]" ]
[ "test/test_layout.py::test_get_available_layouts", "test/test_layout.py::test_get_layout_by_name", "test/test_layout.py::test_get_random_layout", "test/test_layout.py::test_get_random_layout_returns_correct_layout", "test/test_layout.py::test_not_enclosed_by_walls", "test/test_layout.py::test_illegal_character", "test/test_layout.py::test_illegal_index", "test/test_layout.py::test_illegal_walls", "test/test_layout.py::test_illegal_width", "test/test_layout.py::test_different_width", "test/test_layout.py::test_combined_layouts", "test/test_layout.py::test_combined_layouts_empty_lines", "test/test_layout.py::test_duplicate_bots_forbidden", "test/test_layout.py::test_duplicate_bots_forbidden_multiple", "test/test_layout.py::test_duplicate_bots_allowed", "test/test_layout.py::test_combined_layouts_broken_lines", "test/test_layout.py::test_roundtrip", "test/test_layout.py::test_roundtrip_overlapping", "test/test_layout.py::test_empty_lines", "test/test_layout.py::test_equal_positions", "test/test_layout.py::test_legal_positions[pos0-legal_positions0]", "test/test_layout.py::test_legal_positions[pos1-legal_positions1]", "test/test_layout.py::test_legal_positions[pos2-legal_positions2]", "test/test_layout.py::test_legal_positions[pos3-legal_positions3]", "test/test_layout.py::test_legal_positions_fail[pos0]", "test/test_layout.py::test_legal_positions_fail[pos1]", "test/test_layout.py::test_legal_positions_fail[pos2]", "test/test_layout.py::test_legal_positions_fail[pos3]", "test/test_layout.py::test_legal_positions_fail[pos4]", "test/test_layout.py::test_enemy_raises", "test/test_layout.py::test_layout_for_team" ]
{ "failed_lite_validators": [ "has_removed_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-08-08T14:56:42Z"
bsd-2-clause
ASPP__pelita-696
diff --git a/pelita/layout.py b/pelita/layout.py index e5797adc..66fa2ebd 100644 --- a/pelita/layout.py +++ b/pelita/layout.py @@ -545,9 +545,9 @@ def layout_for_team(layout, is_blue=True, is_noisy=(False, False)): 'is_noisy' : is_noisy, } -def layout_agnostic(layout_for_team, is_blue=True): - """ Converts a layout dict with 2 bots and enemies to a layout - with 4 bots. +def layout_agnostic(layout, is_blue=True): + """ Converts a layout dict with 2 bots and enemies (team-style) + to a layout with 4 bots (server-style). """ if "enemy" not in layout: raise ValueError("Layout is already in server-style.")
ASPP/pelita
557c3a757a24e0f1abe25f7edf5c4ffee83a077e
diff --git a/test/test_layout.py b/test/test_layout.py index ff30905a..4d3d8638 100644 --- a/test/test_layout.py +++ b/test/test_layout.py @@ -454,3 +454,38 @@ def test_layout_for_team(): with pytest.raises(ValueError): layout_for_team(parse_layout(red1)) + +def test_layout_agnostic(): + """ + Test if team-style layout can be converted to server-style layout. + + Uses this layout: + + #### + #01# + #EE# + #..# + #### + """ + + l = { + 'walls': [(0,0),(0,1),(0,2),(0,3),(1,0),(1,3),(2,0),(2,3),(3,0),(3,3),(4,0),(4,1),(4,2),(4,3)], + 'food': [(3,1),(3,2)], + 'bots': [(1,1),(1,2)], + 'enemy': [(2,1),(2,2)] + } + + + l_expected_blue = { + 'walls': [(0,0),(0,1),(0,2),(0,3),(1,0),(1,3),(2,0),(2,3),(3,0),(3,3),(4,0),(4,1),(4,2),(4,3)], + 'food': [(3,1),(3,2)], + 'bots': [(1,1),(2,1),(1,2),(2,2)] + } + l_expected_red = { + 'walls': [(0,0),(0,1),(0,2),(0,3),(1,0),(1,3),(2,0),(2,3),(3,0),(3,3),(4,0),(4,1),(4,2),(4,3)], + 'food': [(3,1),(3,2)], + 'bots': [(2,1),(1,1),(2,2),(1,2)] + } + + assert layout_agnostic(l, is_blue=True) == l_expected_blue + assert layout_agnostic(l, is_blue=False) == l_expected_red
layout_agnostic needs tests and fixes. Currently broken: https://github.com/ASPP/pelita/blob/2f17db5355b4dffae8a130ede549ab869b2f1ce2/pelita/layout.py#L548-L566
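As a reading aid for the new test above, a minimal sketch of the team-style to server-style conversion it pins down; to_server_style is an illustrative stand-in for the library's layout_agnostic, and the dict keys mirror the test. Blue bots occupy indices 0 and 2, red bots indices 1 and 3.

```python
# Illustrative conversion: interleave own bots and enemy bots so that the
# blue team ends up on indices 0/2 and the red team on indices 1/3.
def to_server_style(layout, is_blue=True):
    own, enemy = layout['bots'], layout['enemy']
    if is_blue:
        bots = [own[0], enemy[0], own[1], enemy[1]]
    else:
        bots = [enemy[0], own[0], enemy[1], own[1]]
    return {'walls': layout['walls'], 'food': layout['food'], 'bots': bots}

team_style = {
    'walls': [],  # omitted for brevity
    'food': [(3, 1), (3, 2)],
    'bots': [(1, 1), (1, 2)],
    'enemy': [(2, 1), (2, 2)],
}
print(to_server_style(team_style, is_blue=True)['bots'])
# [(1, 1), (2, 1), (1, 2), (2, 2)]
print(to_server_style(team_style, is_blue=False)['bots'])
# [(2, 1), (1, 1), (2, 2), (1, 2)]
```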
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_layout.py::test_layout_agnostic" ]
[ "test/test_layout.py::test_get_available_layouts", "test/test_layout.py::test_get_layout_by_name", "test/test_layout.py::test_get_random_layout", "test/test_layout.py::test_get_random_layout_returns_correct_layout", "test/test_layout.py::test_not_enclosed_by_walls", "test/test_layout.py::test_illegal_character", "test/test_layout.py::test_illegal_index", "test/test_layout.py::test_illegal_walls", "test/test_layout.py::test_illegal_width", "test/test_layout.py::test_different_width", "test/test_layout.py::test_combined_layouts", "test/test_layout.py::test_combined_layouts_empty_lines", "test/test_layout.py::test_duplicate_bots_forbidden", "test/test_layout.py::test_duplicate_bots_allowed", "test/test_layout.py::test_combined_layouts_broken_lines", "test/test_layout.py::test_roundtrip", "test/test_layout.py::test_roundtrip_overlapping", "test/test_layout.py::test_empty_lines", "test/test_layout.py::test_equal_positions", "test/test_layout.py::test_legal_positions[pos0-legal_positions0]", "test/test_layout.py::test_legal_positions[pos1-legal_positions1]", "test/test_layout.py::test_legal_positions[pos2-legal_positions2]", "test/test_layout.py::test_legal_positions[pos3-legal_positions3]", "test/test_layout.py::test_legal_positions_fail[pos0]", "test/test_layout.py::test_legal_positions_fail[pos1]", "test/test_layout.py::test_legal_positions_fail[pos2]", "test/test_layout.py::test_legal_positions_fail[pos3]", "test/test_layout.py::test_legal_positions_fail[pos4]", "test/test_layout.py::test_enemy_raises", "test/test_layout.py::test_enemy_positions[\\n", "test/test_layout.py::test_layout_for_team" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-10-09T20:08:47Z"
bsd-2-clause
Abjad__abjad-ext-nauert-24
diff --git a/abjadext/nauert/gracehandlers.py b/abjadext/nauert/gracehandlers.py index 8813e0f..a2dbdd3 100644 --- a/abjadext/nauert/gracehandlers.py +++ b/abjadext/nauert/gracehandlers.py @@ -199,8 +199,8 @@ class ConcatenatingGraceHandler(GraceHandler): .. container:: example - When ``replace_rest_with_final_grace_note`` is set to ``False`` (the - default behaviour), grace notes are allowed to be attached to a rest. + When ``replace_rest_with_final_grace_note`` is set to ``False``, grace + notes are allowed to be attached to a rest. >>> quantizer = nauert.Quantizer() >>> durations = [1000, 1, 999, 1000] @@ -208,7 +208,9 @@ class ConcatenatingGraceHandler(GraceHandler): >>> q_event_sequence = nauert.QEventSequence.from_millisecond_pitch_pairs( ... tuple(zip(durations, pitches)) ... ) - >>> grace_handler = nauert.ConcatenatingGraceHandler() + >>> grace_handler = nauert.ConcatenatingGraceHandler( + ... replace_rest_with_final_grace_note=False + ... ) >>> result = quantizer(q_event_sequence, grace_handler=grace_handler) >>> abjad.show(result) # doctest: +SKIP @@ -233,13 +235,11 @@ class ConcatenatingGraceHandler(GraceHandler): .. container:: example - When ``replace_rest_with_final_grace_note`` is set to ``True``, any - rest with grace notes attached to it is replaced by the last pitched - grace note in the grace container. + When ``replace_rest_with_final_grace_note`` is set to ``True`` (the + default behavior), any rest with grace notes attached to it is replaced + by the last pitched grace note in the grace container. - >>> grace_handler = nauert.ConcatenatingGraceHandler( - ... replace_rest_with_final_grace_note=True - ... ) + >>> grace_handler = nauert.ConcatenatingGraceHandler() >>> result = quantizer(q_event_sequence, grace_handler=grace_handler) >>> abjad.show(result) # doctest: +SKIP @@ -274,7 +274,7 @@ class ConcatenatingGraceHandler(GraceHandler): self, discard_grace_rest=True, grace_duration=None, - replace_rest_with_final_grace_note=False, + replace_rest_with_final_grace_note=True, ): self._discard_grace_rest = discard_grace_rest if grace_duration is None:
Abjad/abjad-ext-nauert
520f389f06e21ee0a094016b4f1e2b0cb58263c1
diff --git a/tests/test_ConcatenatingGraceHandler___call__.py b/tests/test_ConcatenatingGraceHandler___call__.py index 75fa793..11424af 100644 --- a/tests/test_ConcatenatingGraceHandler___call__.py +++ b/tests/test_ConcatenatingGraceHandler___call__.py @@ -58,7 +58,9 @@ def test_ConcatenatingGraceHandler___call___02(): def test_ConcatenatingGraceHandler___call___03(): - grace_handler = nauert.ConcatenatingGraceHandler() + grace_handler = nauert.ConcatenatingGraceHandler( + replace_rest_with_final_grace_note=False + ) quantizer = nauert.Quantizer() durations = [1000, 1, 999, 1000] pitches = [0, 0, None, 0] diff --git a/tests/test_ConcatenatingGraceHandler___init__.py b/tests/test_ConcatenatingGraceHandler___init__.py index 8466a64..2b14614 100644 --- a/tests/test_ConcatenatingGraceHandler___init__.py +++ b/tests/test_ConcatenatingGraceHandler___init__.py @@ -12,14 +12,14 @@ def test_ConcatenatingGraceHandler___init___02(): grace_handler = nauert.ConcatenatingGraceHandler(discard_grace_rest=False) assert grace_handler.grace_duration == abjad.Duration(1, 16) assert grace_handler.discard_grace_rest is False - assert grace_handler.replace_rest_with_final_grace_note is False + assert grace_handler.replace_rest_with_final_grace_note is True def test_ConcatenatingGraceHandler___init___03(): grace_handler = nauert.ConcatenatingGraceHandler(grace_duration=(1, 32)) assert grace_handler.grace_duration == abjad.Duration(1, 32) assert grace_handler.discard_grace_rest is True - assert grace_handler.replace_rest_with_final_grace_note is False + assert grace_handler.replace_rest_with_final_grace_note is True def test_ConcatenatingGraceHandler___init___04():
Check gracehandlers behaviors There seem to be some odd behaviors in handling grace notes. The first odd behavior results in a "grace rest" attaching to a pitched note, as shown below: ``` import abjad from abjadext import nauert quantizer = nauert.Quantizer() durations = [1000, 1, 999] pitches = [0, None, 0] q_event_sequence = nauert.QEventSequence.from_millisecond_pitch_pairs( tuple(zip(durations, pitches)) ) result = quantizer(q_event_sequence) print(abjad.lilypond(result)) ``` which results in ``` \new Voice { { \tempo 4=60 %%% \time 4/4 %%% c'4 \grace { r16 } c'4 r4 r4 } } ``` The second one results in a grace note attaching to a rest. A snippet might be uploaded later (or not).
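A short usage sketch built from the doctests in this record's patch: with the new default, a rest that carries grace notes is replaced by the final pitched grace note, and the old behaviour now has to be requested explicitly. The calls below are taken from the patch's own doctests and tests; abjad.lilypond is used only to inspect the result.

```python
import abjad
from abjadext import nauert

quantizer = nauert.Quantizer()
durations = [1000, 1, 999, 1000]
pitches = [0, 0, None, 0]
q_event_sequence = nauert.QEventSequence.from_millisecond_pitch_pairs(
    tuple(zip(durations, pitches))
)

# New default: the rest with attached grace notes is replaced by the last
# pitched grace note in the grace container.
result = quantizer(
    q_event_sequence, grace_handler=nauert.ConcatenatingGraceHandler()
)

# Previous behaviour (grace notes allowed on a rest) is now opt-in.
legacy = quantizer(
    q_event_sequence,
    grace_handler=nauert.ConcatenatingGraceHandler(
        replace_rest_with_final_grace_note=False
    ),
)

print(abjad.lilypond(result))
print(abjad.lilypond(legacy))
```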
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_ConcatenatingGraceHandler___init__.py::test_ConcatenatingGraceHandler___init___02", "tests/test_ConcatenatingGraceHandler___init__.py::test_ConcatenatingGraceHandler___init___03" ]
[ "tests/test_ConcatenatingGraceHandler___call__.py::test_ConcatenatingGraceHandler___call___01", "tests/test_ConcatenatingGraceHandler___call__.py::test_ConcatenatingGraceHandler___call___02", "tests/test_ConcatenatingGraceHandler___call__.py::test_ConcatenatingGraceHandler___call___03", "tests/test_ConcatenatingGraceHandler___call__.py::test_ConcatenatingGraceHandler___call___04", "tests/test_ConcatenatingGraceHandler___init__.py::test_ConcatenatingGraceHandler___init___01", "tests/test_ConcatenatingGraceHandler___init__.py::test_ConcatenatingGraceHandler___init___04" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-03-15T03:29:50Z"
mit
Adyen__adyen-python-api-library-102
diff --git a/Adyen/util.py b/Adyen/util.py index 580ff31..6156876 100644 --- a/Adyen/util.py +++ b/Adyen/util.py @@ -44,3 +44,53 @@ def is_valid_hmac(dict_object, hmac_key): merchant_sign = generate_hpp_sig(dict_object, hmac_key) merchant_sign_str = merchant_sign.decode("utf-8") return merchant_sign_str == expected_sign + + +def generate_notification_sig(dict_object, hmac_key): + if 'issuerId' in dict_object: + if dict_object['issuerId'] == "": + del dict_object['issuerId'] + + if not isinstance(dict_object, dict): + raise ValueError("Must Provide dictionary object") + + def escape_val(val): + if isinstance(val, int): + return val + return val.replace('\\', '\\\\').replace(':', '\\:') + + hmac_key = binascii.a2b_hex(hmac_key) + + request_dict = dict(dict_object) + request_dict['value'] = request_dict['amount']['value'] + request_dict['currency'] = request_dict['amount']['currency'] + + element_orders = [ + 'pspReference', + 'originalReference', + 'merchantAccountCode', + 'merchantReference', + 'value', + 'currency', + 'eventCode', + 'success', + ] + + signing_string = ':'.join( + map(escape_val, map(str, ( + request_dict.get(element, '') for element in element_orders)))) + + hm = hmac.new(hmac_key, signing_string.encode('utf-8'), hashlib.sha256) + return base64.b64encode(hm.digest()) + + +def is_valid_hmac_notification(dict_object, hmac_key): + if 'additionalData' in dict_object: + if dict_object['additionalData']['hmacSignature'] == "": + raise ValueError("Must Provide hmacSignature in additionalData") + else: + expected_sign = dict_object['additionalData']['hmacSignature'] + del dict_object['additionalData'] + merchant_sign = generate_notification_sig(dict_object, hmac_key) + merchant_sign_str = merchant_sign.decode("utf-8") + return merchant_sign_str == expected_sign
Adyen/adyen-python-api-library
7e539a538255450e1343bdc89ef18c1e354d4022
diff --git a/test/UtilTest.py b/test/UtilTest.py index acaf101..9cd9b33 100644 --- a/test/UtilTest.py +++ b/test/UtilTest.py @@ -1,15 +1,19 @@ import unittest import Adyen -from Adyen import generate_hpp_sig -from Adyen.util import is_valid_hmac +from Adyen.util import ( + generate_hpp_sig, + is_valid_hmac, + generate_notification_sig, + is_valid_hmac_notification, +) class UtilTest(unittest.TestCase): ady = Adyen.Adyen() client = ady.client - def test_notification_request_item_hmac(self): + def test_hpp_request_item_hmac(self): request = { "pspReference": "pspReference", "originalReference": "originalReference", @@ -31,3 +35,33 @@ class UtilTest(unittest.TestCase): request['additionalData'] = {'hmacSignature': hmac_calculation_str} hmac_validate = is_valid_hmac(request, key) self.assertTrue(hmac_validate) + + def test_notification_request_item_hmac(self): + request = { + "pspReference": "7914073381342284", + "merchantReference": "TestPayment-1407325143704", + "merchantAccountCode": "TestMerchant", + "amount": { + "currency": "EUR", + "value": 1130 + }, + "eventCode": "AUTHORISATION", + "success": "true", + "eventDate": "2019-05-06T17:15:34.121+02:00", + "operations": [ + "CANCEL", + "CAPTURE", + "REFUND" + ], + "paymentMethod": "visa", + } + key = "44782DEF547AAA06C910C43932B1EB0C" \ + "71FC68D9D0C057550C48EC2ACF6BA056" + hmac_calculation = generate_notification_sig(request, key) + hmac_calculation_str = hmac_calculation.decode("utf-8") + expected_hmac = "coqCmt/IZ4E3CzPvMY8zTjQVL5hYJUiBRg8UU+iCWo0=" + self.assertTrue(hmac_calculation_str != "") + self.assertEqual(hmac_calculation_str, expected_hmac) + request['additionalData'] = {'hmacSignature': hmac_calculation_str} + hmac_validate = is_valid_hmac_notification(request, key) + self.assertTrue(hmac_validate)
Util generate_hpp_sig() value and hmac do not correspond **Python version**: 2.7.16 **Library version**: 2.1.0 (also tested with the upcoming 2.2.0) **Description** TEST ENVIRONMENT: implemented a webhook for Adyen notifications (https://docs.adyen.com/development-resources/notifications), using the Test Configuration form in the Adyen backend. Passed the JSON data received from Adyen to the generate_hpp_sig() function, following the test here: https://github.com/Adyen/adyen-python-api-library/blob/develop/test/UtilTest.py#L13 The generated hmac does not correspond to additionalData['hmacSignature']. Also tested with the upcoming 2.2.0 function is_valid_hmac; it returns False.
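For orientation, a standalone sketch of the notification-HMAC scheme implemented by the patch's new generate_notification_sig/is_valid_hmac_notification helpers. The notification_signature function below is illustrative, not the library API; the sample payload and key come from this record's test, which expects the base64 value shown in the comment.

```python
import base64
import binascii
import hashlib
import hmac

def notification_signature(item, hex_key):
    # Escape backslashes and colons, join the fields in the documented order,
    # then HMAC-SHA256 with the hex-decoded key and base64-encode the digest.
    def escape(value):
        return str(value).replace('\\', '\\\\').replace(':', '\\:')

    ordered = [
        item.get('pspReference', ''),
        item.get('originalReference', ''),
        item.get('merchantAccountCode', ''),
        item.get('merchantReference', ''),
        item['amount']['value'],
        item['amount']['currency'],
        item.get('eventCode', ''),
        item.get('success', ''),
    ]
    signing_string = ':'.join(escape(v) for v in ordered)
    mac = hmac.new(binascii.a2b_hex(hex_key),
                   signing_string.encode('utf-8'), hashlib.sha256)
    return base64.b64encode(mac.digest()).decode('utf-8')

item = {
    "pspReference": "7914073381342284",
    "merchantReference": "TestPayment-1407325143704",
    "merchantAccountCode": "TestMerchant",
    "amount": {"currency": "EUR", "value": 1130},
    "eventCode": "AUTHORISATION",
    "success": "true",
}
key = "44782DEF547AAA06C910C43932B1EB0C71FC68D9D0C057550C48EC2ACF6BA056"
# The record's test expects: coqCmt/IZ4E3CzPvMY8zTjQVL5hYJUiBRg8UU+iCWo0=
print(notification_signature(item, key))
```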
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/UtilTest.py::UtilTest::test_hpp_request_item_hmac", "test/UtilTest.py::UtilTest::test_notification_request_item_hmac" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2020-04-01T12:07:36Z"
mit
Adyen__adyen-python-api-library-276
diff --git a/Adyen/__init__.py b/Adyen/__init__.py index 712155e..3e9a8a8 100644 --- a/Adyen/__init__.py +++ b/Adyen/__init__.py @@ -1,5 +1,3 @@ -#!/bin/python - from __future__ import absolute_import, division, unicode_literals from . import util diff --git a/Adyen/client.py b/Adyen/client.py index cd45b98..2e40e97 100644 --- a/Adyen/client.py +++ b/Adyen/client.py @@ -1,5 +1,3 @@ -#!/bin/python - from __future__ import absolute_import, division, unicode_literals import json as json_lib @@ -266,18 +264,18 @@ class AdyenClient(object): def _set_url_version(self, service, endpoint): version_lookup = {"binlookup": self.api_bin_lookup_version, - "checkout": self.api_checkout_version, - "management": self.api_management_version, - "payments": self.api_payment_version, - "payouts": self.api_payout_version, - "recurring": self.api_recurring_version, - "terminal": self.api_terminal_version, - "legalEntityManagement": self.api_legal_entity_management_version, - "dataProtection": self.api_data_protection_version, - "transfers": self.api_transfers_version, - "storedValue": self.api_stored_value_version, - "balancePlatform": self.api_balance_platform_version, - "disputes": self.api_disputes_version + "checkout": self.api_checkout_version, + "management": self.api_management_version, + "payments": self.api_payment_version, + "payouts": self.api_payout_version, + "recurring": self.api_recurring_version, + "terminal": self.api_terminal_version, + "legalEntityManagement": self.api_legal_entity_management_version, + "dataProtection": self.api_data_protection_version, + "transfers": self.api_transfers_version, + "storedValue": self.api_stored_value_version, + "balancePlatform": self.api_balance_platform_version, + "disputes": self.api_disputes_version } new_version = f"v{version_lookup[service]}" @@ -383,7 +381,7 @@ class AdyenClient(object): def _handle_response(self, url, raw_response, raw_request, status_code, headers): """This parses the content from raw communication, raising an error if - anything other than 200 was returned. + anything other than 2xx was returned. Args: url (str): URL where request was made @@ -391,58 +389,31 @@ class AdyenClient(object): raw_request (str): The raw response returned by Adyen status_code (int): The HTTP status code headers (dict): Key/Value of the headers. - request_dict (dict): The original request dictionary that was given - to the HTTPClient. Returns: AdyenResult: Result object if successful. """ - if status_code not in [200, 201, 204]: + try: + response = json_lib.loads(raw_response) + except json_lib.JSONDecodeError: response = {} - # If the result can't be parsed into json, most likely is raw html. - # Some response are neither json or raw html, handle them here: - if raw_response: - response = json_lib.loads(raw_response) - # Pass raised error to error handler. - self._handle_http_error(url, response, status_code, - headers.get('pspReference'), - raw_request, raw_response, - headers) - - try: - if response['errorCode']: - raise AdyenAPICommunicationError( - "Unexpected error while communicating with Adyen." - " Received the response data:'{}', HTTP Code:'{}'. 
" - "Please reach out to support@adyen.com if the " - "problem persists with the psp:{}".format( - raw_response, - status_code, - headers.get('pspReference')), - status_code=status_code, - raw_request=raw_request, - raw_response=raw_response, - url=url, - psp=headers.get('pspReference'), - headers=headers, - error_code=response['errorCode']) - except KeyError: - erstr = 'KeyError: errorCode' - raise AdyenAPICommunicationError(erstr) + + if status_code not in [200, 201, 202, 204]: + self._raise_http_error(url, response, status_code, + headers.get('pspReference'), + raw_request, raw_response, + headers) else: - if status_code != 204: - response = json_lib.loads(raw_response) - else: - response = {} psp = self._get_psp(response, headers) return AdyenResult(message=response, status_code=status_code, psp=psp, raw_request=raw_request, raw_response=raw_response) - def _handle_http_error(self, url, response_obj, status_code, psp_ref, - raw_request, raw_response, headers): - """This function handles the non 200 responses from Adyen, raising an + @staticmethod + def _raise_http_error(url, response_obj, status_code, psp_ref, + raw_request, raw_response, headers): + """This function handles the non 2xx responses from Adyen, raising an error that should provide more information. Args: @@ -456,7 +427,7 @@ class AdyenClient(object): headers(dict): headers of the response Returns: - None + None: It never returns """ if response_obj == {}: @@ -484,9 +455,9 @@ class AdyenClient(object): elif status_code == 500: raise AdyenAPICommunicationError(message, raw_request, raw_response, url, psp_ref, headers, status_code, error_code) - else: - raise AdyenAPIResponseError(message, raw_request, raw_response, url, psp_ref, headers, status_code, - error_code) + + raise AdyenAPIResponseError(message, raw_request, raw_response, url, psp_ref, headers, status_code, + error_code) @staticmethod def _get_psp(response, headers): diff --git a/Adyen/httpclient.py b/Adyen/httpclient.py index 954aba5..4b8d310 100644 --- a/Adyen/httpclient.py +++ b/Adyen/httpclient.py @@ -1,5 +1,3 @@ -#!/bin/python - from __future__ import absolute_import, division, unicode_literals try: @@ -49,7 +47,6 @@ class HTTPClient(object): self.timeout = timeout - def _pycurl_request( self, method,
Adyen/adyen-python-api-library
72bd79756c6fe5de567e7ca0e61b27d304d7e8c0
diff --git a/test/ConfigurationTest.py b/test/BalancePlatformTest.py similarity index 87% rename from test/ConfigurationTest.py rename to test/BalancePlatformTest.py index 3bbb9f0..fe29b68 100644 --- a/test/ConfigurationTest.py +++ b/test/BalancePlatformTest.py @@ -1,5 +1,6 @@ -import Adyen import unittest + +import Adyen from Adyen import settings try: @@ -8,7 +9,7 @@ except ImportError: from .BaseTest import BaseTest -class TestManagement(unittest.TestCase): +class TestBalancePlatform(unittest.TestCase): adyen = Adyen.Adyen() client = adyen.client @@ -117,3 +118,22 @@ class TestManagement(unittest.TestCase): json=None, xapikey="YourXapikey" ) + + def test_update_network_token(self): + request = { + "status": "closed" + } + self.adyen.client = self.test.create_client_from_file(202, request) + + result = self.adyen.balancePlatform.network_tokens_api.update_network_token(request, 'TK123ABC') + + self.assertEqual(202, result.status_code) + self.assertEqual({}, result.message) + self.assertEqual("", result.raw_response) + self.adyen.client.http_client.request.assert_called_once_with( + 'PATCH', + f'{self.balance_platform_url}/networkTokens/TK123ABC', + headers={'adyen-library-name': 'adyen-python-api-library', 'adyen-library-version': settings.LIB_VERSION}, + json=request, + xapikey="YourXapikey" + ) diff --git a/test/BaseTest.py b/test/BaseTest.py index 08a97bc..c910acb 100644 --- a/test/BaseTest.py +++ b/test/BaseTest.py @@ -7,7 +7,7 @@ import json from Adyen import httpclient -class BaseTest(): +class BaseTest: def __init__(self, adyen): self.ady = adyen diff --git a/test/ManagementTest.py b/test/ManagementTest.py index a5914b1..7e34681 100644 --- a/test/ManagementTest.py +++ b/test/ManagementTest.py @@ -19,18 +19,19 @@ class TestManagement(unittest.TestCase): def test_get_company_account(self): request = None - id = "YOUR_COMPANY_ACCOUNT" + company_id = "YOUR_COMPANY_ACCOUNT" self.adyen.client = self.test.create_client_from_file(200, request, "test/mocks/" "management/" "get_company_account" ".json") - result = self.adyen.management.account_company_level_api.get_company_account(companyId=id) - self.assertEqual(id, result.message['id']) + result = self.adyen.management.account_company_level_api.get_company_account(companyId=company_id) + + self.assertEqual(company_id, result.message['id']) self.adyen.client.http_client.request.assert_called_once_with( 'GET', - f'{self.management_url}/companies/{id}', + f'{self.management_url}/companies/{company_id}', headers={'adyen-library-name': 'adyen-python-api-library', 'adyen-library-version': settings.LIB_VERSION}, json=None, xapikey="YourXapikey" @@ -43,23 +44,29 @@ class TestManagement(unittest.TestCase): "management/" "post_me_allowed" "_origins.json") + result = self.adyen.management.my_api_credential_api.add_allowed_origin(request) - originId = result.message['id'] + self.assertEqual("YOUR_DOMAIN", result.message['domain']) + + def test_no_content(self): self.adyen.client = self.test.create_client_from_file(204, {}, "test/mocks/" "management/" "no_content.json") - result = self.adyen.management.my_api_credential_api.remove_allowed_origin(originId) + origin_id = 'YOUR_DOMAIN_ID' + + self.adyen.management.my_api_credential_api.remove_allowed_origin(origin_id) + self.adyen.client.http_client.request.assert_called_once_with( 'DELETE', - f'{self.management_url}/me/allowedOrigins/{originId}', + f'{self.management_url}/me/allowedOrigins/{origin_id}', headers={'adyen-library-name': 'adyen-python-api-library', 'adyen-library-version': 
settings.LIB_VERSION}, json=None, xapikey="YourXapikey" ) - def test_update_a_store(self): + def test_update_store(self): request = { "address": { "line1": "1776 West Pinewood Avenue", @@ -73,19 +80,34 @@ class TestManagement(unittest.TestCase): "management/" "update_a_store" ".json") - storeId = "YOUR_STORE_ID" - merchantId = "YOUR_MERCHANT_ACCOUNT_ID" - result = self.adyen.management.account_store_level_api.update_store(request, merchantId, storeId) + store_id = "YOUR_STORE_ID" + merchant_id = "YOUR_MERCHANT_ACCOUNT_ID" + + result = self.adyen.management.account_store_level_api.update_store(request, merchant_id, store_id) + self.adyen.client.http_client.request.assert_called_once_with( 'PATCH', - f'{self.management_url}/merchants/{merchantId}/stores/{storeId}', + f'{self.management_url}/merchants/{merchant_id}/stores/{store_id}', headers={'adyen-library-name': 'adyen-python-api-library', 'adyen-library-version': settings.LIB_VERSION}, json=request, xapikey="YourXapikey" ) - self.assertEqual(storeId, result.message['id']) + self.assertEqual(store_id, result.message['id']) self.assertEqual("1776 West Pinewood Avenue", result.message['address']['line1']) + def test_reassign_terminal(self): + request = { + 'storeId': 'ST123ABC', + 'inventory': False, + } + self.adyen.client = self.test.create_client_from_file(200, request) + + result = self.adyen.management.terminals_terminal_level_api.reassign_terminal(request, 'AMS1-2345') + + self.assertEqual(200, result.status_code) + self.assertEqual({}, result.message) + self.assertEqual("", result.raw_response) + def test_create_a_user(self): request = { "name": { @@ -108,12 +130,14 @@ class TestManagement(unittest.TestCase): "management/" "create_a_user" ".json") - companyId = "YOUR_COMPANY_ACCOUNT" - result = self.adyen.management.users_company_level_api.create_new_user(request, companyId) + company_id = "YOUR_COMPANY_ACCOUNT" + + result = self.adyen.management.users_company_level_api.create_new_user(request, company_id) + self.assertEqual(request['name']['firstName'], result.message['name']['firstName']) self.adyen.client.http_client.request.assert_called_once_with( 'POST', - f'{self.management_url}/companies/{companyId}/users', + f'{self.management_url}/companies/{company_id}/users', json=request, headers={'adyen-library-name': 'adyen-python-api-library', 'adyen-library-version': settings.LIB_VERSION}, xapikey="YourXapikey" @@ -127,13 +151,15 @@ class TestManagement(unittest.TestCase): "get_list_of" "_android_apps" ".json") - companyId = "YOUR_COMPANY_ACCOUNT" - result = self.adyen.management.android_files_company_level_api.list_android_apps(companyId) + company_id = "YOUR_COMPANY_ACCOUNT" + + result = self.adyen.management.android_files_company_level_api.list_android_apps(company_id) + self.assertEqual("ANDA422LZ223223K5F694GCCF732K8", result.message['androidApps'][0]['id']) - def test_query_paramaters(self): + def test_query_parameters(self): request = {} - companyId = "YOUR_COMPANY_ACCOUNT" + company_id = "YOUR_COMPANY_ACCOUNT" query_parameters = { 'pageNumber': 1, 'pageSize': 10 @@ -143,11 +169,13 @@ class TestManagement(unittest.TestCase): "test/mocks/" "management/" "get_list_of_merchant_accounts.json") - result = self.adyen.management.account_company_level_api. \ - list_merchant_accounts(companyId, query_parameters=query_parameters) + + self.adyen.management.account_company_level_api. 
\ + list_merchant_accounts(company_id, query_parameters=query_parameters) + self.adyen.client.http_client.request.assert_called_once_with( 'GET', - f'{self.management_url}/companies/{companyId}/merchants?pageNumber=1&pageSize=10', + f'{self.management_url}/companies/{company_id}/merchants?pageNumber=1&pageSize=10', headers={'adyen-library-name': 'adyen-python-api-library', 'adyen-library-version': settings.LIB_VERSION}, json=None, xapikey="YourXapikey"
`TerminalsTerminalLevelApi.reassign_terminal` throws JSONDecodeError **Describe the bug** All calls to `TerminalsTerminalLevelApi.reassign_terminal` throw a JSONDecodeError **To Reproduce** ```python from Adyen import AdyenClient from Adyen.services.management import TerminalsTerminalLevelApi API_KEY = '<redacted>' STORE_ID = 'ST3224Z223225T5JQTRDD7CRZ' TERMINAL_ID = 'AMS1-000168223606144' client = AdyenClient(xapikey=API_KEY) api = TerminalsTerminalLevelApi(client=client) api.reassign_terminal({ 'storeId': STORE_ID, 'inventory': False, }, TERMINAL_ID) ``` Output: ``` Traceback (most recent call last): File "/Users/luhn/Code/revenue/sandbox/adyentest.py", line 12, in <module> api.reassign_terminal({ File "/Users/luhn/.pyenv/versions/revenue/lib/python3.10/site-packages/Adyen/services/management/terminals_terminal_level_api.py", line 30, in reassign_terminal return self.client.call_adyen_api(request, self.service, method, endpoint, idempotency_key, **kwargs) File "/Users/luhn/.pyenv/versions/revenue/lib/python3.10/site-packages/Adyen/client.py", line 369, in call_adyen_api adyen_result = self._handle_response(url, raw_response, raw_request, File "/Users/luhn/.pyenv/versions/revenue/lib/python3.10/site-packages/Adyen/client.py", line 435, in _handle_response response = json_lib.loads(raw_response) File "/Users/luhn/.pyenv/versions/3.10.1/lib/python3.10/json/__init__.py", line 346, in loads return _default_decoder.decode(s) File "/Users/luhn/.pyenv/versions/3.10.1/lib/python3.10/json/decoder.py", line 337, in decode obj, end = self.raw_decode(s, idx=_w(s, 0).end()) File "/Users/luhn/.pyenv/versions/3.10.1/lib/python3.10/json/decoder.py", line 355, in raw_decode raise JSONDecodeError("Expecting value", s, err.value) from None json.decoder.JSONDecodeError: Expecting value: line 1 column 1 (char 0) ``` **Expected behavior** No exception should be thrown. **Screenshots** N/A **Desktop (please complete the following information):** - OS: Mac OS, Python 3.10 - Browser: N/A - Version: 10.0.0 **Additional context** According to [the docs](https://docs.adyen.com/api-explorer/Management/3/post/terminals/_terminalId_/reassign), reassigning a terminal returns HTTP 200 with no content. My own testing confirms this: ``` curl -i https://management-test.adyen.com/v3/terminals/AMS1-000168223606144/reassign -d '{"storeId": "ST3224Z223225T5JQTRDD7CRZ", "inventory": false}' -H 'Content-Type: application/json' -H 'x-API-key: <redacted>' HTTP/1.1 200 traceparent: 00-36fb314f5ca8069a20974823e9986efd-9f224b0d4601a27c-01 Set-Cookie: <redacted> pspReference: GVTHZQPNN8JSTC82 requestid: GVTHZQPNN8JSTC82 Content-Type: application/json;charset=utf-8 Transfer-Encoding: chunked Date: Mon, 13 Nov 2023 23:45:44 GMT ``` The SDK expects the body to be valid JSON, except for HTTP 204. https://github.com/Adyen/adyen-python-api-library/blob/d6253f98202f4ef136d9859895e75a4c599bb1af/Adyen/client.py#L434-L437 Personally I think the SDK is right and the API is wrong, especially since the API declares the response is JSON (`Content-Type: application/json;charset=utf-8`) yet does not return valid JSON.
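A minimal sketch of the tolerant handling that this record's patch introduces: an empty or otherwise non-JSON body on a 2xx response becomes an empty dict instead of raising JSONDecodeError, and error handling only kicks in for non-2xx codes. parse_adyen_response and RuntimeError below are illustrative stand-ins for the client's _handle_response and its Adyen-specific exceptions.

```python
import json

def parse_adyen_response(raw_response: str, status_code: int) -> dict:
    try:
        body = json.loads(raw_response)
    except json.JSONDecodeError:
        body = {}  # e.g. the empty HTTP 200 from /reassign
    if status_code not in (200, 201, 202, 204):
        raise RuntimeError(f"HTTP {status_code}: {raw_response!r}")
    return body

print(parse_adyen_response("", 200))  # {} instead of a JSONDecodeError
```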
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/ManagementTest.py::TestManagement::test_reassign_terminal", "test/BalancePlatformTest.py::TestBalancePlatform::test_update_network_token" ]
[ "test/ManagementTest.py::TestManagement::test_create_a_user", "test/ManagementTest.py::TestManagement::test_update_store", "test/ManagementTest.py::TestManagement::test_no_content", "test/ManagementTest.py::TestManagement::test_get_list_of_android_apps", "test/ManagementTest.py::TestManagement::test_get_company_account", "test/ManagementTest.py::TestManagement::test_my_api_credential_api", "test/ManagementTest.py::TestManagement::test_query_parameters", "test/BalancePlatformTest.py::TestBalancePlatform::test_creating_balance_account", "test/BalancePlatformTest.py::TestBalancePlatform::test_creating_payment_instrument_group", "test/BalancePlatformTest.py::TestBalancePlatform::test_get_transaction_rule", "test/BalancePlatformTest.py::TestBalancePlatform::test_creating_payment_instrument", "test/BalancePlatformTest.py::TestBalancePlatform::test_get_balance_platform", "test/BalancePlatformTest.py::TestBalancePlatform::test_creating_account_holder" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2023-11-17T12:47:32Z"
mit
Alexei-Kornienko__schematics_to_swagger-7
diff --git a/schematics_to_swagger/__init__.py b/schematics_to_swagger/__init__.py index d108f3f..d203de0 100644 --- a/schematics_to_swagger/__init__.py +++ b/schematics_to_swagger/__init__.py @@ -54,17 +54,24 @@ def _map_schematics_type(t): def model_to_definition(model): - fields = model.fields.items() + properties = {} + required = [] + + for field_name, field in model.fields.items(): + if field_name.startswith(f'_{model.__name__}'): + continue # Exclude private fields + properties[field_name] = _map_schematics_type(field) + if getattr(field, 'required'): + required.append(field_name) + result_info = { 'type': 'object', 'title': model.__name__, 'description': model.__doc__, - 'properties': {k: _map_schematics_type(v) for k, v in fields} + 'properties': properties } - required = [k for k, v in fields if getattr(v, 'required')] if required: result_info['required'] = required - return result_info
Alexei-Kornienko/schematics_to_swagger
3ddc537a8ed7682e9bb709ebd749b99d7ef09473
diff --git a/tests/models.py b/tests/models.py index 5392711..7cd4582 100644 --- a/tests/models.py +++ b/tests/models.py @@ -16,3 +16,10 @@ class WeatherStats(Model): last_report = types.ModelType(WeatherReport) prev_reports = types.ListType(types.ModelType(WeatherReport)) date_list = types.ListType(types.DateTimeType()) + + +class WeatherPrivateData(Model): + """Some sample model with private field""" + city = types.StringType(max_length=50, metadata={'readOnly': True}) + temperature = types.DecimalType(required=True) + __private_information = types.StringType(max_length=50) diff --git a/tests/test_model.py b/tests/test_model.py index ddeabe3..1ed6fba 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -53,6 +53,23 @@ WEATHER_STATS_DEF = { } }, } +WEATHER_PRIVATE_DATA = { + 'title': 'WeatherPrivateData', + 'type': 'object', + 'description': 'Some sample model with private field', + 'properties': { + 'city': { + 'type': 'string', + 'maxLength': 50, + 'readOnly': True + }, + 'temperature': { + 'type': 'number', + 'format': 'double' + } + }, + 'required': ['temperature'] +} def test_model_to_definition(): @@ -64,7 +81,8 @@ def test_model_to_definition(): def test_read_models_from_module(): expected = { 'WeatherReport': WEATHER_REPORT_DEFINITION, - 'WeatherStats': WEATHER_STATS_DEF + 'WeatherStats': WEATHER_STATS_DEF, + 'WeatherPrivateData': WEATHER_PRIVATE_DATA } data = schematics_to_swagger.read_models_from_module(models) assert expected == data @@ -74,3 +92,9 @@ def test_compound_type(): expected = WEATHER_STATS_DEF data = schematics_to_swagger.model_to_definition(models.WeatherStats) assert expected == data + + +def test_private_fields(): + expected = WEATHER_PRIVATE_DATA + definition = schematics_to_swagger.model_to_definition(models.WeatherPrivateData) + assert expected == definition
Hide private model fields in swagger doc
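Why the patch filters on f'_{model.__name__}': Python name-mangles double-underscore class attributes, so a field declared as __private_information on WeatherPrivateData is actually stored under _WeatherPrivateData__private_information. A small illustrative sketch (public_field_names is a made-up helper, not part of the library):

```python
class WeatherPrivateData:
    __private_information = "secret"

# Name mangling in action: the attribute gets a _ClassName prefix.
print([name for name in vars(WeatherPrivateData) if not name.startswith("__")])
# ['_WeatherPrivateData__private_information']

def public_field_names(model_name, field_names):
    """Drop fields whose mangled name marks them as private."""
    return [n for n in field_names if not n.startswith(f"_{model_name}")]

print(public_field_names(
    "WeatherPrivateData",
    ["city", "temperature", "_WeatherPrivateData__private_information"],
))
# ['city', 'temperature']
```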
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_model.py::test_private_fields", "tests/test_model.py::test_read_models_from_module" ]
[ "tests/test_model.py::test_model_to_definition", "tests/test_model.py::test_compound_type" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-11-20T22:11:16Z"
mit
AmiiThinks__driving_gridworld-13
diff --git a/driving_gridworld/road.py b/driving_gridworld/road.py index cb519ef..559362f 100644 --- a/driving_gridworld/road.py +++ b/driving_gridworld/road.py @@ -142,13 +142,12 @@ def combinations(iterable, r, collection=tuple): class Road(object): - def __init__(self, num_rows, car, obstacles, speed_limit): - if speed_limit < car.speed: + def __init__(self, num_rows, car, obstacles): + if num_rows + 1 < car.speed: raise ValueError("Car's speed above speed limit!") self._num_rows = num_rows self._num_columns = 4 self._car = car - self._speed_limit = speed_limit self._obstacles = obstacles self._available_spaces = {} for pos in product(range(0, self._car.speed), range(4)): @@ -159,6 +158,20 @@ class Road(object): if disallowed_position in self._available_spaces: del self._available_spaces[disallowed_position] + def speed_limit(self): + '''The hard speed limit on this road. + + Taking the `UP` action when traveling at the speed limit has no effect. + + Set according to the headlight range since overdriving the + headlights too much breaks the physical plausibility of the game + due to the way we reusing obstacles to simulate arbitrarily long + roads with many obstacles. This is not too much of a restriction + though because even overdriving the headlights by one unit is + completely unsafe. + ''' + return self._num_rows + 1 + def obstacle_outside_car_path(self, obstacle): return (obstacle.col < 0 or obstacle.col >= self._num_columns or obstacle.row >= self._num_rows) @@ -198,7 +211,7 @@ class Road(object): state. The reward function is deterministic. ''' - next_car = self._car.next(action, self._speed_limit) + next_car = self._car.next(action, self.speed_limit()) for positions, reveal_indices in ( self.every_combination_of_revealed_obstacles()): @@ -225,8 +238,7 @@ class Road(object): reward += self._car.reward() if self._car.col == 0 or self._car.col == 3: reward -= 4 * self._car.speed - next_road = self.__class__(self._num_rows, next_car, - next_obstacles, self._speed_limit) + next_road = self.__class__(self._num_rows, next_car, next_obstacles) yield (next_road, prob, reward) def to_key(self, show_walls=False):
AmiiThinks/driving_gridworld
fbc47c68cfade4e7d95ba59a3990dfef196389a6
diff --git a/test/road_test.py b/test/road_test.py index ae22a47..d8aeb36 100644 --- a/test/road_test.py +++ b/test/road_test.py @@ -9,9 +9,8 @@ import pytest def test_transition_probs_without_obstacles_are_always_1(): num_rows = 4 obstacles = [] - speed_limit = 1 car_inst = Car(0, 0, 1) - road_test = Road(num_rows, car_inst, obstacles, speed_limit) + road_test = Road(num_rows, car_inst, obstacles) for a in ACTIONS: for next_state, prob, reward in road_test.successors(a): @@ -21,9 +20,7 @@ def test_transition_probs_without_obstacles_are_always_1(): @pytest.mark.parametrize("obst", [Bump(0, 0), Pedestrian(0, 0)]) def test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road(obst): num_rows = 2 - speed_limit = 1 - - road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit) + road_test = Road(num_rows, Car(1, 1, 1), [obst]) patient = [ (positions, reveal_indices) for positions, reveal_indices in @@ -36,9 +33,7 @@ def test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_ @pytest.mark.parametrize("action", ACTIONS) def test_transition_probs_with_one_obstacle_are_1(obst, action): num_rows = 2 - speed_limit = 1 - - road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit) + road_test = Road(num_rows, Car(1, 1, 1), [obst]) probs = [ prob for next_state, prob, reward in road_test.successors(action) @@ -50,9 +45,7 @@ def test_transition_probs_with_one_obstacle_are_1(obst, action): @pytest.mark.parametrize("action", ACTIONS) def test_transition_probs_with_invisible_obstacle(obst, action): num_rows = 2 - speed_limit = 1 - - road_test = Road(num_rows, Car(1, 1, 1), [obst], speed_limit) + road_test = Road(num_rows, Car(1, 1, 1), [obst]) probs = [ prob for next_state, prob, reward in road_test.successors(action) @@ -72,9 +65,8 @@ def test_transition_probs_with_invisible_obstacle(obst, action): def test_driving_faster_gives_a_larger_reward(action, current_speed): num_rows = 4 obstacles = [] - speed_limit = 4 car = Car(0, 1, current_speed) - road_test = Road(num_rows, car, obstacles, speed_limit) + road_test = Road(num_rows, car, obstacles) for next_state, prob, reward in road_test.successors(action): assert reward == float(current_speed) @@ -82,12 +74,10 @@ def test_driving_faster_gives_a_larger_reward(action, current_speed): def test_road_cannot_start_with_car_going_faster_than_speed_limit(): num_rows = 4 obstacles = [] - speed_limit = 1 - current_speed = 2 + current_speed = 6 car = Car(0, 0, current_speed) - with pytest.raises(ValueError): - road_test = Road(num_rows, car, obstacles, speed_limit) + road_test = Road(num_rows, car, obstacles) @pytest.mark.parametrize("car", [Car(0, 0, 1), Car(0, 3, 1)]) @@ -95,20 +85,28 @@ def test_road_cannot_start_with_car_going_faster_than_speed_limit(): def test_receive_negative_reward_for_driving_off_the_road(car, action): num_rows = 4 obstacles = [] - speed_limit = 2 - road_test = Road(num_rows, car, obstacles, speed_limit) + road_test = Road(num_rows, car, obstacles) for next_state, prob, reward in road_test.successors(action): assert reward < 0 + + @pytest.mark.parametrize("obst", [Bump(-1, -1), Pedestrian(0, -1)]) @pytest.mark.parametrize("action", ACTIONS) @pytest.mark.parametrize("speed", [1, 2, 3]) def test_number_of_successors_invisible_obstacle_and_variable_speeds( obst, action, speed): num_rows = 2 - speed_limit = 3 - road_test = Road(num_rows, Car(1, 1, speed), [obst], speed_limit) + road_test = Road(num_rows, Car(1, 1, speed), [obst]) probs = [ prob for next_state, prob, reward 
in road_test.successors(action) ] assert len(probs) == 4 * speed + 1 + + +def test_speed_limit_equals_number_of_rows_plus_one(): + num_rows = 2 + obstacles = [] + car = Car(0, 0, 1) + road_test = Road(num_rows, car, obstacles) + assert road_test.speed_limit() == num_rows + 1
Enforce a hard limit on the speed limit in `Road` to the number of rows + 1. If the speed limit is larger than this, then the physical plausibility of the simulation breaks, because the number of possible obstacle encounters across a fixed distance can depend on the car's speed and the range of its headlights (the number of rows).
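A toy sketch of the rule this record encodes; ToyRoad is illustrative and not the package's Road class. The hard speed limit is tied to the headlight range (the number of visible rows), so speed_limit() returns num_rows + 1 and cars starting above that are rejected, matching the new test.

```python
class ToyRoad:
    def __init__(self, num_rows, car_speed):
        # Mirrors the patched constructor check: reject speeds above rows + 1.
        if car_speed > num_rows + 1:
            raise ValueError("Car's speed above speed limit!")
        self.num_rows = num_rows
        self.car_speed = car_speed

    def speed_limit(self):
        return self.num_rows + 1

print(ToyRoad(num_rows=2, car_speed=1).speed_limit())  # 3

try:
    ToyRoad(num_rows=4, car_speed=6)
except ValueError as err:
    print(err)  # 6 exceeds 4 + 1, so the car is rejected
```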
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/road_test.py::test_transition_probs_without_obstacles_are_always_1", "test/road_test.py::test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road[obst0]", "test/road_test.py::test_no_obstacles_revealed_is_the_only_valid_set_of_revealed_obstacles_when_all_obstacles_already_on_road[obst1]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[0-obst0]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[0-obst1]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[1-obst0]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[1-obst1]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[2-obst0]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[2-obst1]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[3-obst0]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[3-obst1]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[4-obst0]", "test/road_test.py::test_transition_probs_with_one_obstacle_are_1[4-obst1]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[0-obst0]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[0-obst1]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[1-obst0]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[1-obst1]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[2-obst0]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[2-obst1]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[3-obst0]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[3-obst1]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[4-obst0]", "test/road_test.py::test_transition_probs_with_invisible_obstacle[4-obst1]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[1-0]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[1-1]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[1-2]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[1-3]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[1-4]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[2-0]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[2-1]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[2-2]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[2-3]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[2-4]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[3-0]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[3-1]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[3-2]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[3-3]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[3-4]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[4-0]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[4-1]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[4-2]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[4-3]", "test/road_test.py::test_driving_faster_gives_a_larger_reward[4-4]", "test/road_test.py::test_road_cannot_start_with_car_going_faster_than_speed_limit", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[0-car0]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[0-car1]", 
"test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[1-car0]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[1-car1]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[2-car0]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[2-car1]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[3-car0]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[3-car1]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[4-car0]", "test/road_test.py::test_receive_negative_reward_for_driving_off_the_road[4-car1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-0-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-0-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-1-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-1-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-2-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-2-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-3-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-3-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-4-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[1-4-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-0-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-0-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-1-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-1-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-2-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-2-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-3-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-3-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-4-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[2-4-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-0-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-0-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-1-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-1-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-2-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-2-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-3-obst0]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-3-obst1]", "test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-4-obst0]", 
"test/road_test.py::test_number_of_successors_invisible_obstacle_and_variable_speeds[3-4-obst1]", "test/road_test.py::test_speed_limit_equals_number_of_rows_plus_one" ]
[]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-06-12T21:08:06Z"
mit
AnalogJ__lexicon-336
diff --git a/lexicon/cli.py b/lexicon/cli.py index dbef1ae2..0b5425ce 100644 --- a/lexicon/cli.py +++ b/lexicon/cli.py @@ -14,12 +14,10 @@ from lexicon.parser import generate_cli_main_parser logger = logging.getLogger(__name__) # pylint: disable=C0103 -def generate_table_result(lexicon_logger, output=None, without_header=None): - """Convert returned JSON into a nice table for command line usage""" - try: - _ = (entry for entry in output) - except TypeError: - lexicon_logger.debug('Command output is not iterable, and then cannot ' +def generate_list_table_result(lexicon_logger, output=None, without_header=None): + """Convert returned data from list actions into a nice table for command line usage""" + if not isinstance(output, list): + lexicon_logger.debug('Command output is not a list, and then cannot ' 'be printed with --quiet parameter not enabled.') return None @@ -58,26 +56,43 @@ def generate_table_result(lexicon_logger, output=None, without_header=None): table.append(' '.join(row_list)) # Return table - return '\n'.join(table) + return os.linesep.join(table) -def handle_output(results, output_type): +def generate_table_results(output=None, without_header=None): + """Convert returned data from non-list actions into a nice table for command line usage""" + array = [] + str_output = str(output) + + if not without_header: + array.append('RESULT') + array.append('-' * max(6, len(str_output))) + + array.append(str_output) + return os.linesep.join(array) + + +def handle_output(results, output_type, action): """Print the relevant output for given output_type""" - if not output_type == 'QUIET': - if not output_type == 'JSON': - table = generate_table_result( + if output_type == 'QUIET': + return + + if not output_type == 'JSON': + if action == 'list': + table = generate_list_table_result( logger, results, output_type == 'TABLE-NO-HEADER') - if table: - print(table) else: - try: - _ = (entry for entry in results) - json_str = json.dumps(results) - if json_str: - print(json_str) - except TypeError: - logger.debug('Output is not a JSON, and then cannot ' - 'be printed with --output=JSON parameter.') + table = generate_table_results(results, output_type == 'TABLE-NO-HEADER') + if table: + print(table) + else: + try: + json_str = json.dumps(results) + if json_str: + print(json_str) + except TypeError: + logger.debug('Output is not JSON serializable, and then cannot ' + 'be printed with --output=JSON parameter.') def main(): @@ -101,7 +116,7 @@ def main(): results = client.execute() - handle_output(results, parsed_args.output) + handle_output(results, parsed_args.output, config.resolve('lexicon:action')) if __name__ == '__main__':
AnalogJ/lexicon
27106bded0bfa8d44ffe3f449ca2e4871588be0f
diff --git a/tests/test_output.py b/tests/test_output.py index f95ffbd5..f5673110 100644 --- a/tests/test_output.py +++ b/tests/test_output.py @@ -1,12 +1,9 @@ +""" Ensure that stdout corresponds to the given reference output """ from __future__ import absolute_import -import importlib import json import logging -import sys -from types import ModuleType from lexicon import cli -from lexicon.providers.base import Provider as BaseProvider logger = logging.getLogger(__name__) @@ -17,8 +14,6 @@ data = [ 'content': 'fake2', 'ttl': 3600} ] -# Ensure that stdout corresponds to the given reference output - def assert_correct_output(capsys, expected_output_lines): out, _ = capsys.readouterr() @@ -33,7 +28,7 @@ def test_output_function_outputs_json_as_table(capsys): 'fake2-id TXT fake2.example.com fake2 3600', ] - cli.handle_output(data, 'TABLE') + cli.handle_output(data, 'TABLE', 'list') assert_correct_output(capsys, expected_output_lines) @@ -43,12 +38,12 @@ def test_output_function_outputs_json_as_table_with_no_header(capsys): 'fake2-id TXT fake2.example.com fake2 3600', ] - cli.handle_output(data, 'TABLE-NO-HEADER') + cli.handle_output(data, 'TABLE-NO-HEADER', 'list') assert_correct_output(capsys, expected_output_lines) def test_output_function_outputs_json_as_json_string(capsys): - cli.handle_output(data, 'JSON') + cli.handle_output(data, 'JSON', 'list') out, _ = capsys.readouterr() json_data = json.loads(out) @@ -59,18 +54,18 @@ def test_output_function_outputs_json_as_json_string(capsys): def test_output_function_output_nothing_when_quiet(capsys): expected_output_lines = [] - cli.handle_output(data, 'QUIET') + cli.handle_output(data, 'QUIET', 'list') assert_correct_output(capsys, expected_output_lines) -def test_output_function_outputs_nothing_with_not_a_json_data(capsys): +def test_output_function_outputs_nothing_with_not_a_json_serializable(capsys): expected_output_lines = [] - cli.handle_output(True, 'TABLE') + cli.handle_output(object(), 'TABLE', 'list') assert_correct_output(capsys, expected_output_lines) - cli.handle_output(True, 'TABLE-NO-HEADER') + cli.handle_output(object(), 'TABLE-NO-HEADER', 'list') assert_correct_output(capsys, expected_output_lines) - cli.handle_output(True, 'JSON') + cli.handle_output(object(), 'JSON', 'list') assert_correct_output(capsys, expected_output_lines)
Memset provider: TypeError: string indices must be integers Hi, When using the Memset provider with the default table formatting I get this error: ```bash $ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 Traceback (most recent call last): File "/usr/local/bin/lexicon", line 11, in <module> sys.exit(main()) File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 133, in main handle_output(results, parsed_args.output) File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 109, in handle_output table = generate_table_result(logger, results, output_type == 'TABLE-NO-HEADER') File "/usr/local/lib/python2.7/dist-packages/lexicon/__main__.py", line 75, in generate_table_result array = [[row['id'], row['type'], row['name'], row['content'], row['ttl']] for row in output] TypeError: string indices must be integers ``` I think this is because `output` is a string not an array - when I added `print output` I got a string like `969f9caabe19859c11249333dd80aa15`. When I use `--output JSON` I get the same ID plus quotes: ```bash $ lexicon memset create example.com TXT --name _acme-challenge.example.com --content BLAH --ttl 300 --output JSON "969f9caabe19859c11249333dd80aa15" ``` I know Memset's not public so if you need any help to test it just let me know. For now I'll work around it with `--output QUIET` since I don't really care about the output here. Thanks! Dave
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_output.py::test_output_function_outputs_json_as_json_string", "tests/test_output.py::test_output_function_outputs_json_as_table", "tests/test_output.py::test_output_function_outputs_nothing_with_not_a_json_serializable", "tests/test_output.py::test_output_function_outputs_json_as_table_with_no_header", "tests/test_output.py::test_output_function_output_nothing_when_quiet" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-12-27T22:27:28Z"
mit
AngryMaciek__angry-moran-simulator-25
diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index db7d90e..b0d5816 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -40,6 +40,7 @@ jobs: black --check moranpycess/MoranProcess.py black --check moranpycess/MoranProcess2D.py black --check moranpycess/MoranProcess3D.py + black --check moranpycess/exceptions.py black --check tests/unit/context.py black --check tests/unit/Individual.py black --check tests/unit/MoranProcess.py @@ -54,6 +55,7 @@ jobs: flake8 --max-line-length=101 --ignore F401,E231,W503,E741 moranpycess/MoranProcess.py flake8 --max-line-length=101 --ignore F401,E231,W503,E741 moranpycess/MoranProcess2D.py flake8 --max-line-length=101 --ignore F401,E231,W503,E741 moranpycess/MoranProcess3D.py + flake8 --max-line-length=88 moranpycess/exceptions.py flake8 --max-line-length=88 --ignore F401,E402 tests/unit/context.py flake8 --max-line-length=88 tests/unit/Individual.py flake8 --max-line-length=88 tests/unit/MoranProcess.py diff --git a/moranpycess/MoranProcess.py b/moranpycess/MoranProcess.py index 6e96db2..b966519 100644 --- a/moranpycess/MoranProcess.py +++ b/moranpycess/MoranProcess.py @@ -114,12 +114,16 @@ def __init__( == TransitionMatrix.shape[1] == len(label_list) ) - # check if the values are correct - for v in np.sum(TransitionMatrix, axis=1): - assert v == 1.0 except AssertionError as e: e.args += ("Invalid Transition Matrix",) raise + # check if the values are correct + for v in np.sum(TransitionMatrix, axis=1): + if v != 1.0: + raise moranpycess.IncorrectValueError( + parameter="Transition Matrix", + message="Transition probabilities need to add up to 1.0.", + ) self.TransitionMatrix = copy.deepcopy(TransitionMatrix) @property diff --git a/moranpycess/MoranProcess2D.py b/moranpycess/MoranProcess2D.py index 6012f53..6f84655 100644 --- a/moranpycess/MoranProcess2D.py +++ b/moranpycess/MoranProcess2D.py @@ -126,12 +126,16 @@ def __init__( == TransitionMatrix.shape[1] == len(label_list) ) - # check if the values are correct - for v in np.sum(TransitionMatrix, axis=1): - assert v == 1.0 except AssertionError as e: e.args += ("Invalid Transition Matrix",) raise + # check if the values are correct + for v in np.sum(TransitionMatrix, axis=1): + if v != 1.0: + raise moranpycess.IncorrectValueError( + parameter="Transition Matrix", + message="Transition probabilities need to add up to 1.0.", + ) self.TransitionMatrix = copy.deepcopy(TransitionMatrix) @property diff --git a/moranpycess/MoranProcess3D.py b/moranpycess/MoranProcess3D.py index 2036c52..8d7c898 100644 --- a/moranpycess/MoranProcess3D.py +++ b/moranpycess/MoranProcess3D.py @@ -128,12 +128,16 @@ def __init__( == TransitionMatrix.shape[1] == len(label_list) ) - # check if the values are correct - for v in np.sum(TransitionMatrix, axis=1): - assert v == 1.0 except AssertionError as e: e.args += ("Invalid Transition Matrix",) raise + # check if the values are correct + for v in np.sum(TransitionMatrix, axis=1): + if v != 1.0: + raise moranpycess.IncorrectValueError( + parameter="Transition Matrix", + message="Transition probabilities need to add up to 1.0.", + ) self.TransitionMatrix = copy.deepcopy(TransitionMatrix) @property diff --git a/moranpycess/__init__.py b/moranpycess/__init__.py index a1dcf59..e399ea2 100644 --- a/moranpycess/__init__.py +++ b/moranpycess/__init__.py @@ -18,3 +18,4 @@ from .MoranProcess import MoranProcess from .MoranProcess2D import MoranProcess2D from .MoranProcess3D import MoranProcess3D +from .exceptions import IncorrectValueError diff --git 
a/moranpycess/exceptions.py b/moranpycess/exceptions.py new file mode 100644 index 0000000..e065e4c --- /dev/null +++ b/moranpycess/exceptions.py @@ -0,0 +1,57 @@ +""" +############################################################################## +# +# Custom Exceptions +# +# AUTHOR: Maciej_Bak +# AFFILIATION: University_of_Basel +# AFFILIATION: Swiss_Institute_of_Bioinformatics +# CONTACT: wsciekly.maciek@gmail.com +# CREATED: 01-04-2021 +# LICENSE: MIT +# +############################################################################## +""" + + +class Error(Exception): + """Base class for other exceptions. + + Args: + Exception (Exception): built-in Exception class + """ + + pass + + +class IncorrectValueError(Error): + """Handling incorrect values of user's arguments. + + Args: + Error (Error): Base class for other exceptions. + """ + + def __init__( + self, + parameter, + message="Please check the documentation for expected argument values.", + ): + """Class initializer. + + Args: + parameter (str): parameter name + message (str, optional): error message. + Defaults to "Please check the documentation + for expected argument values.". + """ + self.parameter = parameter + self.message = message + super().__init__(self.message) + + def __str__(self): + """Display the error message. + + Returns: + str: error message + """ + return f"Incorrect value for {self.parameter}. {self.message}"
AngryMaciek/angry-moran-simulator
3f82c988f0bb53365081ef437914c0286b200b49
diff --git a/tests/unit/MoranProcess.py b/tests/unit/MoranProcess.py index 7c8acd1..a987f5a 100644 --- a/tests/unit/MoranProcess.py +++ b/tests/unit/MoranProcess.py @@ -213,7 +213,7 @@ def test_classMoranProcessWrongInit(self): label_list = ["A", "B", "C"] BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess( size_list=size_list, label_list=label_list, @@ -225,7 +225,7 @@ def test_classMoranProcessWrongInit(self): label_list = ["A", "B"] BirthPayoffMatrix = np.array([[10, 20, 20], [30, 40, 40], [1, 1, 1]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess( size_list=size_list, label_list=label_list, @@ -237,7 +237,7 @@ def test_classMoranProcessWrongInit(self): label_list = ["A", "B"] BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20, 20], [30, 40, 40], [1, 1, 1]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess( size_list=size_list, label_list=label_list, @@ -250,7 +250,7 @@ def test_classMoranProcessWrongInit(self): BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20], [30, 40]]) TransitionMatrix = np.array([[0.0], [0.0]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess( size_list=size_list, label_list=label_list, @@ -264,7 +264,10 @@ def test_classMoranProcessWrongInit(self): BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20], [30, 40]]) TransitionMatrix = np.array([[0.5, 0.4], [0.5, 0.5]]) - with pytest.raises(Exception): + expected_error_msg = "Incorrect value for Transition Matrix." + expected_error_msg += " " + expected_error_msg += "Transition probabilities need to add up to 1.0." 
+ with pytest.raises(moranpycess.IncorrectValueError, match=expected_error_msg): moranpycess.MoranProcess( size_list=size_list, label_list=label_list, diff --git a/tests/unit/MoranProcess2D.py b/tests/unit/MoranProcess2D.py index 8b7cfbf..938416d 100644 --- a/tests/unit/MoranProcess2D.py +++ b/tests/unit/MoranProcess2D.py @@ -64,7 +64,7 @@ def test_classMoranProcess2DWrongInit(self): grid = np.array([["A", "A"], ["A", "B"]]) BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, @@ -78,7 +78,7 @@ def test_classMoranProcess2DWrongInit(self): grid = np.array([["A", "A"], ["A", "B"]]) BirthPayoffMatrix = np.array([[10, 20, 20], [30, 40, 40], [1, 1, 1]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, @@ -92,7 +92,7 @@ def test_classMoranProcess2DWrongInit(self): grid = np.array([["A", "A"], ["A", "B"]]) BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20, 20], [30, 40, 40], [1, 1, 1]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, @@ -106,7 +106,7 @@ def test_classMoranProcess2DWrongInit(self): grid = np.array([["A", "A"], ["C", "B"]]) BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, @@ -120,7 +120,7 @@ def test_classMoranProcess2DWrongInit(self): grid = np.array([["A", "A"], ["A", "B"]]) BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, @@ -135,7 +135,7 @@ def test_classMoranProcess2DWrongInit(self): BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20], [30, 40]]) TransitionMatrix = np.array([[0.0], [0.0]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, @@ -151,7 +151,10 @@ def test_classMoranProcess2DWrongInit(self): BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20], [30, 40]]) TransitionMatrix = np.array([[0.5, 0.4], [0.5, 0.5]]) - with pytest.raises(Exception): + expected_error_msg = "Incorrect value for Transition Matrix." + expected_error_msg += " " + expected_error_msg += "Transition probabilities need to add up to 1.0." 
+ with pytest.raises(moranpycess.IncorrectValueError, match=expected_error_msg): moranpycess.MoranProcess2D( size_list=size_list, label_list=label_list, diff --git a/tests/unit/MoranProcess3D.py b/tests/unit/MoranProcess3D.py index caa5705..8326ce4 100644 --- a/tests/unit/MoranProcess3D.py +++ b/tests/unit/MoranProcess3D.py @@ -64,7 +64,7 @@ def test_classMoranProcess3DWrongInit(self): grid = np.array([[["A", "A"], ["A", "B"]], [["A", "A"], ["A", "A"]]]) BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list, @@ -78,7 +78,7 @@ def test_classMoranProcess3DWrongInit(self): grid = np.array([[["A", "A"], ["A", "B"]], [["A", "A"], ["A", "A"]]]) BirthPayoffMatrix = np.array([[10, 20, 20], [30, 40, 40], [1, 1, 1]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list, @@ -92,7 +92,7 @@ def test_classMoranProcess3DWrongInit(self): grid = np.array([[["A", "A"], ["A", "B"]], [["A", "A"], ["A", "A"]]]) BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20, 20], [30, 40, 40], [1, 1, 1]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list, @@ -106,7 +106,7 @@ def test_classMoranProcess3DWrongInit(self): grid = np.array([[["A", "A"], ["C", "B"]], [["A", "A"], ["A", "A"]]]) BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list, @@ -120,7 +120,7 @@ def test_classMoranProcess3DWrongInit(self): grid = np.array([[["A", "A"], ["B", "B"]], [["A", "A"], ["A", "A"]]]) BirthPayoffMatrix = np.array([[10, 20], [30, 40]]) DeathPayoffMatrix = np.array([[1, 2], [3, 4]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list, @@ -135,7 +135,7 @@ def test_classMoranProcess3DWrongInit(self): BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20], [30, 40]]) TransitionMatrix = np.array([[0.0], [0.0]]) - with pytest.raises(Exception): + with pytest.raises(AssertionError): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list, @@ -151,7 +151,10 @@ def test_classMoranProcess3DWrongInit(self): BirthPayoffMatrix = np.array([[1, 2], [3, 4]]) DeathPayoffMatrix = np.array([[10, 20], [30, 40]]) TransitionMatrix = np.array([[0.5, 0.4], [0.5, 0.5]]) - with pytest.raises(Exception): + expected_error_msg = "Incorrect value for Transition Matrix." + expected_error_msg += " " + expected_error_msg += "Transition probabilities need to add up to 1.0." + with pytest.raises(moranpycess.IncorrectValueError, match=expected_error_msg): moranpycess.MoranProcess3D( size_list=size_list, label_list=label_list,
Custom exceptions * add exceptions file with custom exceptions as in the `mlem` project
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/MoranProcess.py::TestClass::test_classMoranProcessWrongInit", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2DWrongInit", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3DWrongInit" ]
[ "tests/unit/MoranProcess.py::TestClass::test_classMoranProcessInit", "tests/unit/MoranProcess.py::TestClass::test_classMoranProcess_roulette_wheel_selection_Birth", "tests/unit/MoranProcess.py::TestClass::test_classMoranProcess_roulette_wheel_selection_Death", "tests/unit/MoranProcess.py::TestClass::test_classMoranProcess_simulate", "tests/unit/MoranProcess.py::TestClass::test_plots", "tests/unit/MoranProcess.py::TestClass::test_MoranProcessWithTransitionMatrix", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2DInit", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2DUpdateBirthPayoff", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2DUpdateDeathPayoff", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2DUpdateBirthFitness", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2DUpdateDeathFitness", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2D_roulette_wheel_selection_Birth", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2D_roulette_wheel_selection_Death", "tests/unit/MoranProcess2D.py::TestClass::test_classMoranProcess2D_simulate", "tests/unit/MoranProcess2D.py::TestClass::test_plots2D", "tests/unit/MoranProcess2D.py::TestClass::test_MoranProcess2DWithTransitionMatrix", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3DInit", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3DUpdateBirthPayoff", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3DUpdateDeathPayoff", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3DUpdateBirthFitness", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3DUpdateDeathFitness", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3D_roulette_wheel_selection_Birth", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3D_roulette_wheel_selection_Death", "tests/unit/MoranProcess3D.py::TestClass::test_classMoranProcess3D_simulate", "tests/unit/MoranProcess3D.py::TestClass::test_plots3D", "tests/unit/MoranProcess3D.py::TestClass::test_MoranProcess3DWithTransitionMatrix" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2021-04-02T00:11:14Z"
mit
AnthonyBloomer__daftlistings-122
diff --git a/README.md b/README.md index 4fcc162..16ed94f 100644 --- a/README.md +++ b/README.md @@ -179,6 +179,29 @@ dublin_map.add_colorbar() dublin_map.save("ireland_rent.html") print("Done, please checkout the html file") ``` + +Search for apartments for rent in Dublin with an alarm and parking. + +```python +from daftlistings import Daft, Location, SearchType, PropertyType, Facility + +daft = Daft() +daft.set_location(Location.DUBLIN) +daft.set_search_type(SearchType.RESIDENTIAL_RENT) +daft.set_property_type(PropertyType.APARTMENT) +daft.set_facility(Facility.PARKING) +daft.set_facility(Facility.ALARM) + +listings = daft.search() + +for listing in listings: + print(listing.title) + print(listing.abbreviated_price) + print(listing.daft_link) + print() +``` + + ## Contributing - Fork the project and clone locally. diff --git a/daftlistings/daft.py b/daftlistings/daft.py index c46baeb..e2ff99a 100644 --- a/daftlistings/daft.py +++ b/daftlistings/daft.py @@ -58,6 +58,16 @@ class Daft: self._filters.append({"name": name, "values": [value]}) + def _add_and_filter(self, name: str, value: str): + if self._andFilters: + for f in self._andFilters: + if f["name"] == name: + if value not in f["values"]: + f["values"].append(value) + return + self._andFilters.append({"name": name, + "values": [value]}) + def _add_sort_filter(self, sort_filter: str): self._sort_filter = sort_filter @@ -153,6 +163,21 @@ class Daft: else: raise TypeError("Argument must be location.Location or string.") + def set_facility(self, facility: Facility): + if self._section == None: + raise ValueError('SearchType must be set before Facility') + else: + if isinstance(facility, Facility): + if self._section in [s.value for s in facility.valid_types]: + self._add_and_filter('facilities', facility.value) + else: + search_type = [(name,member) for name, member in SearchType.__members__.items() if member.value == self._section][0] + compatible_facilities = [f.name for f in Facility if search_type[1] in f.valid_types] + raise ValueError(f"Facility {facility.name} incompatible with SearchType {search_type[0]}\nThe following facilities are compatible with this SearchType:\n{compatible_facilities}") + else: + raise TypeError("Argument must be of type Facility") + + def set_sort_type(self, sort_type: SortType): if isinstance(sort_type, SortType): self._add_sort_filter(sort_type.value) @@ -178,6 +203,8 @@ class Daft: payload["section"] = self._section if self._filters: payload["filters"] = self._filters + if self._andFilters: + payload["andFilters"] = self._andFilters if self._ranges: payload["ranges"] = self._ranges if self._geoFilter: diff --git a/daftlistings/enums.py b/daftlistings/enums.py index b166c1a..04f2bd4 100644 --- a/daftlistings/enums.py +++ b/daftlistings/enums.py @@ -62,6 +62,42 @@ class MiscFilter(enum.Enum): TOILETS = "toilets" +class Facility(enum.Enum): + def __new__(cls, *args, **kwargs): + obj = object.__new__(cls) + obj._value_ = args[0] + return obj + + def __init__(self, _, valid_types): + self.valid_types = valid_types + + ALARM = ("alarm", [SearchType.RESIDENTIAL_SALE, SearchType.RESIDENTIAL_RENT, SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + CENTRAL_HEATING_GAS = ("gas-fired-central-heating", [SearchType.RESIDENTIAL_SALE]) + CENTRAL_HEATING_OIL = ("oil-fired-central-heating", [SearchType.RESIDENTIAL_SALE]) + PARKING = ("parking", [SearchType.RESIDENTIAL_SALE, SearchType.RESIDENTIAL_RENT, SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT, 
SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + WHEELCHAIR_ACCESS = ("wheelchair-access", [SearchType.RESIDENTIAL_SALE, SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + WIRED_FOR_CABLE_TELEVISION = ("wired-for-cable-television", [SearchType.RESIDENTIAL_SALE]) + CABLE_TELEVISION = ("cable-television", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + DISHWASHER = ("dishwasher", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + GARDEN_PATIO_BALCONY = ("garden-patio-balcony", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + CENTRAL_HEATING = ("central-heating", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + INTERNET = ("internet", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + MICROWAVE = ("microwave", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + PETS_ALLOWED = ("pets-allowed", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + SMOKING = ("smoking", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + SERVICED_PROPERTY = ("serviced-property", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + DRYER = ("dryer", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + WASHING_MACHINE = ("washing-machine", [SearchType.RESIDENTIAL_RENT, SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + ENSUITE = ("ensuite", [SearchType.SHARING, SearchType.STUDENT_ACCOMMODATION]) + CAT_5_CABLING = ("cat-5-cabling", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + CAT_6_CABLING = ("cat-6-data-cabling", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + KITCHEN_AREA = ("kitchen-area", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + MEETING_ROOMS = ("meeting-rooms", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + RECEPTION = ("reception", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + PHONE_LINES = ("phone-lines", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + TOILETS = ("toilets", [SearchType.COMMERCIAL_SALE, SearchType.COMMERCIAL_RENT]) + + class AddedSince(enum.Enum): DAYS_3 = "now-3d/d" DAYS_7 = "now-7d/d" diff --git a/examples/facilities.py b/examples/facilities.py new file mode 100644 index 0000000..e864f83 --- /dev/null +++ b/examples/facilities.py @@ -0,0 +1,16 @@ +from daftlistings import Daft, Location, SearchType, PropertyType, Facility + +daft = Daft() +daft.set_location(Location.DUBLIN) +daft.set_search_type(SearchType.RESIDENTIAL_RENT) +daft.set_property_type(PropertyType.APARTMENT) +daft.set_facility(Facility.PARKING) +daft.set_facility(Facility.ALARM) + +listings = daft.search() + +for listing in listings: + print(listing.title) + print(listing.abbreviated_price) + print(listing.daft_link) + print() \ No newline at end of file
AnthonyBloomer/daftlistings
bf730db6d229d0e76d9c773cf807022bc6045fa6
diff --git a/tests/test_daft_search.py b/tests/test_daft_search.py index 9942827..f14bbbb 100644 --- a/tests/test_daft_search.py +++ b/tests/test_daft_search.py @@ -10,6 +10,7 @@ from daftlistings import ( Listing, AddedSince, PropertyType, + Facility ) @@ -18,7 +19,8 @@ class DaftTest(unittest.TestCase): def test_search(self, mock_post): url = "https://search-gateway.dsch.ie/v1/listings" payload = { - "section": "new-homes", + "section": "residential-for-sale", + "andFilters": [{"name":"facilities", "values": ["alarm"]}], "ranges": [ {"name": "salePrice", "from": "250000", "to": "300000"}, {"name": "numBeds", "from": "3", "to": "3"}, @@ -38,7 +40,7 @@ class DaftTest(unittest.TestCase): daft = Daft() - daft.set_search_type(SearchType.NEW_HOMES) + daft.set_search_type(SearchType.RESIDENTIAL_SALE) daft.set_location(Location.KILDARE) daft.set_location("Kildare") daft.set_sort_type(SortType.PRICE_ASC) @@ -51,6 +53,7 @@ class DaftTest(unittest.TestCase): daft.set_max_floor_size(1000) daft.set_min_floor_size(1000) daft.set_added_since(AddedSince.DAYS_14) + daft.set_facility(Facility.ALARM) daft.search() mock_post.assert_called_with(url, headers=headers, json=payload)
Facility SearchType Combos The facility options available for filtering are heavily dependent on the `SearchType`. In my local version I have facilities filtering added and functioning but without any checking related to this. I was planning to resolve this issue before pushing it. It will probably make sense to solve this in the same way as whatever ends up being done for the PropertyType SearchType combos issue (https://github.com/AnthonyBloomer/daftlistings/issues/108#issue-849665232), although there are more differences in the available facilities across `SearchType` than there are in `PropertyType`. The facilities values by `SearchType` are as follows: ``` Buying Facilities: "value":"alarm" "value":"gas-fired-central-heating" "value":"oil-fired-central-heating" "value":"parking" "value":"wheelchair-access" "value":"wired-for-cable-television" Renting Facilities: "value":"alarm" "value":"cable-television" "value":"dishwasher" "value":"garden-patio-balcony" "value":"central-heating" "value":"internet" "value":"microwave" "value":"parking" "value":"pets-allowed" "value":"smoking" "value":"serviced-property" "value":"dryer" "value":"washing-machine" "value":"wheelchair-access" Share Facilities: "value":"alarm" "value":"cable-television" "value":"dishwasher" "value":"ensuite" "value":"garden-patio-balcony" "value":"central-heating" "value":"internet" "value":"microwave" "value":"parking" "value":"pets-allowed" "value":"serviced-property" "value":"smoking" "value":"dryer" "value":"washing-machine" "value":"wheelchair-access" New Homes Facilities: None Commercial Facilities: "value":"alarm" "value":"cat-5-cabling" "value":"cat-6-data-cabling" "value":"kitchen-area" "value":"meeting-rooms" "value":"reception" "value":"parking" "value":"phone-lines" "value":"toilets" ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_daft_search.py::DaftTest::test_listing", "tests/test_daft_search.py::DaftTest::test_search" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-04-05T12:36:54Z"
mit
AppImageCrafters__appimage-builder-98
diff --git a/appimagebuilder/builder/runtime/executables_wrapper.py b/appimagebuilder/builder/runtime/executables_wrapper.py index 05033e7..949c8ae 100644 --- a/appimagebuilder/builder/runtime/executables_wrapper.py +++ b/appimagebuilder/builder/runtime/executables_wrapper.py @@ -27,6 +27,8 @@ from appimagebuilder.common import file_utils class ExecutablesWrapper: + EXPORTED_FILES_PREFIX = "/tmp/appimage-" + def __init__( self, appdir_path: str, @@ -115,7 +117,10 @@ class ExecutablesWrapper: def _rewrite_shebang_using_env(self, executable): logging.info("Replacing SHEBANG on: %s" % executable.path) - local_env_path = "/tmp/appimage-" + self.env.get("APPIMAGE_UUID") + "-env" + local_env_path = "%s%s-env" % ( + self.EXPORTED_FILES_PREFIX, + self.env.get("APPIMAGE_UUID"), + ) tmp_path = executable.path.__str__() + ".tmp" output = open(tmp_path, "wb") try: @@ -136,7 +141,13 @@ class ExecutablesWrapper: def _write_rel_shebang(self, executable, local_env_path, output): output.write(b"#!%s" % local_env_path.encode()) - args_start = 2 if executable.shebang[0] == "/usr/bin/env" else 1 + shebang_main = executable.shebang[0] + if shebang_main.startswith("/usr/bin/env") or shebang_main.startswith( + self.EXPORTED_FILES_PREFIX + ): + args_start = 2 + else: + args_start = 1 bin_name = os.path.basename(executable.shebang[args_start - 1]) output.write(b" ") output.write(bin_name.encode())
AppImageCrafters/appimage-builder
cbc972cf65630312aab2cc814edf5d55acec3ac1
diff --git a/tests/builder/runtime/test_executables_wrapper.py b/tests/builder/runtime/test_executables_wrapper.py index c8a1068..b9edf2e 100644 --- a/tests/builder/runtime/test_executables_wrapper.py +++ b/tests/builder/runtime/test_executables_wrapper.py @@ -89,6 +89,22 @@ class TestExecutablesWrapper(TestCase): self.assertEqual(expected, result) + def test_wrap_previously_wrpped_interpreted_executable(self): + resolver = FakeAppRunBinariesResolver() + environment = Environment() + environment.set("APPIMAGE_UUID", "UUID") + wrapper = ExecutablesWrapper(self.data_dir, resolver, environment) + executable = InterpretedExecutable( + self.script_path, ["/tmp/appimage-OLD-UUID-env", "python3"] + ) + wrapper.wrap(executable) + + result = self.script_path.read_text() + expected = "#!/tmp/appimage-UUID-env python3\n" "1234567890\n" + self.assertTrue(os.access(self.bin_path, os.X_OK | os.R_OK)) + + self.assertEqual(expected, result) + def test_generate_executable_env(self): resolver = FakeAppRunBinariesResolver() environment = Environment()
Properly handle shebangs set in previous builds WARNING:root:Required interpreter 'appimage-f5d659fa-db6f-4a84-b46b-8893153ca973-env' could not be found in the AppDir while processing /home/msalvatore/appimage/../monkey-appdir/usr/src/monkey_island/cc/ui/node_modules/node-sass/src/libsass/script/ci-install-compiler
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/builder/runtime/test_executables_wrapper.py::TestExecutablesWrapper::test_wrap_previously_wrpped_interpreted_executable" ]
[ "tests/builder/runtime/test_executables_wrapper.py::TestExecutablesWrapper::test_generate_executable_env", "tests/builder/runtime/test_executables_wrapper.py::TestExecutablesWrapper::test_wrap_binary_executable", "tests/builder/runtime/test_executables_wrapper.py::TestExecutablesWrapper::test_wrap_interpreted_executable" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2021-02-19T02:39:17Z"
mit
Axelrod-Python__Axelrod-590
diff --git a/CHANGES.txt b/CHANGES.md similarity index 98% rename from CHANGES.txt rename to CHANGES.md index 083c60a5..c4b07ffe 100644 --- a/CHANGES.txt +++ b/CHANGES.md @@ -3,7 +3,7 @@ Internal improvements, progress bar, minor interface change Here are all the commits for this PR: -https://github.com/Axelrod-Python/Axelrod/compare/v0.0.31...master +https://github.com/Axelrod-Python/Axelrod/compare/v0.0.31...v1.0.0 This release is the first major release stating the stability and maturity of the library. diff --git a/axelrod/plot.py b/axelrod/plot.py index ca51518b..71dbaa5c 100644 --- a/axelrod/plot.py +++ b/axelrod/plot.py @@ -198,7 +198,7 @@ class Plot(object): # Ecological Plot - def stackplot(self, eco, title=None): + def stackplot(self, eco, title=None, logscale=True): if not self.matplotlib_installed: return None @@ -231,6 +231,7 @@ class Plot(object): ax.tick_params(direction='out') ax.set_yticklabels([]) - ax.set_xscale('log') + if logscale: + ax.set_xscale('log') return figure
Axelrod-Python/Axelrod
49f83b97c668fae0f11dd54e5db5b286d830c8f3
diff --git a/axelrod/tests/unit/test_plot.py b/axelrod/tests/unit/test_plot.py index d30026c6..9f936627 100644 --- a/axelrod/tests/unit/test_plot.py +++ b/axelrod/tests/unit/test_plot.py @@ -184,6 +184,8 @@ class TestPlot(unittest.TestCase): self.assertIsInstance( plot.stackplot(eco, title="dummy title"), matplotlib.pyplot.Figure) + self.assertIsInstance( + plot.stackplot(eco, logscale=False), matplotlib.pyplot.Figure) else: self.skipTest('matplotlib not installed')
Make log scale in eco plot optional It would be nice to be able to change it back to a normal scale if we don't want to reproduce a plot for a power of ten turns. (PR incoming, it's a 1 liner.)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "axelrod/tests/unit/test_plot.py::TestPlot::test_boxplot_dataset", "axelrod/tests/unit/test_plot.py::TestPlot::test_boxplot_xticks_labels", "axelrod/tests/unit/test_plot.py::TestPlot::test_boxplot_xticks_locations", "axelrod/tests/unit/test_plot.py::TestPlot::test_init", "axelrod/tests/unit/test_plot.py::TestPlot::test_init_from_resulsetfromfile", "axelrod/tests/unit/test_plot.py::TestPlot::test_lengthplot_dataset", "axelrod/tests/unit/test_plot.py::TestPlot::test_payoff_dataset", "axelrod/tests/unit/test_plot.py::TestPlot::test_winplot_dataset" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2016-05-13T16:32:21Z"
mit
Axelrod-Python__Axelrod-638
diff --git a/axelrod/strategies/finite_state_machines.py b/axelrod/strategies/finite_state_machines.py index defc4770..1c231d43 100644 --- a/axelrod/strategies/finite_state_machines.py +++ b/axelrod/strategies/finite_state_machines.py @@ -54,6 +54,7 @@ class FSMPlayer(Player): initial_state = 1 initial_action = C Player.__init__(self) + self.initial_state = initial_state self.initial_action = initial_action self.fsm = SimpleFSM(transitions, initial_state) @@ -67,6 +68,10 @@ class FSMPlayer(Player): self.state = self.fsm.state return action + def reset(self): + Player.reset(self) + self.fsm.state = self.initial_state + class Fortress3(FSMPlayer): """Finite state machine player specified in DOI:10.1109/CEC.2006.1688322.
Axelrod-Python/Axelrod
89651f45910f4b41a79c58358d9f5beca4197fc1
diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/tests/integration/test_matches.py new file mode 100644 index 00000000..b6241145 --- /dev/null +++ b/axelrod/tests/integration/test_matches.py @@ -0,0 +1,25 @@ +"""Tests for some expected match behaviours""" +import unittest +import axelrod + +from hypothesis import given +from hypothesis.strategies import integers +from axelrod.tests.property import strategy_lists + +C, D = axelrod.Actions.C, axelrod.Actions.D + +deterministic_strategies = [s for s in axelrod.ordinary_strategies + if not s().classifier['stochastic']] # Well behaved strategies + +class TestMatchOutcomes(unittest.TestCase): + + @given(strategies=strategy_lists(strategies=deterministic_strategies, + min_size=2, max_size=2), + turns=integers(min_value=1, max_value=20)) + def test_outcome_repeats(self, strategies, turns): + """A test that if we repeat 3 matches with deterministic and well + behaved strategies then we get the same result""" + players = [s() for s in strategies] + matches = [axelrod.Match(players, turns) for _ in range(3)] + self.assertEqual(matches[0].play(), matches[1].play()) + self.assertEqual(matches[1].play(), matches[2].play()) diff --git a/axelrod/tests/unit/test_finite_state_machines.py b/axelrod/tests/unit/test_finite_state_machines.py index 043834a1..d8147a59 100644 --- a/axelrod/tests/unit/test_finite_state_machines.py +++ b/axelrod/tests/unit/test_finite_state_machines.py @@ -111,6 +111,12 @@ class TestFSMPlayer(TestPlayer): fsm = player.fsm self.assertTrue(check_state_transitions(fsm.state_transitions)) + def test_reset_initial_state(self): + player = self.player() + player.fsm.state = -1 + player.reset() + self.assertFalse(player.fsm.state == -1) + class TestFortress3(TestFSMPlayer):
Finite state machine players don't reset properly ``` >>> import axelrod as axl >>> tft = axl.TitForTat() >>> predator = axl.Predator() >>> predator.fsm.state 1 >>> m = axl.Match((tft, predator), 2) >>> m.play() [('C', 'C'), ('C', 'D')] >>> predator.fsm.state 2 >>> predator.reset() >>> predator.fsm.state 2 ``` Stumbled on this working on #636 (writing a hypothesis strategy that contrite TfT reduces to TfT in 0 noise) so the above is reduced from seeing that when playing the same match again we get a different output: ``` >>> m = axl.Match((tft, predator), 2) >>> m.play() [('C', 'C'), ('C', 'C')] ``` Am going to work on a fix now and include a hypothesis test that checks that random deterministic matches give the same outcomes.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "axelrod/tests/integration/test_matches.py::TestMatchOutcomes::test_outcome_repeats", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_reset_initial_state", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_reset_initial_state" ]
[ "axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestPlayer::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_cooperator", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_defector", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_malformed_tables", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_tft", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayers::test_wsls", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestFSMPlayer::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestPredator::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_strategy", 
"axelrod/tests/unit/test_finite_state_machines.py::TestRaider::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestRipoff::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB1::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestSolutionB5::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_clone", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_initialisation", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_match_attributes", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_repr", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_reset", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_strategy", "axelrod/tests/unit/test_finite_state_machines.py::TestThumper::test_transitions", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3vsFortress3::test_rounds", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3vsTitForTat::test_rounds", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress3vsCooperator::test_rounds", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4vsFortress4::test_rounds", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4vsTitForTat::test_rounds", "axelrod/tests/unit/test_finite_state_machines.py::TestFortress4vsCooperator::test_rounds" ]
{ "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
"2016-06-19T20:45:17Z"
mit
Axelrod-Python__Axelrod-653
diff --git a/axelrod/__init__.py b/axelrod/__init__.py index 7cb2ebda..482df852 100644 --- a/axelrod/__init__.py +++ b/axelrod/__init__.py @@ -2,7 +2,7 @@ from __future__ import absolute_import # The order of imports matters! from .actions import Actions, flip_action -from .random_ import random_choice +from .random_ import random_choice, seed from .plot import Plot from .game import DefaultGame, Game from .player import init_args, is_basic, obey_axelrod, update_history, Player diff --git a/axelrod/random_.py b/axelrod/random_.py index 0c3bc72e..9dc783e5 100644 --- a/axelrod/random_.py +++ b/axelrod/random_.py @@ -1,4 +1,5 @@ import random +import numpy from axelrod import Actions @@ -21,3 +22,9 @@ def randrange(a, b): c = b - a r = c * random.random() return a + int(r) + + +def seed(seed): + """Sets a seed""" + random.seed(seed) + numpy.random.seed(seed) diff --git a/axelrod/strategies/_strategies.py b/axelrod/strategies/_strategies.py index 004108fc..7362e44e 100644 --- a/axelrod/strategies/_strategies.py +++ b/axelrod/strategies/_strategies.py @@ -56,7 +56,7 @@ from .sequence_player import SequencePlayer, ThueMorse, ThueMorseInverse from .titfortat import ( TitForTat, TitFor2Tats, TwoTitsForTat, Bully, SneakyTitForTat, SuspiciousTitForTat, AntiTitForTat, HardTitForTat, HardTitFor2Tats, - OmegaTFT, Gradual, ContriteTitForTat) + OmegaTFT, Gradual, ContriteTitForTat, SlowTitForTwoTats) # Note: Meta* strategies are handled in .__init__.py @@ -166,6 +166,7 @@ strategies = [ Ripoff, RiskyQLearner, Shubik, + SlowTitForTwoTats, SneakyTitForTat, SoftGrudger, SoftJoss, diff --git a/axelrod/strategies/titfortat.py b/axelrod/strategies/titfortat.py index fef73595..6086ca2a 100644 --- a/axelrod/strategies/titfortat.py +++ b/axelrod/strategies/titfortat.py @@ -386,3 +386,36 @@ class ContriteTitForTat(Player): Player.reset(self) self.contrite = False self._recorded_history = [] + + +class SlowTitForTwoTats(Player): + """ + A player plays C twice, then if the opponent plays the same move twice, + plays that move + """ + + name = 'Slow Tit For Two Tats' + classifier = { + 'memory_depth': 2, + 'stochastic': False, + 'makes_use_of': set(), + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + + def strategy(self, opponent): + + #Start with two cooperations + if len(self.history) < 2: + return C + + #Mimic if opponent plays the same move twice + if opponent.history[-2] == opponent.history[-1]: + return opponent.history[-1] + + #Otherwise cooperate + return C + + + diff --git a/docs/tutorials/advanced/index.rst b/docs/tutorials/advanced/index.rst index dcefec08..ea868106 100644 --- a/docs/tutorials/advanced/index.rst +++ b/docs/tutorials/advanced/index.rst @@ -13,3 +13,4 @@ Contents: making_tournaments.rst reading_and_writing_interactions.rst using_the_cache.rst + setting_a_seed.rst diff --git a/docs/tutorials/advanced/setting_a_seed.rst b/docs/tutorials/advanced/setting_a_seed.rst new file mode 100644 index 00000000..5459ee92 --- /dev/null +++ b/docs/tutorials/advanced/setting_a_seed.rst @@ -0,0 +1,35 @@ +.. _setting_a_seed: + +Setting a random seed +===================== + +The library has a variety of strategies whose behaviour is stochastic. To ensure +reproducible results a random seed should be set. As both Numpy and the standard +library are used for random number generation, both seeds need to be +set. 
To do this we can use the `seed` function:: + + >>> import axelrod as axl + >>> players = (axl.Random(), axl.MetaMixer()) # Two stochastic strategies + >>> axl.seed(0) + >>> axl.Match(players, turns=3).play() + [('D', 'C'), ('D', 'D'), ('C', 'D')] + +We obtain the same results is it is played with the same seed:: + + >>> axl.seed(0) + >>> axl.Match(players, turns=3).play() + [('D', 'C'), ('D', 'D'), ('C', 'D')] + +Note that this is equivalent to:: + + >>> import numpy + >>> import random + >>> players = (axl.Random(), axl.MetaMixer()) + >>> random.seed(0) + >>> numpy.random.seed(0) + >>> axl.Match(players, turns=3).play() + [('D', 'C'), ('D', 'D'), ('C', 'D')] + >>> numpy.random.seed(0) + >>> random.seed(0) + >>> axl.Match(players, turns=3).play() + [('D', 'C'), ('D', 'D'), ('C', 'D')]
Axelrod-Python/Axelrod
bc333844e10e389f9818e615bcc28c5c69daee94
diff --git a/axelrod/tests/integration/test_matches.py b/axelrod/tests/integration/test_matches.py index b6241145..d0018132 100644 --- a/axelrod/tests/integration/test_matches.py +++ b/axelrod/tests/integration/test_matches.py @@ -11,6 +11,10 @@ C, D = axelrod.Actions.C, axelrod.Actions.D deterministic_strategies = [s for s in axelrod.ordinary_strategies if not s().classifier['stochastic']] # Well behaved strategies +stochastic_strategies = [s for s in axelrod.ordinary_strategies + if s().classifier['stochastic']] + + class TestMatchOutcomes(unittest.TestCase): @given(strategies=strategy_lists(strategies=deterministic_strategies, @@ -23,3 +27,19 @@ class TestMatchOutcomes(unittest.TestCase): matches = [axelrod.Match(players, turns) for _ in range(3)] self.assertEqual(matches[0].play(), matches[1].play()) self.assertEqual(matches[1].play(), matches[2].play()) + + @given(strategies=strategy_lists(strategies=stochastic_strategies, + min_size=2, max_size=2), + turns=integers(min_value=1, max_value=20), + seed=integers(min_value=0, max_value=4294967295)) + def test_outcome_repeats_stochastic(self, strategies, turns, seed): + """a test to check that if a seed is set stochastic strategies give the + same result""" + results = [] + for _ in range(3): + axelrod.seed(seed) + players = [s() for s in strategies] + results.append(axelrod.Match(players, turns).play()) + + self.assertEqual(results[0], results[1]) + self.assertEqual(results[1], results[2]) diff --git a/axelrod/tests/integration/test_tournament.py b/axelrod/tests/integration/test_tournament.py index 7358c073..e90e2384 100644 --- a/axelrod/tests/integration/test_tournament.py +++ b/axelrod/tests/integration/test_tournament.py @@ -1,6 +1,7 @@ import unittest import axelrod import tempfile +import filecmp from axelrod.strategy_transformers import FinalTransformer @@ -60,6 +61,39 @@ class TestTournament(unittest.TestCase): actual_outcome = sorted(zip(self.player_names, scores)) self.assertEqual(actual_outcome, self.expected_outcome) + def test_repeat_tournament_deterministic(self): + """A test to check that tournament gives same results.""" + deterministic_players = [s() for s in axelrod.ordinary_strategies + if not s().classifier['stochastic']] + files = [] + for _ in range(2): + tournament = axelrod.Tournament(name='test', + players=deterministic_players, + game=self.game, turns=2, + repetitions=2) + files.append(tempfile.NamedTemporaryFile()) + tournament.play(progress_bar=False, filename=files[-1].name, + build_results=False) + self.assertTrue(filecmp.cmp(files[0].name, files[1].name)) + + def test_repeat_tournament_stochastic(self): + """ + A test to check that tournament gives same results when setting seed. 
+ """ + files = [] + for _ in range(2): + axelrod.seed(0) + stochastic_players = [s() for s in axelrod.ordinary_strategies + if s().classifier['stochastic']] + tournament = axelrod.Tournament(name='test', + players=stochastic_players, + game=self.game, turns=2, + repetitions=2) + files.append(tempfile.NamedTemporaryFile()) + tournament.play(progress_bar=False, filename=files[-1].name, + build_results=False) + self.assertTrue(filecmp.cmp(files[0].name, files[1].name)) + class TestNoisyTournament(unittest.TestCase): def test_noisy_tournament(self): diff --git a/axelrod/tests/unit/test_punisher.py b/axelrod/tests/unit/test_punisher.py index 29451c37..7aabf295 100644 --- a/axelrod/tests/unit/test_punisher.py +++ b/axelrod/tests/unit/test_punisher.py @@ -116,3 +116,4 @@ class TestInversePunisher(TestPlayer): self.assertEqual(P1.history, []) self.assertEqual(P1.grudged, False) self.assertEqual(P1.grudge_memory, 0) + \ No newline at end of file diff --git a/axelrod/tests/unit/test_random_.py b/axelrod/tests/unit/test_random_.py index 16046617..5ce4a483 100644 --- a/axelrod/tests/unit/test_random_.py +++ b/axelrod/tests/unit/test_random_.py @@ -1,9 +1,10 @@ """Test for the random strategy.""" +import numpy import random import unittest -from axelrod import random_choice, Actions +from axelrod import random_choice, seed, Actions C, D = Actions.C, Actions.D @@ -16,3 +17,17 @@ class TestRandom_(unittest.TestCase): self.assertEqual(random_choice(), C) random.seed(2) self.assertEqual(random_choice(), D) + + def test_set_seed(self): + """Test that numpy and stdlib random seed is set by axelrod seed""" + + numpy_random_numbers = [] + stdlib_random_numbers = [] + for _ in range(2): + seed(0) + numpy_random_numbers.append(numpy.random.random()) + stdlib_random_numbers.append(random.random()) + + self.assertEqual(numpy_random_numbers[0], numpy_random_numbers[1]) + self.assertEqual(stdlib_random_numbers[0], stdlib_random_numbers[1]) + diff --git a/axelrod/tests/unit/test_titfortat.py b/axelrod/tests/unit/test_titfortat.py index 33c606ae..6d39a52d 100644 --- a/axelrod/tests/unit/test_titfortat.py +++ b/axelrod/tests/unit/test_titfortat.py @@ -432,8 +432,35 @@ class TestContriteTitForTat(TestPlayer): self.assertEqual(opponent.history, [C, D, D, D]) self.assertFalse(ctft.contrite) + def test_reset_cleans_all(self): p = self.player() p.contrite = True p.reset() self.assertFalse(p.contrite) + +class TestSlowTitForTwoTats(TestPlayer): + + name = "Slow Tit For Two Tats" + player = axelrod.SlowTitForTwoTats + expected_classifier = { + 'memory_depth': 2, + 'stochastic': False, + 'makes_use_of': set(), + 'inspects_source': False, + 'manipulates_source': False, + 'manipulates_state': False + } + + def test_strategy(self): + """Starts by cooperating.""" + self.first_play_test(C) + + def test_effect_of_strategy(self): + """If opponent plays the same move twice, repeats last action of opponent history.""" + self.responses_test([C]*2, [C, C], [C]) + self.responses_test([C]*3, [C, D, C], [C]) + self.responses_test([C]*3, [C, D, D], [D]) + + + \ No newline at end of file
Add tests for reproducibility of stochastic results I'd like to add a property-based test similar to https://github.com/Nikoleta-v3/Axelrod/blob/635/axelrod/tests/integration/test_matches.py but one that checks that when setting a seed we get the same results for stochastic strategies (the tests there only check for 'well behaved deterministic' strategies). I have a hunch that this isn't actually true as we're not setting numpy's seed... If that's the case and both seeds need to be set, we should document how to get reproducible results and either: 1. Indicate that you should set both seeds (numpy and stdlib); 2. Write a little helper function that does this (and just document it: `axelrod.seed()` or something like that...).
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "axelrod/tests/integration/test_matches.py::TestMatchOutcomes::test_outcome_repeats", "axelrod/tests/integration/test_matches.py::TestMatchOutcomes::test_outcome_repeats_stochastic", "axelrod/tests/integration/test_tournament.py::TestTournament::test_full_tournament", "axelrod/tests/integration/test_tournament.py::TestTournament::test_parallel_play", "axelrod/tests/integration/test_tournament.py::TestTournament::test_repeat_tournament_deterministic", "axelrod/tests/integration/test_tournament.py::TestTournament::test_repeat_tournament_stochastic", "axelrod/tests/integration/test_tournament.py::TestTournament::test_serial_play", "axelrod/tests/integration/test_tournament.py::TestNoisyTournament::test_noisy_tournament", "axelrod/tests/integration/test_tournament.py::TestProbEndTournament::test_players_do_not_know_match_length", "axelrod/tests/unit/test_punisher.py::TestPlayer::test_clone", "axelrod/tests/unit/test_punisher.py::TestPlayer::test_initialisation", "axelrod/tests/unit/test_punisher.py::TestPlayer::test_match_attributes", "axelrod/tests/unit/test_punisher.py::TestPlayer::test_repr", "axelrod/tests/unit/test_punisher.py::TestPlayer::test_reset", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_clone", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_init", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_initialisation", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_match_attributes", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_repr", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_reset", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_reset_method", "axelrod/tests/unit/test_punisher.py::TestPunisher::test_strategy", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_clone", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_init", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_initialisation", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_match_attributes", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_repr", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_reset", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_reset_method", "axelrod/tests/unit/test_punisher.py::TestInversePunisher::test_strategy", "axelrod/tests/unit/test_random_.py::TestRandom_::test_return_values", "axelrod/tests/unit/test_random_.py::TestRandom_::test_set_seed", "axelrod/tests/unit/test_titfortat.py::TestPlayer::test_clone", "axelrod/tests/unit/test_titfortat.py::TestPlayer::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestPlayer::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestPlayer::test_repr", "axelrod/tests/unit/test_titfortat.py::TestPlayer::test_reset", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestTitForTat::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_clone", "axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_initialisation", 
"axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_repr", "axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_reset", "axelrod/tests/unit/test_titfortat.py::TestTitFor2Tats::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestTwoTitsForTat::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestBully::test_affect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestBully::test_clone", "axelrod/tests/unit/test_titfortat.py::TestBully::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestBully::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestBully::test_repr", "axelrod/tests/unit/test_titfortat.py::TestBully::test_reset", "axelrod/tests/unit/test_titfortat.py::TestBully::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestSneakyTitForTat::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_affect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestSuspiciousTitForTat::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_affect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestAntiTitForTat::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestHardTitForTat::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_clone", 
"axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_repr", "axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_reset", "axelrod/tests/unit/test_titfortat.py::TestHardTitFor2Tats::test_strategy", "axelrod/tests/unit/test_titfortat.py::OmegaTFT::test_clone", "axelrod/tests/unit/test_titfortat.py::OmegaTFT::test_initialisation", "axelrod/tests/unit/test_titfortat.py::OmegaTFT::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::OmegaTFT::test_repr", "axelrod/tests/unit/test_titfortat.py::OmegaTFT::test_reset", "axelrod/tests/unit/test_titfortat.py::OmegaTFT::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestOmegaTFTvsSTFT::test_rounds", "axelrod/tests/unit/test_titfortat.py::TestOmegaTFTvsAlternator::test_rounds", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_clone", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_output_from_literature", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_repr", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_reset", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_reset_cleans_all", "axelrod/tests/unit/test_titfortat.py::TestGradual::test_strategy", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_clone", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_is_tit_for_tat_with_no_noise", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_repr", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_reset", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_reset_cleans_all", "axelrod/tests/unit/test_titfortat.py::TestContriteTitForTat::test_strategy_with_noise", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_clone", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_effect_of_strategy", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_initialisation", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_match_attributes", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_repr", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_reset", "axelrod/tests/unit/test_titfortat.py::TestSlowTitForTwoTats::test_strategy" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-07-08T16:35:13Z"
mit
AzureAD__azure-activedirectory-library-for-python-120
diff --git a/adal/wstrust_response.py b/adal/wstrust_response.py index ecdc398..5b2f5ee 100644 --- a/adal/wstrust_response.py +++ b/adal/wstrust_response.py @@ -55,6 +55,35 @@ def scrub_rstr_log_message(response_str): return 'RSTR Response: ' + scrubbed_rstr +def findall_content(xml_string, tag): + """ + Given a tag name without any prefix, + this function returns a list of the raw content inside this tag as-is. + + >>> findall_content("<ns0:foo> what <bar> ever </bar> content </ns0:foo>", "foo") + [" what <bar> ever </bar> content "] + + Motivation: + + Usually we would use XML parser to extract the data by xpath. + However the ElementTree in Python will implicitly normalize the output + by "hoisting" the inner inline namespaces into the outmost element. + The result will be a semantically equivalent XML snippet, + but not fully identical to the original one. + While this effect shouldn't become a problem in all other cases, + it does not seem to fully comply with Exclusive XML Canonicalization spec + (https://www.w3.org/TR/xml-exc-c14n/), and void the SAML token signature. + SAML signature algo needs the "XML -> C14N(XML) -> Signed(C14N(Xml))" order. + + The binary extention lxml is probably the canonical way to solve this + (https://stackoverflow.com/questions/22959577/python-exclusive-xml-canonicalization-xml-exc-c14n) + but here we use this workaround, based on Regex, to return raw content as-is. + """ + # \w+ is good enough for https://www.w3.org/TR/REC-xml/#NT-NameChar + pattern = r"<(?:\w+:)?%(tag)s(?:[^>]*)>(.*)</(?:\w+:)?%(tag)s" % {"tag": tag} + return re.findall(pattern, xml_string, re.DOTALL) + + class WSTrustResponse(object): def __init__(self, call_context, response, wstrust_version): @@ -178,6 +207,15 @@ class WSTrustResponse(object): if self.token is None: raise AdalError("Unable to find any tokens in RSTR.") + @staticmethod + def _parse_token_by_re(raw_response): + for rstr in findall_content(raw_response, "RequestSecurityTokenResponse"): + token_types = findall_content(rstr, "TokenType") + tokens = findall_content(rstr, "RequestedSecurityToken") + if token_types and tokens: + return tokens[0].encode('us-ascii'), token_types[0] + + def parse(self): if not self._response: raise AdalError("Received empty RSTR response body.") @@ -195,7 +233,12 @@ class WSTrustResponse(object): str_fault_message = self.fault_message or 'NONE' error_template = 'Server returned error in RSTR - ErrorCode: {} : FaultMessage: {}' raise AdalError(error_template.format(str_error_code, str_fault_message)) - self._parse_token() + + token_found = self._parse_token_by_re(self._response) + if token_found: + self.token, self.token_type = token_found + else: # fallback to old logic + self._parse_token() finally: self._dom = None self._parents = None
AzureAD/azure-activedirectory-library-for-python
f8fd8ef1f45a4502209e5777f4cdea8397038a1e
diff --git a/tests/test_wstrust_response.py b/tests/test_wstrust_response.py index e0b1288..913ed87 100644 --- a/tests/test_wstrust_response.py +++ b/tests/test_wstrust_response.py @@ -36,6 +36,7 @@ except ImportError: from adal.constants import XmlNamespaces, Errors, WSTrustVersion from adal.wstrust_response import WSTrustResponse +from adal.wstrust_response import findall_content _namespaces = XmlNamespaces.namespaces _call_context = {'log_context' : {'correlation-id':'test-corr-id'}} @@ -101,5 +102,33 @@ class Test_wstrustresponse(unittest.TestCase): wstrustResponse = WSTrustResponse(_call_context, '<This is not parseable as an RSTR', WSTrustVersion.WSTRUST13) wstrustResponse.parse() + def test_findall_content_with_comparison(self): + content = """ + <saml:Assertion xmlns:saml="SAML:assertion"> + <ds:Signature xmlns:ds="http://www.w3.org/2000/09/xmldsig#"> + foo + </ds:Signature> + </saml:Assertion>""" + sample = ('<ns0:Wrapper xmlns:ns0="namespace0">' + + content + + '</ns0:Wrapper>') + + # Demonstrating how XML-based parser won't give you the raw content as-is + element = ET.fromstring(sample).findall('{SAML:assertion}Assertion')[0] + assertion_via_xml_parser = ET.tostring(element) + self.assertNotEqual(content, assertion_via_xml_parser) + self.assertNotIn(b"<ds:Signature>", assertion_via_xml_parser) + + # The findall_content() helper, based on Regex, will return content as-is. + self.assertEqual([content], findall_content(sample, "Wrapper")) + + def test_findall_content_for_real(self): + with open(os.path.join(os.getcwd(), 'tests', 'wstrust', 'RSTR.xml')) as f: + rstr = f.read() + wstrustResponse = WSTrustResponse(_call_context, rstr, WSTrustVersion.WSTRUST13) + wstrustResponse.parse() + self.assertIn("<X509Data>", rstr) + self.assertIn(b"<X509Data>", wstrustResponse.token) # It is in bytes + if __name__ == '__main__': unittest.main()
Issue with authenticating to Dynamics 365 CRM I am having issues authenticating to the Dynamics 365 CRM Web API via the Python ADAL library. Particularly, the acquire_token_with_username_password function. I can't seem to acquire an access token. Full error message below. The only "wrinkle" in my configuration is that our Azure AD is federated with an on-prem AD. Also, authentication seems to work using a device code & interactive login via the acquire_token_with_device_code function. The problem is I am writing a console application that is intended to be non-interactive. Has anyone else encountered this issue, or could otherwise explain how to resolve it? > adal.adal_error.AdalError: Get Token request returned http error: 400 and server response: {"error":"invalid_grant","error_description":"AADSTS70002: Error validating credentials. AADSTS50008: SAML token is invalid. AADSTS50006: The element with ID '_011236b3-e879-4bb5-b640-86577dda2a0a' was either unsigned or the signature was invalid.\r\nTrace ID: 7b0fbeae-5a37-42bd-af63-f1decf720926\r\nCorrelation ID: 3242f1a3-41be-49f3-ab2c-b643625b8d5b\r\nTimestamp: 2017-03-13 02:34:41Z","error_codes":[70002,50008,50006],"timestamp":"2017-03-13 02:34:41Z","trace_id":"7b0fbeae-5a37-42bd-af63-f1decf720926","correlation_id":"3242f1a3-41be-49f3-ab2c-b643625b8d5b"}
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_wstrust_response.py::Test_wstrustresponse::test_findall_content_for_real", "tests/test_wstrust_response.py::Test_wstrustresponse::test_findall_content_with_comparison", "tests/test_wstrust_response.py::Test_wstrustresponse::test_parse_error_happy_path", "tests/test_wstrust_response.py::Test_wstrustresponse::test_rstr_empty_string", "tests/test_wstrust_response.py::Test_wstrustresponse::test_rstr_none", "tests/test_wstrust_response.py::Test_wstrustresponse::test_rstr_unparseable_xml", "tests/test_wstrust_response.py::Test_wstrustresponse::test_token_parsing_happy_path" ]
[]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-02-21T01:10:22Z"
mit
AzureAD__azure-activedirectory-library-for-python-57
diff --git a/adal/authentication_context.py b/adal/authentication_context.py index 9e8ac06..6557bf3 100644 --- a/adal/authentication_context.py +++ b/adal/authentication_context.py @@ -26,6 +26,7 @@ #------------------------------------------------------------------------------ import os import threading +import warnings from .authority import Authority from . import argument @@ -44,9 +45,11 @@ class AuthenticationContext(object): https://github.com/AzureAD/azure-activedirectory-library-for-python ''' - def __init__(self, authority, validate_authority=None, cache=None): - '''Creates a new AuthenticationContext object. - + def __init__( + self, authority, validate_authority=None, cache=None, + api_version='1.0'): + '''Creates a new AuthenticationContext object. + By default the authority will be checked against a list of known Azure Active Directory authorities. If the authority is not recognized as one of these well known authorities then token acquisition will fail. @@ -62,13 +65,30 @@ class AuthenticationContext(object): the AuthenticationContext and are not shared unless it has been manually passed during the construction of other AuthenticationContexts. + :param api_version: (optional) Specifies API version using on the wire. + Historically it has a hardcoded default value as "1.0". + Developers are now encouraged to set it as None explicitly, + which means the underlying API version will be automatically chosen. + In next major release, this default value will be changed to None. ''' self.authority = Authority(authority, validate_authority is None or validate_authority) self._oauth2client = None self.correlation_id = None env_value = os.environ.get('ADAL_PYTHON_SSL_NO_VERIFY') + if api_version is not None: + warnings.warn( + """The default behavior of including api-version=1.0 on the wire + is now deprecated. + Future version of ADAL will change the default value to None. + + To ensure a smooth transition, you are recommended to explicitly + set it to None in your code now, and test out the new behavior. + + context = AuthenticationContext(..., api_version=None) + """, DeprecationWarning) self._call_context = { 'options': GLOBAL_ADAL_OPTIONS, + 'api_version': api_version, 'verify_ssl': None if env_value is None else not env_value # mainly for tracing through proxy } self._token_requests_with_user_code = {} diff --git a/adal/oauth2_client.py b/adal/oauth2_client.py index a7d6e78..dd26c4a 100644 --- a/adal/oauth2_client.py +++ b/adal/oauth2_client.py @@ -105,7 +105,9 @@ class OAuth2Client(object): def _create_token_url(self): parameters = {} - parameters[OAuth2.Parameters.AAD_API_VERSION] = '1.0' + if self._call_context.get('api_version'): + parameters[OAuth2.Parameters.AAD_API_VERSION] = self._call_context[ + 'api_version'] return urlparse('{}?{}'.format(self._token_endpoint, urlencode(parameters)))
AzureAD/azure-activedirectory-library-for-python
b9affeb5035e41a80e56053c054ce22ea3039b37
diff --git a/tests/test_api_version.py b/tests/test_api_version.py new file mode 100644 index 0000000..2a94873 --- /dev/null +++ b/tests/test_api_version.py @@ -0,0 +1,79 @@ +ο»Ώ#------------------------------------------------------------------------------ +# +# Copyright (c) Microsoft Corporation. +# All rights reserved. +# +# This code is licensed under the MIT License. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions : +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +#------------------------------------------------------------------------------ + +import warnings +try: + import unittest2 as unittest +except ImportError: + import unittest + +try: + from unittest import mock +except ImportError: + import mock + +import adal + +class TestAuthenticationContextApiVersionBehavior(unittest.TestCase): + + def test_api_version_default_value(self): + with warnings.catch_warnings(record=True) as caught_warnings: + warnings.simplefilter("always") + context = adal.AuthenticationContext( + "https://login.windows.net/tenant") + self.assertEqual(context._call_context['api_version'], '1.0') + if len(caught_warnings) == 1: + # It should be len(caught_warnings)==1, but somehow it works on + # all my local test environment but not on Travis-CI. + # So we relax this check, for now. + self.assertIn("deprecated", str(caught_warnings[0].message)) + + def test_explicitly_turn_off_api_version(self): + with warnings.catch_warnings(record=True) as caught_warnings: + warnings.simplefilter("always") + context = adal.AuthenticationContext( + "https://login.windows.net/tenant", api_version=None) + self.assertEqual(context._call_context['api_version'], None) + self.assertEqual(len(caught_warnings), 0) + +class TestOAuth2ClientApiVersionBehavior(unittest.TestCase): + + authority = mock.Mock(token_endpoint="https://example.com/token") + + def test_api_version_is_set(self): + client = adal.oauth2_client.OAuth2Client( + {"api_version": "1.0", "log_context": mock.Mock()}, self.authority) + self.assertIn('api-version=1.0', client._create_token_url().geturl()) + + def test_api_version_is_not_set(self): + client = adal.oauth2_client.OAuth2Client( + {"api_version": None, "log_context": mock.Mock()}, self.authority) + self.assertNotIn('api-version=1.0', client._create_token_url().geturl()) + +if __name__ == '__main__': + unittest.main() +
Retrieved token does not grant access to related resource (401 Error) Hi, we stumbled over what we think can be categorized as a bug in the library. We are able to successfully obtain a token for a given resource and client ID. However, the token does not grant us access to the resource. Whenever we call the resource with the `access_token`, we get an HTTP 401 response. When we use the .NET library with the exact same parameters, the token works. The difference we have found is that the Python library explicitly sends the parameter `api-version=1.0` to the OAuth2 endpoint: `POST https://login.microsoftonline.com/common/oauth2/token?api-version=1.0` Once we changed the following line in `oauth2_client.py` in the adal library, `return urlparse('{}?{}'.format(self._token_endpoint, urlencode(parameters)))` in the method `_create_token_url`, to `return urlparse(self._token_endpoint)`, we could access the resource. See also this question on StackOverflow; the latest answer is from us: http://stackoverflow.com/questions/37909332/401-unauthorized-making-rest-call-to-azure-api-app-using-bearer-token/
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_api_version.py::TestAuthenticationContextApiVersionBehavior::test_api_version_default_value", "tests/test_api_version.py::TestAuthenticationContextApiVersionBehavior::test_explicitly_turn_off_api_version", "tests/test_api_version.py::TestOAuth2ClientApiVersionBehavior::test_api_version_is_not_set" ]
[ "tests/test_api_version.py::TestOAuth2ClientApiVersionBehavior::test_api_version_is_set" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-11-17T18:24:15Z"
mit
AzureAD__azure-activedirectory-library-for-python-87
diff --git a/adal/cache_driver.py b/adal/cache_driver.py index fba053b..9683dca 100644 --- a/adal/cache_driver.py +++ b/adal/cache_driver.py @@ -164,11 +164,19 @@ class CacheDriver(object): now_plus_buffer = now + timedelta(minutes=Misc.CLOCK_BUFFER) if is_resource_specific and now_plus_buffer > expiry_date: - self._log.info('Cached token is expired. Refreshing: %s', expiry_date) - return self._refresh_expired_entry(entry) + if TokenResponseFields.REFRESH_TOKEN in entry: + self._log.info('Cached token is expired. Refreshing: %s', expiry_date) + return self._refresh_expired_entry(entry) + else: + self.remove(entry) + return None elif not is_resource_specific and entry.get(TokenResponseFields.IS_MRRT): - self._log.info('Acquiring new access token from MRRT token.') - return self._acquire_new_token_from_mrrt(entry) + if TokenResponseFields.REFRESH_TOKEN in entry: + self._log.info('Acquiring new access token from MRRT token.') + return self._acquire_new_token_from_mrrt(entry) + else: + self.remove(entry) + return None else: return entry diff --git a/adal/oauth2_client.py b/adal/oauth2_client.py index dd26c4a..979a292 100644 --- a/adal/oauth2_client.py +++ b/adal/oauth2_client.py @@ -191,10 +191,10 @@ class OAuth2Client(object): wire_response[OAuth2.ResponseParameters.CREATED_ON] = str(temp_date) if not wire_response.get(OAuth2.ResponseParameters.TOKEN_TYPE): - raise AdalError('wire_response is missing token_type') + raise AdalError('wire_response is missing token_type', wire_response) if not wire_response.get(OAuth2.ResponseParameters.ACCESS_TOKEN): - raise AdalError('wire_response is missing access_token') + raise AdalError('wire_response is missing access_token', wire_response) token_response = map_fields(wire_response, TOKEN_RESPONSE_MAP)
AzureAD/azure-activedirectory-library-for-python
006b8b7749ede41c2f28530134b151a957ab5689
diff --git a/tests/test_cache_driver.py b/tests/test_cache_driver.py new file mode 100644 index 0000000..b3c4e07 --- /dev/null +++ b/tests/test_cache_driver.py @@ -0,0 +1,58 @@ +#------------------------------------------------------------------------------ +# +# Copyright (c) Microsoft Corporation. +# All rights reserved. +# +# This code is licensed under the MIT License. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions : +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +#------------------------------------------------------------------------------ + +import unittest +try: + from unittest import mock +except ImportError: + import mock + +from adal.log import create_log_context +from adal.cache_driver import CacheDriver + + +class TestCacheDriver(unittest.TestCase): + def test_rt_less_item_wont_cause_exception(self): # Github issue #82 + rt_less_entry_came_from_previous_client_credentials_grant = { + "expiresIn": 3600, + "_authority": "https://login.microsoftonline.com/foo", + "resource": "spn:00000002-0000-0000-c000-000000000000", + "tokenType": "Bearer", + "expiresOn": "1999-05-22 16:31:46.202000", + "isMRRT": True, + "_clientId": "client_id", + "accessToken": "this is an AT", + } + refresh_function = mock.MagicMock(return_value={}) + cache_driver = CacheDriver( + {"log_context": create_log_context()}, "authority", "resource", + "client_id", mock.MagicMock(), refresh_function) + entry = cache_driver._refresh_entry_if_necessary( + rt_less_entry_came_from_previous_client_credentials_grant, False) + refresh_function.assert_not_called() # Otherwise it will cause an exception + self.assertIsNone(entry) +
Issue while trying to obtain a token using client credentials once the token has expired I am able to obtain a valid access token by issuing the following command ``` >>> token = context.acquire_token_with_client_credentials(RESOURCE, client_id, client_secret) ``` However, when I issue the same command after the above token has expired, I get the following error message. Please let me know if I am missing something here or if I am expected to issue a different command in order to obtain a new token. Thanks in advance. ``` >>> token = context.acquire_token_with_client_credentials(RESOURCE, client_id, client_secret) Traceback (most recent call last): File "<stdin>", line 1, in <module> File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/authentication_context.py", line 160, in acquire_token_with_client_credentials return self._acquire_token(token_func) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/authentication_context.py", line 109, in _acquire_token return token_func(self) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/authentication_context.py", line 158, in token_func return token_request.get_token_with_client_credentials(client_secret) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/token_request.py", line 304, in get_token_with_client_credentials token = self._find_token_from_cache() File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/token_request.py", line 128, in _find_token_from_cache return self._cache_driver.find(cache_query) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/cache_driver.py", line 182, in find is_resource_tenant_specific) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/cache_driver.py", line 171, in _refresh_entry_if_necessary return self._acquire_new_token_from_mrrt(entry) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/cache_driver.py", line 153, in _acquire_new_token_from_mrrt token_response = self._refresh_function(entry, self._resource) File "/Users/shetty/.virtualenvs/ad/lib/python2.7/site-packages/adal/token_request.py", line 137, in _get_token_with_token_response refresh_token = entry[TOKEN_RESPONSE_FIELDS.REFRESH_TOKEN] KeyError: 'refreshToken' ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cache_driver.py::TestCacheDriver::test_rt_less_item_wont_cause_exception" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2017-05-23T18:47:18Z"
mit
AzureAD__microsoft-authentication-library-for-python-236
diff --git a/msal/application.py b/msal/application.py index 0d38a1a..e8bf71b 100644 --- a/msal/application.py +++ b/msal/application.py @@ -554,7 +554,9 @@ class ClientApplication(object): for alias in self._get_authority_aliases(self.authority.instance): if not self.token_cache.find( self.token_cache.CredentialType.REFRESH_TOKEN, - target=scopes, + # target=scopes, # MUST NOT filter by scopes, because: + # 1. AAD RTs are scope-independent; + # 2. therefore target is optional per schema; query={"environment": alias}): # Skip heavy weight logic when RT for this alias doesn't exist continue
AzureAD/microsoft-authentication-library-for-python
283bdd8c40037f752a69c9bb93a3067126dd53e9
diff --git a/tests/test_application.py b/tests/test_application.py index 65b36b3..1716470 100644 --- a/tests/test_application.py +++ b/tests/test_application.py @@ -240,21 +240,30 @@ class TestClientApplicationForAuthorityMigration(unittest.TestCase): uid=uid, utid=utid, access_token=self.access_token, refresh_token="some refresh token"), }) # The add(...) helper populates correct home_account_id for future searching - - def test_get_accounts(self): - app = ClientApplication( + self.app = ClientApplication( self.client_id, authority=self.authority_url_in_app, token_cache=self.cache) - accounts = app.get_accounts() + + def test_get_accounts_should_find_accounts_under_different_alias(self): + accounts = self.app.get_accounts() self.assertNotEqual([], accounts) self.assertEqual(self.environment_in_cache, accounts[0].get("environment"), "We should be able to find an account under an authority alias") - def test_acquire_token_silent(self): - app = ClientApplication( - self.client_id, - authority=self.authority_url_in_app, token_cache=self.cache) - at = app.acquire_token_silent(self.scopes, self.account) - self.assertNotEqual(None, at) - self.assertEqual(self.access_token, at.get('access_token')) + def test_acquire_token_silent_should_find_at_under_different_alias(self): + result = self.app.acquire_token_silent(self.scopes, self.account) + self.assertNotEqual(None, result) + self.assertEqual(self.access_token, result.get('access_token')) + + def test_acquire_token_silent_should_find_rt_under_different_alias(self): + self.cache._cache["AccessToken"] = {} # A hacky way to clear ATs + class ExpectedBehavior(Exception): + pass + def helper(scopes, account, authority, *args, **kwargs): + if authority.instance == self.environment_in_cache: + raise ExpectedBehavior("RT of different alias being attempted") + self.app._acquire_token_silent_from_cache_and_possibly_refresh_it = helper + + with self.assertRaises(ExpectedBehavior): + self.app.acquire_token_silent(["different scope"], self.account)
MSAL Python 1.4.2 is no longer able to read from other MSAL caches **Describe the bug** MSAL.NET maintains cache consistency tests between libraries. It looks like the MSAL.py 1.4.2 update breaks the tests which ensure MSAL.py is able to read the token cache produced by .NET and Java. After downgrading to MSAL.py 1.4.1 (`pip install msal==1.4.1 --force-reinstall`), the tests start passing again. **To Reproduce** 1. AcquireTokenInteractive with MSAL.NET or with MSAL.Java and save the cache to a file (plaintext) 2. Configure MSAL.py to read the cache from the file 3. AcquireTokenSilent with MSAL.Python (using the same scope, client_id, etc.) **Expected behavior** AcquireTokenSilent should work (i.e. it should fetch the AT; there is no need for an RT refresh) **What you see instead** GetAccounts returns 1 account, however **AcquireTokenSilent returns None.** **The MSAL Python version you are using** 1.4.2 **Additional context** Works with 1.4.1. Impact: please do not ship this version to AzCLI or any other partners with whom we do cache sharing! Note: the Python test code is [here](https://github.com/AzureAD/microsoft-authentication-library-for-dotnet/blob/d9f182257fceb34d7510502f9f42d91afde5abbe/tests/CacheCompat/CommonCache.Test.MsalPython/TestMsalPython.py#L33)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_application.py::TestClientApplicationForAuthorityMigration::test_acquire_token_silent_should_find_rt_under_different_alias" ]
[ "tests/test_application.py::TokenCacheTestCase::testAddByAad", "tests/test_application.py::TokenCacheTestCase::testAddByAdfs", "tests/test_application.py::TokenCacheTestCase::test_key_id_is_also_recorded", "tests/test_application.py::TestHelperExtractCerts::test_extract_a_tag_enclosed_cert", "tests/test_application.py::TestHelperExtractCerts::test_extract_a_tag_less_public_cert", "tests/test_application.py::TestHelperExtractCerts::test_extract_multiple_tag_enclosed_certs", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_acquire_token_silent_will_suppress_error", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_acquire_token_silent_with_error_will_return_error", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_atswe_will_map_some_suberror_to_classification_as_is", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_atswe_will_map_some_suberror_to_classification_to_empty_string", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_cache_empty_will_be_returned_as_None", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_family_app_remove_account", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_known_orphan_app_will_skip_frt_and_only_use_its_own_rt", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_preexisting_family_app_will_attempt_frt_and_return_error", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_unknown_family_app_will_attempt_frt_and_join_family", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_unknown_orphan_app_will_attempt_frt_and_not_remove_it", "tests/test_application.py::TestClientApplicationForAuthorityMigration::test_acquire_token_silent_should_find_at_under_different_alias", "tests/test_application.py::TestClientApplicationForAuthorityMigration::test_get_accounts_should_find_accounts_under_different_alias" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-07-25T00:11:01Z"
mit
AzureAD__microsoft-authentication-library-for-python-312
diff --git a/msal/application.py b/msal/application.py index a1f5003..9fcad59 100644 --- a/msal/application.py +++ b/msal/application.py @@ -822,6 +822,7 @@ class ClientApplication(object): force_refresh=False, # type: Optional[boolean] claims_challenge=None, **kwargs): + access_token_from_cache = None if not (force_refresh or claims_challenge): # Bypass AT when desired or using claims query={ "client_id": self.client_id, @@ -839,17 +840,27 @@ class ClientApplication(object): now = time.time() for entry in matches: expires_in = int(entry["expires_on"]) - now - if expires_in < 5*60: + if expires_in < 5*60: # Then consider it expired continue # Removal is not necessary, it will be overwritten logger.debug("Cache hit an AT") - return { # Mimic a real response + access_token_from_cache = { # Mimic a real response "access_token": entry["secret"], "token_type": entry.get("token_type", "Bearer"), "expires_in": int(expires_in), # OAuth2 specs defines it as int } - return self._acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family( + if "refresh_on" in entry and int(entry["refresh_on"]) < now: # aging + break # With a fallback in hand, we break here to go refresh + return access_token_from_cache # It is still good as new + try: + result = self._acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family( authority, decorate_scope(scopes, self.client_id), account, force_refresh=force_refresh, claims_challenge=claims_challenge, **kwargs) + if (result and "error" not in result) or (not access_token_from_cache): + return result + except: # The exact HTTP exception is transportation-layer dependent + logger.exception("Refresh token failed") # Potential AAD outage? + return access_token_from_cache + def _acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family( self, authority, scopes, account, **kwargs): @@ -1013,6 +1024,9 @@ class PublicClientApplication(ClientApplication): # browser app or mobile app **kwargs): """Acquire token interactively i.e. via a local browser. + Prerequisite: In Azure Portal, configure the Redirect URI of your + "Mobile and Desktop application" as ``http://localhost``. + :param list scope: It is a list of case-sensitive strings. :param str prompt: diff --git a/msal/token_cache.py b/msal/token_cache.py index 34eff37..028635b 100644 --- a/msal/token_cache.py +++ b/msal/token_cache.py @@ -170,6 +170,9 @@ class TokenCache(object): } if data.get("key_id"): # It happens in SSH-cert or POP scenario at["key_id"] = data.get("key_id") + if "refresh_in" in response: + refresh_in = response["refresh_in"] # It is an integer + at["refresh_on"] = str(now + refresh_in) # Schema wants a string self.modify(self.CredentialType.ACCESS_TOKEN, at, at) if client_info and not event.get("skip_account_creation"):
AzureAD/microsoft-authentication-library-for-python
34e0b820c2e7324fbdf2c5d6042beb5817a03075
diff --git a/tests/test_application.py b/tests/test_application.py index 8d48a0a..3c3b464 100644 --- a/tests/test_application.py +++ b/tests/test_application.py @@ -319,3 +319,83 @@ class TestApplicationForClientCapabilities(unittest.TestCase): def test_both_claims_and_capabilities_none(self): self.assertEqual(_merge_claims_challenge_and_capabilities(None, None), None) + + +class TestApplicationForRefreshInBehaviors(unittest.TestCase): + """The following test cases were based on design doc here + https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FRefreshAtExpirationPercentage%2Foverview.md&version=GBdev&_a=preview&anchor=scenarios + """ + def setUp(self): + self.authority_url = "https://login.microsoftonline.com/common" + self.authority = msal.authority.Authority( + self.authority_url, MinimalHttpClient()) + self.scopes = ["s1", "s2"] + self.uid = "my_uid" + self.utid = "my_utid" + self.account = {"home_account_id": "{}.{}".format(self.uid, self.utid)} + self.rt = "this is a rt" + self.cache = msal.SerializableTokenCache() + self.client_id = "my_app" + self.app = ClientApplication( + self.client_id, authority=self.authority_url, token_cache=self.cache) + + def populate_cache(self, access_token="at", expires_in=86400, refresh_in=43200): + self.cache.add({ + "client_id": self.client_id, + "scope": self.scopes, + "token_endpoint": "{}/oauth2/v2.0/token".format(self.authority_url), + "response": TokenCacheTestCase.build_response( + access_token=access_token, + expires_in=expires_in, refresh_in=refresh_in, + uid=self.uid, utid=self.utid, refresh_token=self.rt), + }) + + def test_fresh_token_should_be_returned_from_cache(self): + # a.k.a. Return unexpired token that is not above token refresh expiration threshold + access_token = "An access token prepopulated into cache" + self.populate_cache(access_token=access_token, expires_in=900, refresh_in=450) + self.assertEqual( + access_token, + self.app.acquire_token_silent(['s1'], self.account).get("access_token")) + + def test_aging_token_and_available_aad_should_return_new_token(self): + # a.k.a. Attempt to refresh unexpired token when AAD available + self.populate_cache(access_token="old AT", expires_in=3599, refresh_in=-1) + new_access_token = "new AT" + self.app._acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family = ( + lambda *args, **kwargs: {"access_token": new_access_token}) + self.assertEqual( + new_access_token, + self.app.acquire_token_silent(['s1'], self.account).get("access_token")) + + def test_aging_token_and_unavailable_aad_should_return_old_token(self): + # a.k.a. Attempt refresh unexpired token when AAD unavailable + old_at = "old AT" + self.populate_cache(access_token=old_at, expires_in=3599, refresh_in=-1) + self.app._acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family = ( + lambda *args, **kwargs: {"error": "sth went wrong"}) + self.assertEqual( + old_at, + self.app.acquire_token_silent(['s1'], self.account).get("access_token")) + + def test_expired_token_and_unavailable_aad_should_return_error(self): + # a.k.a. 
Attempt refresh expired token when AAD unavailable + self.populate_cache(access_token="expired at", expires_in=-1, refresh_in=-900) + error = "something went wrong" + self.app._acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family = ( + lambda *args, **kwargs: {"error": error}) + self.assertEqual( + error, + self.app.acquire_token_silent_with_error( # This variant preserves error + ['s1'], self.account).get("error")) + + def test_expired_token_and_available_aad_should_return_new_token(self): + # a.k.a. Attempt refresh expired token when AAD available + self.populate_cache(access_token="expired at", expires_in=-1, refresh_in=-900) + new_access_token = "new AT" + self.app._acquire_token_silent_by_finding_rt_belongs_to_me_or_my_family = ( + lambda *args, **kwargs: {"access_token": new_access_token}) + self.assertEqual( + new_access_token, + self.app.acquire_token_silent(['s1'], self.account).get("access_token")) + diff --git a/tests/test_token_cache.py b/tests/test_token_cache.py index c846883..92ab7c3 100644 --- a/tests/test_token_cache.py +++ b/tests/test_token_cache.py @@ -29,30 +29,20 @@ class TokenCacheTestCase(unittest.TestCase): def build_response( # simulate a response from AAD uid=None, utid=None, # If present, they will form client_info access_token=None, expires_in=3600, token_type="some type", - refresh_token=None, - foci=None, - id_token=None, # or something generated by build_id_token() - error=None, + **kwargs # Pass-through: refresh_token, foci, id_token, error, refresh_in, ... ): response = {} if uid and utid: # Mimic the AAD behavior for "client_info=1" request response["client_info"] = base64.b64encode(json.dumps({ "uid": uid, "utid": utid, }).encode()).decode('utf-8') - if error: - response["error"] = error if access_token: response.update({ "access_token": access_token, "expires_in": expires_in, "token_type": token_type, }) - if refresh_token: - response["refresh_token"] = refresh_token - if id_token: - response["id_token"] = id_token - if foci: - response["foci"] = foci + response.update(kwargs) # Pass-through key-value pairs as top-level fields return response def setUp(self): @@ -222,6 +212,21 @@ class TokenCacheTestCase(unittest.TestCase): {}).get("key_id") self.assertEqual(my_key_id, cached_key_id, "AT should be bound to the key") + def test_refresh_in_should_be_recorded_as_refresh_on(self): # Sounds weird. Yep. + self.cache.add({ + "client_id": "my_client_id", + "scope": ["s2", "s1", "s3"], # Not in particular order + "token_endpoint": "https://login.example.com/contoso/v2/token", + "response": self.build_response( + uid="uid", utid="utid", # client_info + expires_in=3600, refresh_in=1800, access_token="an access token", + ), #refresh_token="a refresh token"), + }, now=1000) + refresh_on = self.cache._cache["AccessToken"].get( + 'uid.utid-login.example.com-accesstoken-my_client_id-contoso-s2 s1 s3', + {}).get("refresh_on") + self.assertEqual("2800", refresh_on, "Should save refresh_on") + def test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt(self): sample = { 'client_id': 'my_client_id', @@ -241,6 +246,7 @@ class TokenCacheTestCase(unittest.TestCase): 'uid.utid-login.example.com-refreshtoken-my_client_id--s2 s1 s3') ) + class SerializableTokenCacheTestCase(TokenCacheTestCase): # Run all inherited test methods, and have extra check in tearDown()
[Feature Request] refresh_in Support for Refresh_In (token response). This feature allows the service to control when MSAL should attempt to refresh the access token (ahead of its expiration). [API Review](https://identitydivision.visualstudio.com/DevEx/_git/AuthLibrariesApiReview?path=%2FRefreshAtExpirationPercentage%2Foverview.md&version=GBdev&_a=contents)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_application.py::TokenCacheTestCase::test_refresh_in_should_be_recorded_as_refresh_on", "tests/test_application.py::TestApplicationForRefreshInBehaviors::test_aging_token_and_available_aad_should_return_new_token", "tests/test_token_cache.py::TokenCacheTestCase::test_refresh_in_should_be_recorded_as_refresh_on", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_refresh_in_should_be_recorded_as_refresh_on" ]
[ "tests/test_application.py::TokenCacheTestCase::testAddByAad", "tests/test_application.py::TokenCacheTestCase::testAddByAdfs", "tests/test_application.py::TokenCacheTestCase::test_key_id_is_also_recorded", "tests/test_application.py::TokenCacheTestCase::test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt", "tests/test_application.py::TestHelperExtractCerts::test_extract_a_tag_enclosed_cert", "tests/test_application.py::TestHelperExtractCerts::test_extract_a_tag_less_public_cert", "tests/test_application.py::TestHelperExtractCerts::test_extract_multiple_tag_enclosed_certs", "tests/test_application.py::TestBytesConversion::test_bytes_to_bytes", "tests/test_application.py::TestBytesConversion::test_string_to_bytes", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_acquire_token_silent_will_suppress_error", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_acquire_token_silent_with_error_will_return_error", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_atswe_will_map_some_suberror_to_classification_as_is", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_atswe_will_map_some_suberror_to_classification_to_empty_string", "tests/test_application.py::TestClientApplicationAcquireTokenSilentErrorBehaviors::test_cache_empty_will_be_returned_as_None", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_family_app_remove_account", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_known_orphan_app_will_skip_frt_and_only_use_its_own_rt", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_preexisting_family_app_will_attempt_frt_and_return_error", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_unknown_family_app_will_attempt_frt_and_join_family", "tests/test_application.py::TestClientApplicationAcquireTokenSilentFociBehaviors::test_unknown_orphan_app_will_attempt_frt_and_not_remove_it", "tests/test_application.py::TestClientApplicationForAuthorityMigration::test_acquire_token_silent_should_find_at_under_different_alias", "tests/test_application.py::TestClientApplicationForAuthorityMigration::test_acquire_token_silent_should_find_rt_under_different_alias", "tests/test_application.py::TestClientApplicationForAuthorityMigration::test_get_accounts_should_find_accounts_under_different_alias", "tests/test_application.py::TestApplicationForClientCapabilities::test_both_claims_and_capabilities_none", "tests/test_application.py::TestApplicationForClientCapabilities::test_capabilities_and_id_token_claims_and_access_token_claims_merge", "tests/test_application.py::TestApplicationForClientCapabilities::test_capabilities_and_id_token_claims_merge", "tests/test_application.py::TestApplicationForClientCapabilities::test_no_capabilities_only_claims_merge", "tests/test_application.py::TestApplicationForClientCapabilities::test_only_client_capabilities_no_claims_merge", "tests/test_application.py::TestApplicationForRefreshInBehaviors::test_aging_token_and_unavailable_aad_should_return_old_token", "tests/test_application.py::TestApplicationForRefreshInBehaviors::test_expired_token_and_available_aad_should_return_new_token", "tests/test_application.py::TestApplicationForRefreshInBehaviors::test_expired_token_and_unavailable_aad_should_return_error", 
"tests/test_application.py::TestApplicationForRefreshInBehaviors::test_fresh_token_should_be_returned_from_cache", "tests/test_token_cache.py::TokenCacheTestCase::testAddByAad", "tests/test_token_cache.py::TokenCacheTestCase::testAddByAdfs", "tests/test_token_cache.py::TokenCacheTestCase::test_key_id_is_also_recorded", "tests/test_token_cache.py::TokenCacheTestCase::test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt", "tests/test_token_cache.py::SerializableTokenCacheTestCase::testAddByAad", "tests/test_token_cache.py::SerializableTokenCacheTestCase::testAddByAdfs", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_has_state_changed", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_key_id_is_also_recorded", "tests/test_token_cache.py::SerializableTokenCacheTestCase::test_old_rt_data_with_wrong_key_should_still_be_salvaged_into_new_rt" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-02-18T01:44:30Z"
mit
AzureAD__microsoft-authentication-library-for-python-433
diff --git a/msal/application.py b/msal/application.py index 05b77fc..04ad5fd 100644 --- a/msal/application.py +++ b/msal/application.py @@ -231,8 +231,23 @@ class ClientApplication(object): :param str authority: A URL that identifies a token authority. It should be of the format - https://login.microsoftonline.com/your_tenant - By default, we will use https://login.microsoftonline.com/common + ``https://login.microsoftonline.com/your_tenant`` + By default, we will use ``https://login.microsoftonline.com/common`` + + *Changed in version 1.17*: you can also use predefined constant + and a builder like this:: + + from msal.authority import ( + AuthorityBuilder, + AZURE_US_GOVERNMENT, AZURE_CHINA, AZURE_PUBLIC) + my_authority = AuthorityBuilder(AZURE_PUBLIC, "contoso.onmicrosoft.com") + # Now you get an equivalent of + # "https://login.microsoftonline.com/contoso.onmicrosoft.com" + + # You can feed such an authority to msal's ClientApplication + from msal import PublicClientApplication + app = PublicClientApplication("my_client_id", authority=my_authority, ...) + :param bool validate_authority: (optional) Turns authority validation on or off. This parameter default to true. :param TokenCache cache: diff --git a/msal/authority.py b/msal/authority.py index 0656011..14a6ad1 100644 --- a/msal/authority.py +++ b/msal/authority.py @@ -14,12 +14,19 @@ from .exceptions import MsalServiceError logger = logging.getLogger(__name__) + +# Endpoints were copied from here +# https://docs.microsoft.com/en-us/azure/active-directory/develop/authentication-national-cloud#azure-ad-authentication-endpoints +AZURE_US_GOVERNMENT = "login.microsoftonline.us" +AZURE_CHINA = "login.chinacloudapi.cn" +AZURE_PUBLIC = "login.microsoftonline.com" + WORLD_WIDE = 'login.microsoftonline.com' # There was an alias login.windows.net WELL_KNOWN_AUTHORITY_HOSTS = set([ WORLD_WIDE, - 'login.chinacloudapi.cn', + AZURE_CHINA, 'login-us.microsoftonline.com', - 'login.microsoftonline.us', + AZURE_US_GOVERNMENT, 'login.microsoftonline.de', ]) WELL_KNOWN_B2C_HOSTS = [ @@ -30,6 +37,19 @@ WELL_KNOWN_B2C_HOSTS = [ ] +class AuthorityBuilder(object): + def __init__(self, instance, tenant): + """A helper to save caller from doing string concatenation. + + Usage is documented in :func:`application.ClientApplication.__init__`. + """ + self._instance = instance.rstrip("/") + self._tenant = tenant.strip("/") + + def __str__(self): + return "https://{}/{}".format(self._instance, self._tenant) + + class Authority(object): """This class represents an (already-validated) authority. @@ -53,6 +73,8 @@ class Authority(object): performed. """ self._http_client = http_client + if isinstance(authority_url, AuthorityBuilder): + authority_url = str(authority_url) authority, self.instance, tenant = canonicalize(authority_url) parts = authority.path.split('/') is_b2c = any(self.instance.endswith("." + d) for d in WELL_KNOWN_B2C_HOSTS) or (
AzureAD/microsoft-authentication-library-for-python
23e5341acf2118686fd95c11e81d7ffbbde9f367
diff --git a/tests/http_client.py b/tests/http_client.py index a5587b7..5adbbde 100644 --- a/tests/http_client.py +++ b/tests/http_client.py @@ -20,6 +20,9 @@ class MinimalHttpClient: return MinimalResponse(requests_resp=self.session.get( url, params=params, headers=headers, timeout=self.timeout)) + def close(self): # Not required, but we use it to avoid a warning in unit test + self.session.close() + class MinimalResponse(object): # Not for production use def __init__(self, requests_resp=None, status_code=None, text=None): diff --git a/tests/test_authority.py b/tests/test_authority.py index cd6db78..9fdc83c 100644 --- a/tests/test_authority.py +++ b/tests/test_authority.py @@ -8,16 +8,37 @@ from tests.http_client import MinimalHttpClient @unittest.skipIf(os.getenv("TRAVIS_TAG"), "Skip network io during tagged release") class TestAuthority(unittest.TestCase): + def _test_given_host_and_tenant(self, host, tenant): + c = MinimalHttpClient() + a = Authority('https://{}/{}'.format(host, tenant), c) + self.assertEqual( + a.authorization_endpoint, + 'https://{}/{}/oauth2/v2.0/authorize'.format(host, tenant)) + self.assertEqual( + a.token_endpoint, + 'https://{}/{}/oauth2/v2.0/token'.format(host, tenant)) + c.close() + + def _test_authority_builder(self, host, tenant): + c = MinimalHttpClient() + a = Authority(AuthorityBuilder(host, tenant), c) + self.assertEqual( + a.authorization_endpoint, + 'https://{}/{}/oauth2/v2.0/authorize'.format(host, tenant)) + self.assertEqual( + a.token_endpoint, + 'https://{}/{}/oauth2/v2.0/token'.format(host, tenant)) + c.close() + def test_wellknown_host_and_tenant(self): # Assert all well known authority hosts are using their own "common" tenant for host in WELL_KNOWN_AUTHORITY_HOSTS: - a = Authority( - 'https://{}/common'.format(host), MinimalHttpClient()) - self.assertEqual( - a.authorization_endpoint, - 'https://%s/common/oauth2/v2.0/authorize' % host) - self.assertEqual( - a.token_endpoint, 'https://%s/common/oauth2/v2.0/token' % host) + self._test_given_host_and_tenant(host, "common") + + def test_wellknown_host_and_tenant_using_new_authority_builder(self): + self._test_authority_builder(AZURE_PUBLIC, "consumers") + self._test_authority_builder(AZURE_CHINA, "organizations") + self._test_authority_builder(AZURE_US_GOVERNMENT, "common") @unittest.skip("As of Jan 2017, the server no longer returns V1 endpoint") def test_lessknown_host_will_return_a_set_of_v1_endpoints(self):
Add public convenience string constants for endpoints of each cloud (sovereign and public) **Context** Customers have had difficulty finding the right documentation pages: https://docs.microsoft.com/en-us/azure/azure-government/documentation-government-developer-guide#endpoint-mapping https://docs.microsoft.com/en-us/azure/china/resources-developer-guide#check-endpoints-in-azure **See also other libraries exposing these:** - [obj-c](https://azuread.github.io/microsoft-authentication-library-for-objc/Enums/MSALAzureCloudInstance.html) - [.net](https://docs.microsoft.com/en-us/dotnet/api/microsoft.identity.client.azurecloudinstance?view=azure-dotnet) **Reference:** https://identitydivision.visualstudio.com/Engineering/_workitems/edit/1063014 **Other:** https://github.com/AzureAD/microsoft-authentication-library-for-java/issues/258
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_authority.py::TestAuthority::test_wellknown_host_and_tenant_using_new_authority_builder" ]
[ "tests/test_authority.py::TestAuthority::test_invalid_host_skipping_validation_can_be_turned_off", "tests/test_authority.py::TestAuthority::test_unknown_host_wont_pass_instance_discovery", "tests/test_authority.py::TestAuthorityInternalHelperCanonicalize::test_canonicalize_rejects_non_https", "tests/test_authority.py::TestAuthorityInternalHelperCanonicalize::test_canonicalize_rejects_tenantless", "tests/test_authority.py::TestAuthorityInternalHelperCanonicalize::test_canonicalize_rejects_tenantless_host_with_trailing_slash", "tests/test_authority.py::TestAuthorityInternalHelperCanonicalize::test_canonicalize_tenant_followed_by_extra_fragment", "tests/test_authority.py::TestAuthorityInternalHelperCanonicalize::test_canonicalize_tenant_followed_by_extra_paths", "tests/test_authority.py::TestAuthorityInternalHelperCanonicalize::test_canonicalize_tenant_followed_by_extra_query", "tests/test_authority.py::TestAuthorityInternalHelperUserRealmDiscovery::test_memorize" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-11-02T19:48:19Z"
mit
Azure__WALinuxAgent-1105
diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py index 60d42fd2..de07ea93 100644 --- a/azurelinuxagent/pa/provision/cloudinit.py +++ b/azurelinuxagent/pa/provision/cloudinit.py @@ -64,7 +64,7 @@ class CloudInitProvisionHandler(ProvisionHandler): logger.info("Finished provisioning") self.report_ready(thumbprint) - self.report_event("Provisioning with cloud-init succeeded", + self.report_event("Provisioning with cloud-init succeeded ({0})".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py index 5d07fdf4..5df572cb 100644 --- a/azurelinuxagent/pa/provision/default.py +++ b/azurelinuxagent/pa/provision/default.py @@ -88,7 +88,7 @@ class ProvisionHandler(object): self.write_provisioned() - self.report_event("Provisioning succeeded", + self.report_event("Provisioning succeeded ({0})".format(self._get_uptime_seconds()), is_success=True, duration=elapsed_milliseconds(utc_start)) @@ -125,6 +125,15 @@ class ProvisionHandler(object): continue return is_running == is_expected + @staticmethod + def _get_uptime_seconds(): + try: + with open('/proc/uptime') as fh: + uptime, _ = fh.readline().split() + return uptime + except: + return 0 + def reg_ssh_host_key(self): keypair_type = conf.get_ssh_host_keypair_type() if conf.get_regenerate_ssh_host_key():
Azure/WALinuxAgent
fb7d6c51dac236538a8c9eb8e752159d5e3f54b8
diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py index 1004547b..52098f2f 100644 --- a/tests/pa/test_provision.py +++ b/tests/pa/test_provision.py @@ -146,8 +146,12 @@ class TestProvision(AgentTestCase): ph.run() - call1 = call("Provisioning succeeded", duration=ANY, is_success=True) - ph.report_event.assert_has_calls([call1]) + self.assertEqual(1, ph.report_event.call_count) + positional_args, kw_args = ph.report_event.call_args + # [call('Provisioning succeeded (146473.68)', duration=65, is_success=True)] + self.assertTrue(re.match(r'Provisioning succeeded \(\d+\.\d+\)', positional_args[0]) is not None) + self.assertTrue(isinstance(kw_args['duration'], int)) + self.assertTrue(kw_args['is_success']) @distros() @patch(
Track Boot Time in Provision Event To better understand and break down the provision process, please include the boot time in the provision event, or emit a boot event with an appropriate duration.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_success" ]
[ "tests/pa/test_provision.py::TestProvision::test_customdata", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned", "tests/pa/test_provision.py::TestProvision::test_provision", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail", "tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2018-04-06T15:03:47Z"
apache-2.0
Azure__WALinuxAgent-1317
diff --git a/azurelinuxagent/ga/monitor.py b/azurelinuxagent/ga/monitor.py index c1215806..d6b66921 100644 --- a/azurelinuxagent/ga/monitor.py +++ b/azurelinuxagent/ga/monitor.py @@ -406,7 +406,11 @@ class MonitorHandler(object): CGroupsTelemetry.track_cgroup(CGroups.for_extension("")) CGroupsTelemetry.track_agent() except Exception as e: - logger.error("monitor: Exception tracking wrapper and agent: {0} [{1}]", e, traceback.format_exc()) + # when a hierarchy is not mounted, we raise an exception + # and we should therefore only issue a warning, since this + # is not unexpected + logger.warn("Monitor: cgroups not initialized: {0}", ustr(e)) + logger.verbose(traceback.format_exc()) def send_cgroup_telemetry(self): if self.last_cgroup_telemetry is None: @@ -419,13 +423,15 @@ class MonitorHandler(object): if value > 0: report_metric(metric_group, metric_name, cgroup_name, value) except Exception as e: - logger.warn("Failed to collect performance metrics: {0} [{1}]", e, traceback.format_exc()) + logger.warn("Monitor: failed to collect cgroups performance metrics: {0}", ustr(e)) + logger.verbose(traceback.format_exc()) # Look for extension cgroups we're not already tracking and track them try: CGroupsTelemetry.update_tracked(self.protocol.client.get_current_handlers()) except Exception as e: - logger.warn("Monitor: updating tracked extensions raised {0}: {1}", e, traceback.format_exc()) + logger.warn("Monitor: failed to update cgroups tracked extensions: {0}", ustr(e)) + logger.verbose(traceback.format_exc()) self.last_cgroup_telemetry = datetime.datetime.utcnow() diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py index 9609d7da..3f3cdb04 100644 --- a/azurelinuxagent/pa/provision/cloudinit.py +++ b/azurelinuxagent/pa/provision/cloudinit.py @@ -69,9 +69,10 @@ class CloudInitProvisionHandler(ProvisionHandler): duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: - logger.error("Provisioning failed: {0}", ustr(e)) + msg = "Provisioning with cloud-init failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) + self.report_event(msg) return def wait_for_ovfenv(self, max_retry=1800, sleep_time=1): diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py index a6e50824..0eb0823c 100644 --- a/azurelinuxagent/pa/provision/default.py +++ b/azurelinuxagent/pa/provision/default.py @@ -98,9 +98,10 @@ class ProvisionHandler(object): logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: + msg = "Provisioning failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e), is_success=False) - logger.error("Provisioning failed: {0}", ustr(e)) + self.report_event(msg, is_success=False) return @staticmethod
Azure/WALinuxAgent
ae2aec6fc31a4742c139d93cfc5e571e7afc741b
diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py index 0335bc9c..27f75266 100644 --- a/tests/pa/test_provision.py +++ b/tests/pa/test_provision.py @@ -268,8 +268,8 @@ class TestProvision(AgentTestCase): fileutil.write_file(ovfenv_file, ovfenv_data) ph.run() - ph.report_event.assert_called_once_with( - '[ProvisionError] --unit-test--', is_success=False) + positional_args, kw_args = ph.report_event.call_args_list[0] + self.assertTrue(re.match(r'Provisioning failed: \[ProvisionError\] --unit-test-- \(\d+\.\d+s\)', positional_args[0]) is not None) @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') @distros()
CGroups error in Ubuntu 14.04 ``` 2018/07/31 11:41:06.400633 ERROR ExtHandler monitor: Exception tracking wrapper and agent: 'Hierarchy memory is not mounted' [Traceback (most recent call last): File "bin/WALinuxAgent-2.2.30-py2.7.egg/azurelinuxagent/ga/monitor.py", line 397, in init_cgroups CGroupsTelemetry.track_cgroup(CGroups.for_extension("")) File "bin/WALinuxAgent-2.2.30-py2.7.egg/azurelinuxagent/common/cgroups.py", line 360, in for_extension return CGroups(name, CGroups._construct_custom_path_for_hierarchy) File "bin/WALinuxAgent-2.2.30-py2.7.egg/azurelinuxagent/common/cgroups.py", line 401, in __init__ raise CGroupsException("Hierarchy {0} is not mounted".format(hierarchy)) azurelinuxagent.common.cgroups.CGroupsException: 'Hierarchy memory is not mounted' ] ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail" ]
[ "tests/pa/test_provision.py::TestProvision::test_customdata", "tests/pa/test_provision.py::TestProvision::test_handle_provision_guest_agent", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned", "tests/pa/test_provision.py::TestProvision::test_provision", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_bad", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_empty", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_false", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_true", "tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-08-23T18:52:01Z"
apache-2.0
Azure__WALinuxAgent-1318
diff --git a/azurelinuxagent/pa/provision/cloudinit.py b/azurelinuxagent/pa/provision/cloudinit.py index 9609d7da..3f3cdb04 100644 --- a/azurelinuxagent/pa/provision/cloudinit.py +++ b/azurelinuxagent/pa/provision/cloudinit.py @@ -69,9 +69,10 @@ class CloudInitProvisionHandler(ProvisionHandler): duration=elapsed_milliseconds(utc_start)) except ProvisionError as e: - logger.error("Provisioning failed: {0}", ustr(e)) + msg = "Provisioning with cloud-init failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e)) + self.report_event(msg) return def wait_for_ovfenv(self, max_retry=1800, sleep_time=1): diff --git a/azurelinuxagent/pa/provision/default.py b/azurelinuxagent/pa/provision/default.py index a6e50824..0eb0823c 100644 --- a/azurelinuxagent/pa/provision/default.py +++ b/azurelinuxagent/pa/provision/default.py @@ -98,9 +98,10 @@ class ProvisionHandler(object): logger.info("Provisioning complete") except (ProtocolError, ProvisionError) as e: + msg = "Provisioning failed: {0} ({1}s)".format(ustr(e), self._get_uptime_seconds()) + logger.error(msg) self.report_not_ready("ProvisioningFailed", ustr(e)) - self.report_event(ustr(e), is_success=False) - logger.error("Provisioning failed: {0}", ustr(e)) + self.report_event(msg, is_success=False) return @staticmethod
Azure/WALinuxAgent
ae2aec6fc31a4742c139d93cfc5e571e7afc741b
diff --git a/tests/pa/test_provision.py b/tests/pa/test_provision.py index 0335bc9c..27f75266 100644 --- a/tests/pa/test_provision.py +++ b/tests/pa/test_provision.py @@ -268,8 +268,8 @@ class TestProvision(AgentTestCase): fileutil.write_file(ovfenv_file, ovfenv_data) ph.run() - ph.report_event.assert_called_once_with( - '[ProvisionError] --unit-test--', is_success=False) + positional_args, kw_args = ph.report_event.call_args_list[0] + self.assertTrue(re.match(r'Provisioning failed: \[ProvisionError\] --unit-test-- \(\d+\.\d+s\)', positional_args[0]) is not None) @patch('azurelinuxagent.pa.provision.default.ProvisionHandler.write_agent_disabled') @distros()
Record OS boot time for Failed Provisions Too The OS boot time is recorded in the Provision event **only** in the case of a successful provision. The OS boot time should be recorded in the case of a failed provision too.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_fail" ]
[ "tests/pa/test_provision.py::TestProvision::test_customdata", "tests/pa/test_provision.py::TestProvision::test_handle_provision_guest_agent", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_is_provisioned", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_deprovisioned", "tests/pa/test_provision.py::TestProvision::test_is_provisioned_not_provisioned", "tests/pa/test_provision.py::TestProvision::test_provision", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_bad", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_empty", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_false", "tests/pa/test_provision.py::TestProvision::test_provision_telemetry_pga_true", "tests/pa/test_provision.py::TestProvision::test_provisioning_is_skipped_when_not_enabled" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2018-08-23T19:52:31Z"
apache-2.0
Azure__azure-functions-python-worker-890
diff --git a/CODEOWNERS b/CODEOWNERS index 8d5a9d3..9dd0e16 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -7,6 +7,7 @@ # # AZURE FUNCTIONS TEAM -# For all file changes, github would automatically include the following people in the PRs. +# For all file changes, github would automatically +# include the following people in the PRs. # -* @anirudhgarg @Hazhzeng @vrdmr @AnatoliB \ No newline at end of file +* @vrdmr @AnatoliB \ No newline at end of file diff --git a/azure_functions_worker/constants.py b/azure_functions_worker/constants.py index 96c96cb..bad0504 100644 --- a/azure_functions_worker/constants.py +++ b/azure_functions_worker/constants.py @@ -39,6 +39,8 @@ UNIX_SHARED_MEMORY_DIRECTORIES = "FUNCTIONS_UNIX_SHARED_MEMORY_DIRECTORIES" PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT = 1 PYTHON_THREADPOOL_THREAD_COUNT_MIN = 1 PYTHON_THREADPOOL_THREAD_COUNT_MAX = sys.maxsize +PYTHON_THREADPOOL_THREAD_COUNT_MAX_37 = 32 + PYTHON_ISOLATE_WORKER_DEPENDENCIES_DEFAULT = False PYTHON_ISOLATE_WORKER_DEPENDENCIES_DEFAULT_39 = False PYTHON_ENABLE_WORKER_EXTENSIONS_DEFAULT = False diff --git a/azure_functions_worker/dispatcher.py b/azure_functions_worker/dispatcher.py index e8a7b24..694e5fc 100644 --- a/azure_functions_worker/dispatcher.py +++ b/azure_functions_worker/dispatcher.py @@ -26,7 +26,7 @@ from . import loader from . import protos from .constants import (PYTHON_THREADPOOL_THREAD_COUNT, PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT, - PYTHON_THREADPOOL_THREAD_COUNT_MAX, + PYTHON_THREADPOOL_THREAD_COUNT_MAX_37, PYTHON_THREADPOOL_THREAD_COUNT_MIN) from .logging import disable_console_logging, enable_console_logging from .logging import (logger, error_logger, is_system_log_category, @@ -567,25 +567,28 @@ class Dispatcher(metaclass=DispatcherMeta): 'integer') return False - if int_value < PYTHON_THREADPOOL_THREAD_COUNT_MIN or ( - int_value > PYTHON_THREADPOOL_THREAD_COUNT_MAX): + if int_value < PYTHON_THREADPOOL_THREAD_COUNT_MIN: logger.warning(f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set ' f'to a value between ' f'{PYTHON_THREADPOOL_THREAD_COUNT_MIN} and ' - f'{PYTHON_THREADPOOL_THREAD_COUNT_MAX}. ' - 'Reverting to default value for max_workers') + 'sys.maxint. Reverting to default value for ' + 'max_workers') return False - return True # Starting Python 3.9, worker won't be putting a limit on the # max_workers count in the created threadpool. default_value = None if sys.version_info.minor == 9 \ else f'{PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT}' + max_workers = get_app_setting(setting=PYTHON_THREADPOOL_THREAD_COUNT, default_value=default_value, validator=tp_max_workers_validator) + if sys.version_info.minor <= 7: + max_workers = min(int(max_workers), + PYTHON_THREADPOOL_THREAD_COUNT_MAX_37) + # We can box the app setting as int for earlier python versions. return int(max_workers) if max_workers else None
Azure/azure-functions-python-worker
f5a68bd2039b892ce4c899aa181de593eaf273b6
diff --git a/tests/unittests/test_dispatcher.py b/tests/unittests/test_dispatcher.py index cb90ab8..2d5960a 100644 --- a/tests/unittests/test_dispatcher.py +++ b/tests/unittests/test_dispatcher.py @@ -11,8 +11,7 @@ from azure_functions_worker import protos from azure_functions_worker import testutils from azure_functions_worker.constants import PYTHON_THREADPOOL_THREAD_COUNT, \ PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT, \ - PYTHON_THREADPOOL_THREAD_COUNT_MAX, \ - PYTHON_THREADPOOL_THREAD_COUNT_MIN + PYTHON_THREADPOOL_THREAD_COUNT_MAX_37, PYTHON_THREADPOOL_THREAD_COUNT_MIN SysVersionInfo = col.namedtuple("VersionInfo", ["major", "minor", "micro", "releaselevel", "serial"]) @@ -37,7 +36,8 @@ class TestThreadPoolSettingsPython37(testutils.AsyncTestCase): script_root=DISPATCHER_FUNCTIONS_DIR) self._default_workers: Optional[ int] = PYTHON_THREADPOOL_THREAD_COUNT_DEFAULT - self._allowed_max_workers: int = 100000 + self._over_max_workers: int = 10000 + self._allowed_max_workers: int = PYTHON_THREADPOOL_THREAD_COUNT_MAX_37 self._pre_env = dict(os.environ) self.mock_version_info = patch( 'azure_functions_worker.dispatcher.sys.version_info', @@ -128,33 +128,26 @@ class TestThreadPoolSettingsPython37(testutils.AsyncTestCase): await self._assert_workers_threadpool(self._ctrl, host, self._default_workers) mock_logger.warning.assert_any_call( - f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a value ' - f'between {PYTHON_THREADPOOL_THREAD_COUNT_MIN} and ' - f'{PYTHON_THREADPOOL_THREAD_COUNT_MAX}. Reverting to default ' - f'value for max_workers') + f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set ' + f'to a value between ' + f'{PYTHON_THREADPOOL_THREAD_COUNT_MIN} and ' + 'sys.maxint. Reverting to default value for ' + 'max_workers') - @unittest.skip("We no more check any max limit. This is up to the customer," - " how ever high int they want to set") async def test_dispatcher_sync_threadpool_exceed_max_setting(self): - """Test if the sync threadpool will pick up default value when the + """Test if the sync threadpool will pick up default max value when the setting is above maximum """ - with patch('azure_functions_worker.dispatcher.logger') as mock_logger: + with patch('azure_functions_worker.dispatcher.logger'): # Configure thread pool max worker to an invalid value os.environ.update({PYTHON_THREADPOOL_THREAD_COUNT: f'{self._over_max_workers}'}) async with self._ctrl as host: await self._check_if_function_is_ok(host) - # Ensure the dispatcher sync threadpool should fallback to 1 + # Ensure the dispatcher sync threadpool should fallback to max await self._assert_workers_threadpool(self._ctrl, host, - self._default_workers) - - mock_logger.warning.assert_any_call( - f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a value ' - f'between {PYTHON_THREADPOOL_THREAD_COUNT_MIN} and ' - f'{PYTHON_THREADPOOL_THREAD_COUNT_MAX}. Reverting to default ' - f'value for max_workers') + self._allowed_max_workers) async def test_dispatcher_sync_threadpool_in_placeholder(self): """Test if the sync threadpool will pick up app setting in placeholder @@ -189,13 +182,13 @@ class TestThreadPoolSettingsPython37(testutils.AsyncTestCase): mock_logger.warning.assert_any_call( f'{PYTHON_THREADPOOL_THREAD_COUNT} must be an integer') - @unittest.skip("We no more check any max limit. 
This is up to the customer," - " how ever high int they want to set") async def test_dispatcher_sync_threadpool_in_placeholder_above_max(self): - """Test if the sync threadpool will use the default setting when the - app setting is above maximum + """Test if the sync threadpool will use the default max setting when + the app setting is above maximum. + + Note: This is designed for Linux Consumption. """ - with patch('azure_functions_worker.dispatcher.logger') as mock_logger: + with patch('azure_functions_worker.dispatcher.logger'): async with self._ctrl as host: await self._check_if_function_is_ok(host) @@ -204,13 +197,7 @@ class TestThreadPoolSettingsPython37(testutils.AsyncTestCase): PYTHON_THREADPOOL_THREAD_COUNT: f'{self._over_max_workers}' }) await self._assert_workers_threadpool(self._ctrl, host, - self._default_workers) - - mock_logger.warning.assert_any_call( - f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a ' - f'value ' - 'between 1 and 1024. ' - 'Reverting to default value for max_workers') + self._allowed_max_workers) async def test_dispatcher_sync_threadpool_in_placeholder_below_min(self): """Test if the sync threadpool will use the default setting when the @@ -229,10 +216,11 @@ class TestThreadPoolSettingsPython37(testutils.AsyncTestCase): self._default_workers) mock_logger.warning.assert_any_call( - f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set to a value ' - f'between {PYTHON_THREADPOOL_THREAD_COUNT_MIN} and ' - f'{PYTHON_THREADPOOL_THREAD_COUNT_MAX}. Reverting to ' - f'default value for max_workers') + f'{PYTHON_THREADPOOL_THREAD_COUNT} must be set ' + f'to a value between ' + f'{PYTHON_THREADPOOL_THREAD_COUNT_MIN} and ' + 'sys.maxint. Reverting to default value for ' + 'max_workers') async def test_sync_invocation_request_log(self): with patch('azure_functions_worker.dispatcher.logger') as mock_logger: @@ -418,6 +406,8 @@ class TestThreadPoolSettingsPython38(TestThreadPoolSettingsPython37): self.mock_version_info = patch( 'azure_functions_worker.dispatcher.sys.version_info', SysVersionInfo(3, 8, 0, 'final', 0)) + self._over_max_workers: int = 10000 + self._allowed_max_workers: int = self._over_max_workers self.mock_version_info.start() def tearDown(self): @@ -425,25 +415,43 @@ class TestThreadPoolSettingsPython38(TestThreadPoolSettingsPython37): os.environ.update(self._pre_env) self.mock_version_info.stop() + async def test_dispatcher_sync_threadpool_in_placeholder_above_max(self): + """Test if the sync threadpool will use any value and there isn't any + artificial max value set. + """ + with patch('azure_functions_worker.dispatcher.logger'): + async with self._ctrl as host: + await self._check_if_function_is_ok(host) + + # Reload environment variable on specialization + await host.reload_environment(environment={ + PYTHON_THREADPOOL_THREAD_COUNT: f'{self._over_max_workers}' + }) + await self._assert_workers_threadpool(self._ctrl, host, + self._allowed_max_workers) + self.assertNotEqual( + self._ctrl._worker.get_sync_tp_workers_set(), + self._default_workers) + @unittest.skipIf(sys.version_info.minor != 9, "Run the tests only for Python 3.9. 
In other platforms, " "as the default passed is None, the cpu_count determines the " "number of max_workers and we cannot mock the os.cpu_count() " "in the concurrent.futures.ThreadPoolExecutor") -class TestThreadPoolSettingsPython39(TestThreadPoolSettingsPython37): +class TestThreadPoolSettingsPython39(TestThreadPoolSettingsPython38): def setUp(self): super(TestThreadPoolSettingsPython39, self).setUp() self.mock_os_cpu = patch( 'os.cpu_count', return_value=2) - self.mock_os_cpu.start() # 6 - based on 2 cores - min(32, (os.cpu_count() or 1) + 4) - 2 + 4 self._default_workers: Optional[int] = 6 - self.mock_version_info = patch( 'azure_functions_worker.dispatcher.sys.version_info', SysVersionInfo(3, 9, 0, 'final', 0)) + + self.mock_os_cpu.start() self.mock_version_info.start() def tearDown(self):
Allow specifying PYTHON_THREADPOOL_THREAD_COUNT larger than 32 Currently, the worker limits the maximum value of `PYTHON_THREADPOOL_THREAD_COUNT` to 32. Any value larger than that is ignored. There is no good reason for this in the general case: if someone or something decided that a larger number works better for a specific use case, this should be respected.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_set_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_in_placeholder_below_min", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_async_invocation_request_log_in_placeholder_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_sync_invocation_request_log_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_async_invocation_request_log", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_in_placeholder_invalid", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_default_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_invalid_worker_count", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_sync_invocation_request_log", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_in_placeholder", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_send_worker_request", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_below_min_setting", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_sync_invocation_request_log_in_placeholder_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_initialize_worker_logging", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_in_placeholder_above_max", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_sync_threadpool_exceed_max_setting", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_async_invocation_request_log_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython39::test_dispatcher_initialize_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_initialize_worker_logging", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_async_invocation_request_log_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_send_worker_request", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_set_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_exceed_max_setting", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_async_invocation_request_log_in_placeholder_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_default_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_async_invocation_request_log", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_in_placeholder_above_max", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_below_min_setting", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_invalid_worker_count", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_in_placeholder_invalid", 
"tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_sync_invocation_request_log_in_placeholder_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_in_placeholder", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_sync_invocation_request_log_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_initialize_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_dispatcher_sync_threadpool_in_placeholder_below_min", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython37::test_sync_invocation_request_log", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_set_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_sync_invocation_request_log_in_placeholder_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_in_placeholder_invalid", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_invalid_worker_count", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_in_placeholder", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_async_invocation_request_log_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_exceed_max_setting", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_sync_invocation_request_log", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_in_placeholder_below_min", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_below_min_setting", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_async_invocation_request_log", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_initialize_worker_logging", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_in_placeholder_above_max", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_sync_invocation_request_log_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_initialize_worker", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_send_worker_request", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_async_invocation_request_log_in_placeholder_threads", "tests/unittests/test_dispatcher.py::TestThreadPoolSettingsPython38::test_dispatcher_sync_threadpool_default_worker" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-09-14T05:57:33Z"
mit
Azure__iotedgedev-173
diff --git a/iotedgedev/azurecli.py b/iotedgedev/azurecli.py index c5bce70..6bce331 100644 --- a/iotedgedev/azurecli.py +++ b/iotedgedev/azurecli.py @@ -226,10 +226,10 @@ class AzureCli: return result - def apply_configuration(self, deviceId, connection_string, config): - self.output.status(f("Deploying '{config}' to '{deviceId}'...")) + def apply_configuration(self, device_id, connection_string, hub_name, config): + self.output.status(f("Deploying '{config}' to '{device_id}'...")) - return self.invoke_az_cli_outproc(["iot", "hub", "apply-configuration", "-d", deviceId, "-k", config, "-l", connection_string], error_message=f("Failed to deploy '{config}' to '{deviceId}'..."), suppress_output=True) + return self.invoke_az_cli_outproc(["iot", "hub", "apply-configuration", "-d", device_id, "-n", hub_name, "-k", config, "-l", connection_string], error_message=f("Failed to deploy '{config}' to '{device_id}'..."), suppress_output=True) def get_free_iothub(self): with output_io_cls() as io: diff --git a/iotedgedev/connectionstring.py b/iotedgedev/connectionstring.py index cc29b68..2c8c19e 100644 --- a/iotedgedev/connectionstring.py +++ b/iotedgedev/connectionstring.py @@ -1,10 +1,10 @@ class ConnectionString: def __init__(self, value): - self.value = value + self.ConnectionString = value self.data = dict() - if self.value: - parts = value.split(';') + if self.ConnectionString: + parts = self.ConnectionString.split(';') if len(parts) > 0: for part in parts: subpart = part.split('=', 1) @@ -13,6 +13,8 @@ class ConnectionString: if self.data: self.HostName = self["hostname"] + if self.HostName: + self.HubName = self.HostName.split('.')[0] self.SharedAccessKey = self["sharedaccesskey"] def __getitem__(self, key): @@ -23,7 +25,7 @@ class IoTHubConnectionString(ConnectionString): def __init__(self, value): ConnectionString.__init__(self, value) - if self.value: + if self.ConnectionString: self.SharedAccessKeyName = self["sharedaccesskeyname"] @@ -31,5 +33,5 @@ class DeviceConnectionString(ConnectionString): def __init__(self, value): ConnectionString.__init__(self, value) - if self.value: + if self.ConnectionString: self.DeviceId = self["deviceid"] diff --git a/iotedgedev/edge.py b/iotedgedev/edge.py index 6e71ba0..4d20943 100644 --- a/iotedgedev/edge.py +++ b/iotedgedev/edge.py @@ -10,11 +10,11 @@ class Edge: self.output.header("DEPLOYING CONFIGURATION") - self.envvars.verify_envvar_has_val("IOTHUB_CONNECTION_STRING", self.envvars.IOTHUB_CONNECTION_STRING) - self.envvars.verify_envvar_has_val("DEVICE_CONNECTION_STRING", self.envvars.DEVICE_CONNECTION_STRING) + self.envvars.verify_envvar_has_val("IOTHUB_CONNECTION_INFO", self.envvars.IOTHUB_CONNECTION_INFO) + self.envvars.verify_envvar_has_val("DEVICE_CONNECTION_INFO", self.envvars.DEVICE_CONNECTION_INFO) self.envvars.verify_envvar_has_val("DEPLOYMENT_CONFIG_FILE", self.envvars.DEPLOYMENT_CONFIG_FILE) - self.azure_cli.apply_configuration(self.envvars.DEVICE_CONNECTION_INFO.DeviceId, self.envvars.IOTHUB_CONNECTION_STRING, self.envvars.DEPLOYMENT_CONFIG_FILE_PATH) + self.azure_cli.apply_configuration(self.envvars.DEVICE_CONNECTION_INFO.DeviceId, self.envvars.IOTHUB_CONNECTION_INFO.ConnectionString, self.envvars.IOTHUB_CONNECTION_INFO.HubName, self.envvars.DEPLOYMENT_CONFIG_FILE_PATH) self.output.footer("DEPLOYMENT COMPLETE") \ No newline at end of file
Azure/iotedgedev
ce59bad1286bf650d442b2b7fbe16a3db676a497
diff --git a/tests/test_connectionstring.py b/tests/test_connectionstring.py new file mode 100644 index 0000000..21d0dc9 --- /dev/null +++ b/tests/test_connectionstring.py @@ -0,0 +1,78 @@ +import os +import pytest +from dotenv import load_dotenv +from iotedgedev.connectionstring import ConnectionString, IoTHubConnectionString, DeviceConnectionString + +emptystring = "" +valid_connectionstring = "HostName=testhub.azure-devices.net;SharedAccessKey=gibberish" +valid_iothub_connectionstring = "HostName=testhub.azure-devices.net;SharedAccessKeyName=iothubowner;SharedAccessKey=moregibberish" +valid_device_connectionstring = "HostName=testhub.azure-devices.net;DeviceId=testdevice;SharedAccessKey=othergibberish" +invalid_connectionstring = "HostName=azure-devices.net;SharedAccessKey=gibberish" +invalid_iothub_connectionstring = "HostName=testhub.azure-devices.net;SharedAccessKey=moregibberish" +invalid_device_connectionstring = "HostName=testhub.azure-devices.net;DeviceId=;SharedAccessKey=othergibberish" + +def test_empty_connectionstring(): + connectionstring = ConnectionString(emptystring) + assert not connectionstring.data + +def test_empty_iothub_connectionstring(): + connectionstring = IoTHubConnectionString(emptystring) + assert not connectionstring.data + +def test_empty_device_connectionstring(): + connectionstring = DeviceConnectionString(emptystring) + assert not connectionstring.data + +def test_valid_connectionstring(): + connectionstring = ConnectionString(valid_connectionstring) + assert connectionstring.HostName == "testhub.azure-devices.net" + assert connectionstring.HubName == "testhub" + assert connectionstring.SharedAccessKey == "gibberish" + +def test_valid_iothub_connectionstring(): + connectionstring = IoTHubConnectionString(valid_iothub_connectionstring) + assert connectionstring.HostName == "testhub.azure-devices.net" + assert connectionstring.HubName == "testhub" + assert connectionstring.SharedAccessKeyName == "iothubowner" + assert connectionstring.SharedAccessKey == "moregibberish" + +def test_valid_devicehub_connectionstring(): + connectionstring = DeviceConnectionString(valid_device_connectionstring) + assert connectionstring.HostName == "testhub.azure-devices.net" + assert connectionstring.HubName == "testhub" + assert connectionstring.DeviceId == "testdevice" + assert connectionstring.SharedAccessKey == "othergibberish" + +def test_invalid_connectionstring(): + connectionstring = ConnectionString(invalid_connectionstring) + assert connectionstring.HubName != "testhub" + +def test_invalid_iothub_connectionstring(): + with pytest.raises(KeyError): + IoTHubConnectionString(invalid_iothub_connectionstring) + +def test_invalid_devicehub_connectionstring(): + connectionstring = DeviceConnectionString(invalid_device_connectionstring) + assert connectionstring.HostName == "testhub.azure-devices.net" + assert connectionstring.HubName == "testhub" + assert not connectionstring.DeviceId + assert connectionstring.SharedAccessKey == "othergibberish" + +def test_valid_env_iothub_connectionstring(): + load_dotenv(".env") + env_iothub_connectionstring = os.getenv("IOTHUB_CONNECTION_STRING") + connectionstring = IoTHubConnectionString(env_iothub_connectionstring) + assert connectionstring.HostName + assert connectionstring.HubName + assert connectionstring.SharedAccessKey + assert connectionstring.SharedAccessKeyName + +def test_valid_env_device_connectionstring(): + load_dotenv(".env") + env_device_connectionstring = os.getenv("DEVICE_CONNECTION_STRING") + connectionstring = 
DeviceConnectionString(env_device_connectionstring) + assert connectionstring.HostName + assert connectionstring.HubName + assert connectionstring.SharedAccessKey + assert connectionstring.DeviceId + \ No newline at end of file diff --git a/tests/test_iotedgedev.py b/tests/test_iotedgedev.py index 2d08bad..c809c15 100644 --- a/tests/test_iotedgedev.py +++ b/tests/test_iotedgedev.py @@ -153,7 +153,7 @@ def test_monitor(request, capfd): print (err) print (result.output) - assert 'application properties' in out + assert 'timeCreated' in out @pytest.fixture
AZ IOT HUB apply-configuration needs hubname. If a user has an old version of the az cli iot extension installed, they get this: `az iot hub apply-configuration: error: argument --hub-name/-n is required ` - Add the -n parameter to the apply-configuration call. You can get it from IOTHUB_CONNECTION_INFO.HostName. apply-configuration might need ONLY the hub name, but HostName has [name].azuredevices.net. Therefore, you might have to split the ConnectionString.HostName property and add a new property to that class called HubName.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_connectionstring.py::test_valid_connectionstring", "tests/test_connectionstring.py::test_valid_iothub_connectionstring", "tests/test_connectionstring.py::test_valid_devicehub_connectionstring", "tests/test_connectionstring.py::test_invalid_connectionstring", "tests/test_connectionstring.py::test_invalid_devicehub_connectionstring" ]
[ "tests/test_connectionstring.py::test_empty_connectionstring", "tests/test_connectionstring.py::test_empty_iothub_connectionstring", "tests/test_connectionstring.py::test_empty_device_connectionstring", "tests/test_connectionstring.py::test_invalid_iothub_connectionstring" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-05-16T17:52:55Z"
mit
Azure__iotedgedev-267
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..76cd57b --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,16 @@ +--- +name: Bug report +about: Create a issue to help us improve +--- + +<!-- Fill in the information needed --> +- iotedgedev Version: +- Python Version: +- Pip Version: +- Development machine OS Version: +- IoT Edge device OS Version: + +Steps to Reproduce: + +1. +2. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..de27153 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,7 @@ +--- +name: Feature request +about: Suggest an idea for IoT Edge and IoT Edge development tools + +--- + +<!-- Describe the feature you'd like. --> \ No newline at end of file diff --git a/iotedgedev/cli.py b/iotedgedev/cli.py index 2fe5b45..c85aa25 100644 --- a/iotedgedev/cli.py +++ b/iotedgedev/cli.py @@ -85,13 +85,13 @@ def docker(): type=click.Choice(["csharp", "nodejs", "python", "csharpfunction"]), help="Specify the template used to create the default module") @with_telemetry -def create(name, module, template): +def new(name, module, template): utility = Utility(envvars, output) sol = Solution(output, utility) sol.create(name, module, template) -main.add_command(create) +main.add_command(new) @solution.command(context_settings=CONTEXT_SETTINGS, @@ -103,7 +103,7 @@ def init(): utility = Utility(envvars, output) if len(os.listdir(os.getcwd())) == 0: - solcmd = "iotedgedev solution create ." + solcmd = "iotedgedev new ." output.header(solcmd) utility.call_proc(solcmd.split()) diff --git a/iotedgedev/envvars.py b/iotedgedev/envvars.py index f180959..5c44f7c 100644 --- a/iotedgedev/envvars.py +++ b/iotedgedev/envvars.py @@ -20,7 +20,7 @@ class EnvVars: current_command = Args().get_current_command() # for some commands we don't want to load dotenv # TODO: temporary hack. A more grace solution would be a decorator on the command to indicate whether to bypass env - self.bypass_dotenv_load_commands = ['solution init', 'solution e2e', 'solution create', 'create', 'simulator stop', 'simulator modulecred'] + self.bypass_dotenv_load_commands = ['solution init', 'solution e2e', 'solution new', 'new', 'simulator stop', 'simulator modulecred'] self.bypass = self.is_bypass_command(current_command) # for some commands we don't want verbose dotenv load output self.terse_commands = ['', 'iothub setup'] @@ -151,7 +151,7 @@ class EnvVars: else: self.DOCKER_HOST = None except Exception as ex: - msg = "Environment variables not configured correctly. Run `iotedgedev solution create` to create a new solution with sample .env file. " + msg = "Environment variables not configured correctly. Run `iotedgedev new` to create a new solution with sample .env file. " "Please see README for variable configuration options. Tip: You might just need to restart your command prompt to refresh your Environment Variables. " "Variable that caused exception: {0}".format(str(ex)) raise ValueError(msg)
Azure/iotedgedev
3b26b6495293607b3752f83fe412c84d7da4fc23
diff --git a/tests/test_envvars.py b/tests/test_envvars.py index 038da27..49be0f3 100644 --- a/tests/test_envvars.py +++ b/tests/test_envvars.py @@ -75,49 +75,49 @@ def test_envvar_clean(): def test_in_command_list_true_1(): output = Output() envvars = EnvVars(output) - assert envvars.in_command_list("solution create test_solution", ["init", "e2e", "solution create", "create", "simulator stop"]) + assert envvars.in_command_list("solution new test_solution", ["init", "e2e", "solution new", "new", "simulator stop"]) def test_in_command_list_true_2(): output = Output() envvars = EnvVars(output) - assert envvars.in_command_list("solution create", ["init", "e2e", "solution create", "create", "simulator stop"]) + assert envvars.in_command_list("solution new", ["init", "e2e", "solution new", "new", "simulator stop"]) def test_in_command_list_false_1(): output = Output() envvars = EnvVars(output) - assert not envvars.in_command_list("solution add filtermodule", ["init", "e2e", "solution create", "create", "simulator stop"]) + assert not envvars.in_command_list("solution add filtermodule", ["init", "e2e", "solution new", "new", "simulator stop"]) def test_in_command_list_false_2(): output = Output() envvars = EnvVars(output) - assert not envvars.in_command_list("solution addotherstuff filtermodule", ["init", "e2e", "solution add", "create", "simulator stop"]) + assert not envvars.in_command_list("solution addotherstuff filtermodule", ["init", "e2e", "solution add", "new", "simulator stop"]) def test_in_command_list_empty_1(): output = Output() envvars = EnvVars(output) - assert not envvars.in_command_list("", ["init", "e2e", "solution create", "create", "simulator stop"]) + assert not envvars.in_command_list("", ["init", "e2e", "solution new", "new", "simulator stop"]) def test_in_command_list_empty_2(): output = Output() envvars = EnvVars(output) - assert not envvars.in_command_list("solution create test_solution", ["init", "e2e", "", "create", "simulator stop"]) + assert not envvars.in_command_list("solution new test_solution", ["init", "e2e", "", "new", "simulator stop"]) def test_in_command_list_empty_3(): output = Output() envvars = EnvVars(output) - assert envvars.in_command_list("", ["init", "e2e", "", "create", "simulator stop"]) + assert envvars.in_command_list("", ["init", "e2e", "", "new", "simulator stop"]) def test_is_bypass_command_true(): output = Output() envvars = EnvVars(output) - assert envvars.is_bypass_command("solution create EdgeSolution") + assert envvars.is_bypass_command("solution new EdgeSolution") def test_is_bypass_command_false(): @@ -141,7 +141,7 @@ def test_is_terse_command_true(): def test_is_terse_command_false(): output = Output() envvars = EnvVars(output) - assert not envvars.is_terse_command("solution create") + assert not envvars.is_terse_command("solution new") def test_is_terse_command_empty(): diff --git a/tests/test_iotedgedev.py b/tests/test_iotedgedev.py index c436ca8..60d7f06 100644 --- a/tests/test_iotedgedev.py +++ b/tests/test_iotedgedev.py @@ -36,7 +36,7 @@ def create_solution(request): runner = CliRunner() os.chdir(tests_dir) - result = runner.invoke(cli.main, ['solution', 'create', test_solution]) + result = runner.invoke(cli.main, ['solution', 'new', test_solution]) print(result.output) assert 'AZURE IOT EDGE SOLUTION CREATED' in result.output @@ -58,7 +58,7 @@ def test_solution_create_in_non_empty_current_path(request): cli = __import__("iotedgedev.cli", fromlist=['main']) runner = CliRunner() - result = runner.invoke(cli.main, ['solution', 
'create', '.']) + result = runner.invoke(cli.main, ['solution', 'new', '.']) print(result.output) assert "Directory is not empty" in result.output @@ -75,7 +75,7 @@ def test_solution_create_in_empty_current_path(request): cli = __import__("iotedgedev.cli", fromlist=['main']) runner = CliRunner() - result = runner.invoke(cli.main, ['solution', 'create', '.']) + result = runner.invoke(cli.main, ['solution', 'new', '.']) print(result.output) assert 'AZURE IOT EDGE SOLUTION CREATED' in result.output @@ -88,7 +88,7 @@ def test_solution_create_in_non_empty_dir(request): cli = __import__("iotedgedev.cli", fromlist=['main']) runner = CliRunner() - result = runner.invoke(cli.main, ['solution', 'create', test_solution]) + result = runner.invoke(cli.main, ['solution', 'new', test_solution]) print(result.output) assert "Directory is not empty" in result.output @@ -104,7 +104,7 @@ def test_solution_create_in_empty_child_dir(request): cli = __import__("iotedgedev.cli", fromlist=['main']) runner = CliRunner() - result = runner.invoke(cli.main, ['solution', 'create', dirname]) + result = runner.invoke(cli.main, ['solution', 'new', dirname]) print(result.output) assert 'AZURE IOT EDGE SOLUTION CREATED' in result.output diff --git a/tests/test_simulator.py b/tests/test_simulator.py index 5ba1e56..38849d6 100644 --- a/tests/test_simulator.py +++ b/tests/test_simulator.py @@ -26,7 +26,7 @@ def create_solution(request): runner = CliRunner() os.chdir(tests_dir) - result = runner.invoke(cli.main, ['solution', 'create', test_solution]) + result = runner.invoke(cli.main, ['solution', 'new', test_solution]) print(result.output) assert 'AZURE IOT EDGE SOLUTION CREATED' in result.output
Renaming `iotedgedev create` to `iotedgedev new` I am thinking about renaming the command `iotedgedev create` to `iotedgedev new` for the sake of simplicity and to align with common practice (this is how the command for creating new projects is named in VS and VS Code). @jongio, do you think this is a good idea?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_envvars.py::test_is_bypass_command_true" ]
[ "tests/test_envvars.py::test_valid_get_envvar", "tests/test_envvars.py::test_invalid_get_envvar", "tests/test_envvars.py::test_valid_load", "tests/test_envvars.py::test_valid_verify_envvar_has_val", "tests/test_envvars.py::test_valid_get_envvar_key_if_val", "tests/test_envvars.py::test_invalid_get_envvar_key_if_val", "tests/test_envvars.py::test_set_envvar", "tests/test_envvars.py::test_envvar_clean", "tests/test_envvars.py::test_in_command_list_true_1", "tests/test_envvars.py::test_in_command_list_true_2", "tests/test_envvars.py::test_in_command_list_false_1", "tests/test_envvars.py::test_in_command_list_false_2", "tests/test_envvars.py::test_in_command_list_empty_1", "tests/test_envvars.py::test_in_command_list_empty_2", "tests/test_envvars.py::test_in_command_list_empty_3", "tests/test_envvars.py::test_is_bypass_command_false", "tests/test_envvars.py::test_is_bypass_command_empty", "tests/test_envvars.py::test_is_terse_command_true", "tests/test_envvars.py::test_is_terse_command_false", "tests/test_envvars.py::test_is_terse_command_empty", "tests/test_envvars.py::test_default_container_registry_server_value_exists", "tests/test_envvars.py::test_default_container_registry_username_value_exists_or_returns_empty_string", "tests/test_envvars.py::test_default_container_registry_password_value_exists_or_returns_empty_string", "tests/test_envvars.py::test_container_registry_server_key_missing_sys_exit", "tests/test_envvars.py::test_container_registry_server_value_missing_sys_exit", "tests/test_envvars.py::test_unique_container_registry_server_tokens", "tests/test_envvars.py::test_unique_container_registry_username_tokens", "tests/test_envvars.py::test_unique_container_registry_password_tokens", "tests/test_envvars.py::test_additional_container_registry_server_has_val", "tests/test_envvars.py::test_additional_container_registry_username_has_val", "tests/test_envvars.py::test_additional_container_registry_password_has_val" ]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2018-08-20T06:58:26Z"
mit
Azure__iotedgehubdev-159
diff --git a/README.md b/README.md index 23ad402..db93bf4 100644 --- a/README.md +++ b/README.md @@ -32,20 +32,42 @@ The following table compares the steps needed to run the solution on the IoT Edg ## Quickstart 1. Setup + + Windows ``` - iotedgehubdev setup -c <edge-device-connection-string> + iotedgehubdev setup -c "<edge-device-connection-string>" + ``` + + Linux/macOS + ``` + sudo iotedgehubdev setup -c "<edge-device-connection-string>" ``` 2. Start/stop an IoT Edge solution in simulator + + Windows ``` - iotedgehubdev start -d <path/to/deployment manifest> + iotedgehubdev start -d <path/to/deployment-manifest> iotedgehubdev stop ``` + Linux/macOS + ``` + sudo iotedgehubdev start -d <path/to/deployment-manifest> + sudo iotedgehubdev stop + ``` + 3. Start and debug a single module natively 1. Start the module with specific input(s) + + Windows ``` - iotedgehubdev start -i <module-inputs> + iotedgehubdev start -i "<module-inputs>" + ``` + + Linux/macOS + ``` + sudo iotedgehubdev start -i "<module-inputs>" ``` For example: `iotedgehubdev start -i "input1,input2"` @@ -61,6 +83,17 @@ The following table compares the steps needed to run the solution on the IoT Edg For example: `curl --header "Content-Type: application/json" --request POST --data '{"inputName": "input1","data": "hello world"}' http://localhost:53000/api/v1/messages` + 5. Stop the simulator + + Windows + ``` + iotedgehubdev stop + ``` + + Linux/macOS + ``` + sudo iotedgehubdev stop + ``` ## Other resources - [Azure IoT Edge for Visual Studio Code](https://github.com/microsoft/vscode-azure-iot-edge) diff --git a/iotedgehubdev/cli.py b/iotedgehubdev/cli.py index e6a4b7c..843cf8a 100644 --- a/iotedgehubdev/cli.py +++ b/iotedgehubdev/cli.py @@ -49,7 +49,7 @@ def _with_telemetry(func): telemetry.flush() return value except Exception as e: - output.error('Error: {0}'.format(str(e))) + output.error(str(e)) telemetry.fail(str(e), 'Command failed') telemetry.flush() sys.exit(1) @@ -57,6 +57,29 @@ def _with_telemetry(func): return _wrapper +def _parse_config_json(): + config_file = HostPlatform.get_config_file_path() + + if not Utils.check_if_file_exists(config_file): + raise ValueError('Cannot find config file. Please run `{0}` first.'.format(_get_setup_command())) + + with open(config_file) as f: + try: + config_json = json.load(f) + + connection_str = config_json[CONN_STR] + cert_path = config_json[CERT_PATH] + gatewayhost = config_json[GATEWAY_HOST] + + return EdgeManager(connection_str, gatewayhost, cert_path) + except (ValueError, KeyError): + raise ValueError('Invalid config file. Please run `{0}` again.'.format(_get_setup_command())) + + +def _get_setup_command(): + return '{0}iotedgehubdev setup -c "<edge-device-connection-string>"'.format('' if os.name == 'nt' else 'sudo ') + + @click.group(context_settings=CONTEXT_SETTINGS, invoke_without_command=True) @click.version_option() def main(): @@ -136,27 +159,13 @@ def setup(connection_string, gateway_host): help='Specify the output file to save the connection string. If the file exists, the content will be overwritten.') @_with_telemetry def modulecred(modules, local, output_file): - configFile = HostPlatform.get_config_file_path() - if Utils.check_if_file_exists(configFile) is not True: - output.error('Cannot find config file. 
Please run `iotedgehubdev setup` first.') - sys.exit(1) - try: - with open(configFile) as f: - jsonObj = json.load(f) - if CONN_STR in jsonObj and CERT_PATH in jsonObj and GATEWAY_HOST in jsonObj: - connection_str = jsonObj[CONN_STR] - cert_path = jsonObj[CERT_PATH] - gatewayhost = jsonObj[GATEWAY_HOST] - edgeManager = EdgeManager(connection_str, gatewayhost, cert_path) - modules = [module.strip() for module in modules.strip().split('|')] - credential = edgeManager.outputModuleCred(modules, local, output_file) - output.info(credential[0]) - output.info(credential[1]) - else: - output.error('Missing keys in config file. Please run `iotedgehubdev setup` again.') - sys.exit(1) - except Exception as e: - raise e + edge_manager = _parse_config_json() + + if edge_manager: + modules = [module.strip() for module in modules.strip().split('|')] + credential = edge_manager.outputModuleCred(modules, local, output_file) + output.info(credential[0]) + output.info(credential[1]) @click.command(context_settings=CONTEXT_SETTINGS, @@ -189,28 +198,16 @@ def modulecred(modules, local, output_file): help='Docker daemon socket to connect to') @_with_telemetry def start(inputs, port, deployment, verbose, host): - configFile = HostPlatform.get_config_file_path() - try: - with open(configFile) as f: - jsonObj = json.load(f) - if CONN_STR in jsonObj and CERT_PATH in jsonObj and GATEWAY_HOST in jsonObj: - connection_str = jsonObj[CONN_STR] - cert_path = jsonObj[CERT_PATH] - gatewayhost = jsonObj[GATEWAY_HOST] - edgeManager = EdgeManager(connection_str, gatewayhost, cert_path) - if host is not None: - os.environ[DOCKER_HOST] = host - else: - output.error('Missing keys in config file. Please run `iotedgehubdev setup` again.') - sys.exit(1) - except Exception as e: - raise e + edge_manager = _parse_config_json() - hostname_hash, suffix = Utils.hash_connection_str_hostname(connection_str) - telemetry.add_extra_props({'iothubhostname': hostname_hash, 'iothubhostnamesuffix': suffix}) + if edge_manager: + if host is not None: + os.environ[DOCKER_HOST] = host - if inputs is None and deployment is not None: - try: + hostname_hash, suffix = Utils.hash_connection_str_hostname(edge_manager.hostname) + telemetry.add_extra_props({'iothubhostname': hostname_hash, 'iothubhostnamesuffix': suffix}) + + if inputs is None and deployment is not None: with open(deployment) as json_file: json_data = json.load(json_file) if 'modulesContent' in json_data: @@ -222,33 +219,31 @@ def start(inputs, port, deployment, verbose, host): except RegistriesLoginError as e: output.warning(e.message()) telemetry.add_extra_props({'failloginregistries': len(e.registries())}) - edgeManager.start_solution(module_content, verbose) + edge_manager.start_solution(module_content, verbose) if not verbose: output.info('IoT Edge Simulator has been started in solution mode.') - except Exception as e: - raise e - else: - if deployment is not None: - output.info('Deployment manifest is ignored when inputs are present.') - if inputs is None: - input_list = ['input1'] else: - input_list = [input_.strip() for input_ in inputs.strip().split(',')] + if deployment is not None: + output.info('Deployment manifest is ignored when inputs are present.') + if inputs is None: + input_list = ['input1'] + else: + input_list = [input_.strip() for input_ in inputs.strip().split(',')] - edgeManager.start_singlemodule(input_list, port) + edge_manager.start_singlemodule(input_list, port) - data = '--data \'{{"inputName": "{0}","data":"hello world"}}\''.format(input_list[0]) - url = 
'http://localhost:{0}/api/v1/messages'.format(port) - curl_msg = ' curl --header "Content-Type: application/json" --request POST {0} {1}'.format(data, url) - output.info('IoT Edge Simulator has been started in single module mode.') - output.info('Please run `iotedgehubdev modulecred` to get credential to connect your module.') - output.info('And send message through:') - output.line() - output.echo(curl_msg, 'green') - output.line() - output.info( - 'Please refer to https://github.com/Azure/iot-edge-testing-utility/blob/master/swagger.json' - ' for detail schema') + data = '--data \'{{"inputName": "{0}","data":"hello world"}}\''.format(input_list[0]) + url = 'http://localhost:{0}/api/v1/messages'.format(port) + curl_msg = ' curl --header "Content-Type: application/json" --request POST {0} {1}'.format(data, url) + output.info('IoT Edge Simulator has been started in single module mode.') + output.info('Please run `iotedgehubdev modulecred` to get credential to connect your module.') + output.info('And send message through:') + output.line() + output.echo(curl_msg, 'green') + output.line() + output.info( + 'Please refer to https://github.com/Azure/iot-edge-testing-utility/blob/master/swagger.json' + ' for detail schema') @click.command(context_settings=CONTEXT_SETTINGS, @@ -259,13 +254,10 @@ def start(inputs, port, deployment, verbose, host): help='Docker daemon socket to connect to') @_with_telemetry def stop(host): - try: - if host is not None: - os.environ[DOCKER_HOST] = host - EdgeManager.stop() - output.info('IoT Edge Simulator has been stopped successfully.') - except Exception as e: - raise e + if host is not None: + os.environ[DOCKER_HOST] = host + EdgeManager.stop() + output.info('IoT Edge Simulator has been stopped successfully.') main.add_command(setup) diff --git a/iotedgehubdev/edgemanager.py b/iotedgehubdev/edgemanager.py index cf0183f..921b52f 100644 --- a/iotedgehubdev/edgemanager.py +++ b/iotedgehubdev/edgemanager.py @@ -42,14 +42,18 @@ class EdgeManager(object): def __init__(self, connection_str, gatewayhost, cert_path): connection_str_dict = Utils.parse_device_connection_str(connection_str) - self.hostname = connection_str_dict[EC.HOSTNAME_KEY] - self.device_id = connection_str_dict[EC.DEVICE_ID_KEY] - self.access_key = connection_str_dict[EC.ACCESS_KEY_KEY] - self.compose_file = None - self.gatewayhost = gatewayhost - self.device_uri = '{0}/devices/{1}'.format(self.hostname, self.device_id) - self.cert_path = cert_path - self.edge_cert = EdgeCert(self.cert_path, self.gatewayhost) + self._hostname = connection_str_dict[EC.HOSTNAME_KEY] + self._device_id = connection_str_dict[EC.DEVICE_ID_KEY] + self._access_key = connection_str_dict[EC.ACCESS_KEY_KEY] + self._compose_file = None + self._gatewayhost = gatewayhost + self._device_uri = '{0}/devices/{1}'.format(self._hostname, self._device_id) + self._cert_path = cert_path + self._edge_cert = EdgeCert(self._cert_path, self._gatewayhost) + + @property + def hostname(self): + return self._hostname @staticmethod def stop(edgedockerclient=None): @@ -117,7 +121,7 @@ class EdgeManager(object): edgedockerclient.copy_file_to_volume( EdgeManager.INPUT, EdgeManager.MODULE_VOLUME, self._device_cert(), module_mount, - self.edge_cert.get_cert_file_path(EC.EDGE_DEVICE_CA)) + self._edge_cert.get_cert_file_path(EC.EDGE_DEVICE_CA)) edgedockerclient.start(inputContainer.get('Id')) def config_solution(self, module_content, target, mount_base): @@ -152,7 +156,7 @@ class EdgeManager(object): network_info = { 'NW_NAME': EdgeManager.NW_NAME, - 
'ALIASES': self.gatewayhost + 'ALIASES': self._gatewayhost } compose_project = ComposeProject(module_content) @@ -233,13 +237,13 @@ class EdgeManager(object): edgedockerclient.copy_file_to_volume( EdgeManager.CERT_HELPER, EdgeManager.HUB_VOLUME, EdgeManager._chain_cert(), - hub_mount, self.edge_cert.get_cert_file_path(EC.EDGE_CHAIN_CA)) + hub_mount, self._edge_cert.get_cert_file_path(EC.EDGE_CHAIN_CA)) edgedockerclient.copy_file_to_volume( EdgeManager.CERT_HELPER, EdgeManager.HUB_VOLUME, EdgeManager._hubserver_pfx(), - hub_mount, self.edge_cert.get_pfx_file_path(EC.EDGE_HUB_SERVER)) + hub_mount, self._edge_cert.get_pfx_file_path(EC.EDGE_HUB_SERVER)) edgedockerclient.copy_file_to_volume( EdgeManager.CERT_HELPER, EdgeManager.MODULE_VOLUME, self._device_cert(), - module_mount, self.edge_cert.get_cert_file_path(EC.EDGE_DEVICE_CA)) + module_mount, self._edge_cert.get_cert_file_path(EC.EDGE_DEVICE_CA)) def start(self, modulesDict, routes): return @@ -258,7 +262,7 @@ class EdgeManager(object): def outputModuleCred(self, names, islocal, output_file): connstrENV = 'EdgeHubConnectionString={0}'.format('|'.join([self.getOrAddModule(name, islocal) for name in names])) - deviceCAEnv = 'EdgeModuleCACertificateFile={0}'.format(self.edge_cert.get_cert_file_path(EC.EDGE_DEVICE_CA)) + deviceCAEnv = 'EdgeModuleCACertificateFile={0}'.format(self._edge_cert.get_cert_file_path(EC.EDGE_DEVICE_CA)) cred = [connstrENV, deviceCAEnv] if output_file is not None: @@ -272,7 +276,7 @@ class EdgeManager(object): def getModule(self, name, islocal): moduleUri = self._getModuleReqUri(name) - sas = Utils.get_iot_hub_sas_token(self.device_uri, self.access_key, None) + sas = Utils.get_iot_hub_sas_token(self._device_uri, self._access_key, None) res = requests.get( moduleUri, headers={ @@ -294,7 +298,7 @@ class EdgeManager(object): def updateModule(self, name, etag, islocal): moduleUri = self._getModuleReqUri(name) - sas = Utils.get_iot_hub_sas_token(self.device_uri, self.access_key, None) + sas = Utils.get_iot_hub_sas_token(self._device_uri, self._access_key, None) res = requests.put( moduleUri, headers={ @@ -304,7 +308,7 @@ class EdgeManager(object): }, data=json.dumps({ 'moduleId': name, - 'deviceId': self.device_id, + 'deviceId': self._device_id, 'authentication': { 'type': 'sas' } @@ -316,7 +320,7 @@ class EdgeManager(object): def addModule(self, name, islocal): moduleUri = self._getModuleReqUri(name) - sas = Utils.get_iot_hub_sas_token(self.device_uri, self.access_key, None) + sas = Utils.get_iot_hub_sas_token(self._device_uri, self._access_key, None) res = requests.put( moduleUri, headers={ @@ -325,7 +329,7 @@ class EdgeManager(object): }, data=json.dumps({ 'moduleId': name, - 'deviceId': self.device_id + 'deviceId': self._device_id }) ) if res.ok is not True: @@ -334,7 +338,7 @@ class EdgeManager(object): def _getModuleReqUri(self, name): return "https://{0}/devices/{1}/modules/{2}?api-version=2018-06-30".format( - self.hostname, self.device_id, name) + self._hostname, self._device_id, name) def _generateModuleConnectionStr(self, response, islocal): jsonObj = response.json() @@ -343,13 +347,13 @@ class EdgeManager(object): sasKey = jsonObj['authentication']['symmetricKey']['primaryKey'] hubTemplate = 'HostName={0};DeviceId={1};ModuleId={2};SharedAccessKey={3}' moduleTemplate = 'HostName={0};GatewayHostName={1};DeviceId={2};ModuleId={3};SharedAccessKey={4}' - gatewayhost = self.gatewayhost + gatewayhost = self._gatewayhost if (islocal): gatewayhost = 'localhost' if (moduleId == '$edgeHub'): - return 
hubTemplate.format(self.hostname, deviceId, moduleId, sasKey) + return hubTemplate.format(self._hostname, deviceId, moduleId, sasKey) else: - return moduleTemplate.format(self.hostname, gatewayhost, deviceId, moduleId, sasKey) + return moduleTemplate.format(self._hostname, gatewayhost, deviceId, moduleId, sasKey) def _generateRoutesEnvFromInputs(self, inputs): routes = [ @@ -368,7 +372,7 @@ class EdgeManager(object): def _start_edge_hub(self, edgedockerclient, edgeHubConnStr, routes, mount_base): edgedockerclient.pull(EdgeManager.EDGEHUB_IMG, None, None) - network_config = edgedockerclient.create_config_for_network(EdgeManager.NW_NAME, aliases=[self.gatewayhost]) + network_config = edgedockerclient.create_config_for_network(EdgeManager.NW_NAME, aliases=[self._gatewayhost]) hub_mount = EdgeManager.HUB_MOUNT.format(mount_base) hub_host_config = edgedockerclient.create_host_config( mounts=[docker.types.Mount(hub_mount, EdgeManager.HUB_VOLUME)], @@ -399,10 +403,10 @@ class EdgeManager(object): edgedockerclient.copy_file_to_volume( EdgeManager.EDGEHUB, EdgeManager.HUB_VOLUME, EdgeManager._chain_cert(), - hub_mount, self.edge_cert.get_cert_file_path(EC.EDGE_CHAIN_CA)) + hub_mount, self._edge_cert.get_cert_file_path(EC.EDGE_CHAIN_CA)) edgedockerclient.copy_file_to_volume( EdgeManager.EDGEHUB, EdgeManager.HUB_VOLUME, EdgeManager._hubserver_pfx(), - hub_mount, self.edge_cert.get_pfx_file_path(EC.EDGE_HUB_SERVER)) + hub_mount, self._edge_cert.get_pfx_file_path(EC.EDGE_HUB_SERVER)) edgedockerclient.start(hubContainer.get('Id')) def _obtain_mount_path(self, edgedockerclient): diff --git a/iotedgehubdev/utils.py b/iotedgehubdev/utils.py index 08a04ad..937b24e 100644 --- a/iotedgehubdev/utils.py +++ b/iotedgehubdev/utils.py @@ -147,14 +147,8 @@ class Utils(object): @staticmethod @suppress_all_exceptions() - def hash_connection_str_hostname(connection_str): + def hash_connection_str_hostname(hostname): """Hash connection string hostname to count distint IoT Hub number""" - try: - connection_str_dict = Utils.parse_device_connection_str(connection_str) - hostname = connection_str_dict[EC.HOSTNAME_KEY] - except Exception: - hostname = None - if not hostname: return ("", "")
Azure/iotedgehubdev
a6fb00a5109860f12bec2b386f368abb20f8d979
diff --git a/tests/test_utils.py b/tests/test_utils.py index f063596..69e1880 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -265,9 +265,9 @@ class TestUtilAPIs(unittest.TestCase): assert Utils.get_sha256_hash("foo") == "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae" def test_hash_connection_str_hostname(self): - connection_str = "HostName=ChaoyiTestIoT.azure-devices.net;DeviceId=edge-device;SharedAccessKey=foobarbazqux=" + hostname = "ChaoyiTestIoT.azure-devices.net" - assert Utils.hash_connection_str_hostname(connection_str) == ( + assert Utils.hash_connection_str_hostname(hostname) == ( '6b8fcfea09003d5f104771e83bd9ff54c592ec2277ec1815df91dd64d1633778', 'azure-devices.net') assert Utils.hash_connection_str_hostname("") == ("", "")
Provide friendlier information at start when the user has not run setup From https://github.com/Azure/iotedgedev/pull/233#issuecomment-410917902 > @LazarusX - In start, can you automatically detect if setup hasn't been called and call it for the user? > > In start, can you detect that it hasn't been built and let them know? The error wasn't enough to get me there. Thanks, Jon
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_utils.py::TestUtilAPIs::test_hash_connection_str_hostname" ]
[ "tests/test_utils.py::TestUtilAPIs::test_check_if_dir_exists_returns_false_path_is_none", "tests/test_utils.py::TestUtilAPIs::test_get_hostname_raises_ioerror_when_getfqdn_raises_ioerror", "tests/test_utils.py::TestUtilAPIs::test_get_hostname_valid", "tests/test_utils.py::TestUtilAPIs::test_check_if_dir_exists_returns_false_if_isdir_returns_false", "tests/test_utils.py::TestUtilAPIs::test_delete_dir_raises_oserror_when_rmtree_fails", "tests/test_utils.py::TestUtilAPIs::test_delete_dir_when_dir_exists", "tests/test_utils.py::TestUtilAPIs::test_check_if_file_exists_returns_true", "tests/test_utils.py::TestUtilAPIs::test_get_sha256_hash", "tests/test_utils.py::TestUtilAPIs::test_mkdir_if_needed_when_dir_does_not_exist", "tests/test_utils.py::TestUtilAPIs::test_mkdir_if_needed_raises_oserror_when_mkdir_fails", "tests/test_utils.py::TestUtilAPIs::test_delete_dir_execute_onerror_callback", "tests/test_utils.py::TestUtilAPIs::test_check_if_file_exists_returns_false_if_exists_returns_false", "tests/test_utils.py::TestUtilAPIs::test_mkdir_if_needed_when_dir_exists", "tests/test_utils.py::TestUtilAPIs::test_check_if_file_exists_returns_false_path_is_none", "tests/test_utils.py::TestUtilAPIs::test_check_if_file_exists_returns_false_if_isfile_returns_false", "tests/test_utils.py::TestUtilAPIs::test_check_if_dir_exists_returns_true", "tests/test_utils.py::TestUtilAPIs::test_delete_dir_when_dir_does_not_exist", "tests/test_utils.py::TestUtilAPIs::test_check_if_dir_exists_returns_false_if_exists_returns_false" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-01-18T02:06:37Z"
mit
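The iotedgehubdev-159 record above centralizes config loading and produces a friendlier error when `setup` has not been run. The sketch below is illustrative only: it mirrors the `_parse_config_json` and `_get_setup_command` helpers from the patch, but returns the raw config values instead of an `EdgeManager`, and the `required_keys` parameter is an assumption added for demonstration.

```python
import json
import os

def get_setup_command():
    # On Linux/macOS the tool must run elevated, hence the sudo prefix.
    return '{0}iotedgehubdev setup -c "<edge-device-connection-string>"'.format(
        '' if os.name == 'nt' else 'sudo ')

def parse_config_json(config_file, required_keys):
    """Load the saved config, failing with the exact command to run on error."""
    if not os.path.isfile(config_file):
        raise ValueError('Cannot find config file. Please run `{0}` first.'.format(get_setup_command()))
    with open(config_file) as f:
        try:
            config_json = json.load(f)
            # Simplified: return the raw values instead of building an EdgeManager.
            return {key: config_json[key] for key in required_keys}
        except (ValueError, KeyError):
            raise ValueError('Invalid config file. Please run `{0}` again.'.format(get_setup_command()))
```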
Azure__msrest-for-python-12
diff --git a/doc/operation_config.rst b/doc/operation_config.rst index 5482a9c..fc0a478 100644 --- a/doc/operation_config.rst +++ b/doc/operation_config.rst @@ -5,17 +5,17 @@ Operation config Methods on operations have extra parameters which can be provided in the kwargs. This is called `operation_config`. -The list of operation configuration is: +The options for operation configuration are: =============== ==== ==== Parameter name Type Role =============== ==== ==== -verify bool -cert str -timeout int -allow_redirects bool -max_redirects int -proxies dict -use_env_proxies bool whether to read proxy settings from local env vars -retries int number of retries +verify bool Whether to verify the SSL certificate. Default is True. +cert str Path to local certificate for client side verification. +timeout int Timeout for establishing a server connection in seconds. +allow_redirects bool Whether to allow redirects. +max_redirects int Maimum number of allowed redirects. +proxies dict Proxy server settings. +use_env_proxies bool Whether to read proxy settings from local environment variables. +retries int Total number of retry attempts. =============== ==== ==== diff --git a/msrest/serialization.py b/msrest/serialization.py index ee81c21..a18bebe 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -113,31 +113,38 @@ class Model(object): return base._subtype_map return {} + @classmethod + def _flatten_subtype(cls, key, objects): + if not '_subtype_map' in cls.__dict__: + return {} + result = dict(cls._subtype_map[key]) + for valuetype in cls._subtype_map[key].values(): + result.update(objects[valuetype]._flatten_subtype(key, objects)) + return result + @classmethod def _classify(cls, response, objects): """Check the class _subtype_map for any child classes. - We want to ignore any inheirited _subtype_maps. + We want to ignore any inherited _subtype_maps. + Remove the polymorphic key from the initial data. """ - try: - map = cls.__dict__.get('_subtype_map', {}) + for subtype_key in cls.__dict__.get('_subtype_map', {}).keys(): + subtype_value = None - for _type, _classes in map.items(): - classification = response.get(_type) - try: - return objects[_classes[classification]] - except KeyError: - pass + rest_api_response_key = _decode_attribute_map_key(cls._attribute_map[subtype_key]['key']) + subtype_value = response.pop(rest_api_response_key, None) or response.pop(subtype_key, None) + if subtype_value: + flatten_mapping_type = cls._flatten_subtype(subtype_key, objects) + return objects[flatten_mapping_type[subtype_value]] + return cls - for c in _classes: - try: - _cls = objects[_classes[c]] - return _cls._classify(response, objects) - except (KeyError, TypeError): - continue - raise TypeError("Object cannot be classified futher.") - except AttributeError: - raise TypeError("Object cannot be classified futher.") +def _decode_attribute_map_key(key): + """This decode a key in an _attribute_map to the actual key we want to look at + inside the received data. 
+ :param str key: A key string from the generated code + """ + return key.replace('\\.', '.') def _convert_to_datatype(data, data_type, localtypes): if data is None: @@ -157,6 +164,7 @@ def _convert_to_datatype(data, data_type, localtypes): elif issubclass(data_obj, Enum): return data elif not isinstance(data, data_obj): + data_obj = data_obj._classify(data, localtypes) result = { key: _convert_to_datatype( data[key], @@ -191,11 +199,11 @@ class Serializer(object): "maximum_ex": lambda x, y: x >= y, "min_items": lambda x, y: len(x) < y, "max_items": lambda x, y: len(x) > y, - "pattern": lambda x, y: not re.match(y, x), + "pattern": lambda x, y: not re.match(y, x, re.UNICODE), "unique": lambda x, y: len(x) != len(set(x)), "multiple": lambda x, y: x % y != 0 } - flattten = re.compile(r"(?<!\\)\.") + flatten = re.compile(r"(?<!\\)\.") def __init__(self, classes=None): self.serialize_type = { @@ -241,14 +249,12 @@ class Serializer(object): try: attributes = target_obj._attribute_map - self._classify_data(target_obj, class_name, serialized) - for attr, map in attributes.items(): attr_name = attr debug_name = "{}.{}".format(class_name, attr_name) try: - keys = self.flattten.split(map['key']) - keys = [k.replace('\\.', '.') for k in keys] + keys = self.flatten.split(map['key']) + keys = [_decode_attribute_map_key(k) for k in keys] attr_type = map['type'] orig_attr = getattr(target_obj, attr) validation = target_obj._validation.get(attr_name, {}) @@ -278,18 +284,6 @@ class Serializer(object): else: return serialized - def _classify_data(self, target_obj, class_name, serialized): - """Check whether this object is a child and therefor needs to be - classified in the message. - """ - try: - for _type, _classes in target_obj._get_subtype_map().items(): - for ref, name in _classes.items(): - if name == class_name: - serialized[_type] = ref - except AttributeError: - pass # TargetObj has no _subtype_map so we don't need to classify. - def body(self, data, data_type, **kwargs): """Serialize data intended for a request body. @@ -752,9 +746,9 @@ class Deserializer(object): while '.' in key: dict_keys = self.flatten.split(key) if len(dict_keys) == 1: - key = dict_keys[0].replace('\\.', '.') + key = _decode_attribute_map_key(dict_keys[0]) break - working_key = dict_keys[0].replace('\\.', '.') + working_key = _decode_attribute_map_key(dict_keys[0]) working_data = working_data.get(working_key, data) key = '.'.join(dict_keys[1:]) @@ -786,8 +780,8 @@ class Deserializer(object): try: target = target._classify(data, self.dependencies) - except (TypeError, AttributeError): - pass # Target has no subclasses, so can't classify further. + except AttributeError: + pass # Target is not a Model, no classify return target, target.__class__.__name__ def _unpack_content(self, raw_data):
Azure/msrest-for-python
cef4818746df436465cfc810723f79aa3a72da89
diff --git a/test/unittest_serialization.py b/test/unittest_serialization.py index 166a375..6a93723 100644 --- a/test/unittest_serialization.py +++ b/test/unittest_serialization.py @@ -158,6 +158,11 @@ class TestRuntimeSerialized(unittest.TestCase): self.s = Serializer() return super(TestRuntimeSerialized, self).setUp() + def test_validate(self): + # Assert not necessary, should not raise exception + self.s.validate("simplestring", "StringForLog", pattern="^[a-z]+$") + self.s.validate(u"UTF8ééééé", "StringForLog", pattern=r"^[\w]+$") + def test_obj_serialize_none(self): """Test that serialize None in object is still None. """ @@ -562,51 +567,56 @@ class TestRuntimeSerialized(unittest.TestCase): _attribute_map = { "animals":{"key":"Animals", "type":"[Animal]"}, - } + } - def __init__(self): - self.animals = None + def __init__(self, animals=None): + self.animals = animals class Animal(Model): _attribute_map = { - "name":{"key":"Name", "type":"str"} - } + "name":{"key":"Name", "type":"str"}, + "d_type":{"key":"dType", "type":"str"} + } _subtype_map = { - 'dType': {"cat":"Cat", "dog":"Dog"} - } + 'd_type': {"cat":"Cat", "dog":"Dog"} + } - def __init__(self): - self.name = None + def __init__(self, name=None): + self.name = name class Dog(Animal): _attribute_map = { "name":{"key":"Name", "type":"str"}, - "likes_dog_food":{"key":"likesDogFood","type":"bool"} + "likes_dog_food":{"key":"likesDogFood","type":"bool"}, + "d_type":{"key":"dType", "type":"str"} } - def __init__(self): - self.likes_dog_food = None - super(Dog, self).__init__() + def __init__(self, name=None, likes_dog_food=None): + self.likes_dog_food = likes_dog_food + super(Dog, self).__init__(name) + self.d_type = 'dog' class Cat(Animal): _attribute_map = { "name":{"key":"Name", "type":"str"}, "likes_mice":{"key":"likesMice","type":"bool"}, - "dislikes":{"key":"dislikes","type":"Animal"} + "dislikes":{"key":"dislikes","type":"Animal"}, + "d_type":{"key":"dType", "type":"str"} } _subtype_map = { - "dType":{"siamese":"Siamese"} + "d_type":{"siamese":"Siamese"} } - def __init__(self): - self.likes_mice = None - self.dislikes = None - super(Cat, self).__init__() + def __init__(self, name=None, likes_mice=None, dislikes = None): + self.likes_mice = likes_mice + self.dislikes = dislikes + super(Cat, self).__init__(name) + self.d_type = 'cat' class Siamese(Cat): @@ -614,12 +624,14 @@ class TestRuntimeSerialized(unittest.TestCase): "name":{"key":"Name", "type":"str"}, "likes_mice":{"key":"likesMice","type":"bool"}, "dislikes":{"key":"dislikes","type":"Animal"}, - "color":{"key":"Color", "type":"str"} + "color":{"key":"Color", "type":"str"}, + "d_type":{"key":"dType", "type":"str"} } - def __init__(self): - self.color = None - super(Siamese, self).__init__() + def __init__(self, name=None, likes_mice=None, dislikes = None, color=None): + self.color = color + super(Siamese, self).__init__(name, likes_mice, dislikes) + self.d_type = 'siamese' message = { "Animals": [ @@ -669,6 +681,40 @@ class TestRuntimeSerialized(unittest.TestCase): serialized = self.s._serialize(zoo) self.assertEqual(serialized, message) + old_dependencies = self.s.dependencies + self.s.dependencies = { + 'Zoo': Zoo, + 'Animal': Animal, + 'Dog': Dog, + 'Cat': Cat, + 'Siamese': Siamese + } + + serialized = self.s.body({ + "animals": [{ + "dType": "dog", + "likes_dog_food": True, + "name": "Fido" + },{ + "dType": "cat", + "likes_mice": False, + "dislikes": { + "dType": "dog", + "likes_dog_food": True, + "name": "Angry" + }, + "name": "Felix" + },{ + "dType": "siamese", + 
"color": "grey", + "likes_mice": True, + "name": "Finch" + }] + }, "Zoo") + self.assertEqual(serialized, message) + + self.s.dependencies = old_dependencies + class TestRuntimeDeserialized(unittest.TestCase): @@ -1100,48 +1146,72 @@ class TestRuntimeDeserialized(unittest.TestCase): _attribute_map = { "animals":{"key":"Animals", "type":"[Animal]"}, - } + } + + def __init__(self, animals=None): + self.animals = animals class Animal(Model): _attribute_map = { - "name":{"key":"Name", "type":"str"} - } - - _test_attr = 123 + "name":{"key":"Name", "type":"str"}, + "d_type":{"key":"dType", "type":"str"} + } _subtype_map = { - 'dType': {"cat":"Cat", "dog":"Dog"} - } + 'd_type': {"cat":"Cat", "dog":"Dog"} + } + + def __init__(self, name=None): + self.name = name class Dog(Animal): _attribute_map = { "name":{"key":"Name", "type":"str"}, - "likes_dog_food":{"key":"likesDogFood","type":"bool"} + "likes_dog_food":{"key":"likesDogFood","type":"bool"}, + "d_type":{"key":"dType", "type":"str"} } + def __init__(self, name=None, likes_dog_food=None): + self.likes_dog_food = likes_dog_food + super(Dog, self).__init__(name) + self.d_type = 'dog' + class Cat(Animal): _attribute_map = { "name":{"key":"Name", "type":"str"}, "likes_mice":{"key":"likesMice","type":"bool"}, - "dislikes":{"key":"dislikes","type":"Animal"} + "dislikes":{"key":"dislikes","type":"Animal"}, + "d_type":{"key":"dType", "type":"str"} } _subtype_map = { - "dType":{"siamese":"Siamese"} + "d_type":{"siamese":"Siamese"} } + def __init__(self, name=None, likes_mice=None, dislikes = None): + self.likes_mice = likes_mice + self.dislikes = dislikes + super(Cat, self).__init__(name) + self.d_type = 'cat' + class Siamese(Cat): _attribute_map = { "name":{"key":"Name", "type":"str"}, "likes_mice":{"key":"likesMice","type":"bool"}, "dislikes":{"key":"dislikes","type":"Animal"}, - "color":{"key":"Color", "type":"str"} + "color":{"key":"Color", "type":"str"}, + "d_type":{"key":"dType", "type":"str"} } + def __init__(self, name=None, likes_mice=None, dislikes = None, color=None): + self.color = color + super(Siamese, self).__init__(name, likes_mice, dislikes) + self.d_type = 'siamese' + message = { "Animals": [ { @@ -1188,5 +1258,49 @@ class TestRuntimeDeserialized(unittest.TestCase): self.assertEqual(animals[2].color, message['Animals'][2]["Color"]) self.assertTrue(animals[2].likes_mice) + def test_polymorphic_deserialization_with_escape(self): + + class Animal(Model): + + _attribute_map = { + "name":{"key":"Name", "type":"str"}, + "d_type":{"key":"odata\\.type", "type":"str"} + } + + _subtype_map = { + 'd_type': {"dog":"Dog"} + } + + def __init__(self, name=None): + self.name = name + + class Dog(Animal): + + _attribute_map = { + "name":{"key":"Name", "type":"str"}, + "likes_dog_food":{"key":"likesDogFood","type":"bool"}, + "d_type":{"key":"odata\\.type", "type":"str"} + } + + def __init__(self, name=None, likes_dog_food=None): + self.likes_dog_food = likes_dog_food + super(Dog, self).__init__(name) + self.d_type = 'dog' + + message = { + "odata.type": "dog", + "likesDogFood": True, + "Name": "Fido" + } + + self.d.dependencies = { + 'Animal':Animal, 'Dog':Dog} + + animal = self.d('Animal', message) + + self.assertIsInstance(animal, Dog) + self.assertTrue(animal.likes_dog_food) + + if __name__ == '__main__': unittest.main()
Support building instance from dict with polymorphic object Currently the SDKs can accept a dict instead of a model and transform it to the right model automatically. This does not work if a polymorphic object appears at any level. It should be possible, by looking at `_subtype_map`, to identify the right instance type. FYI @annatisch @vishrutshah
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/unittest_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "test/unittest_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "test/unittest_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization" ]
[ "test/unittest_serialization.py::TestModelDeserialization::test_response", "test/unittest_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "test/unittest_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_none", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_bool", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_str", "test/unittest_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "test/unittest_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "test/unittest_serialization.py::TestRuntimeDeserialized::test_attr_int", "test/unittest_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "test/unittest_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "test/unittest_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "test/unittest_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_int", "test/unittest_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "test/unittest_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "test/unittest_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "test/unittest_serialization.py::TestRuntimeSerialized::test_empty_list", "test/unittest_serialization.py::TestRuntimeSerialized::test_validate", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_str", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_bool", "test/unittest_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "test/unittest_serialization.py::TestRuntimeSerialized::test_serialize_object", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_none", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_sequence", "test/unittest_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "test/unittest_serialization.py::TestRuntimeSerialized::test_attr_list_complex" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2017-01-31T01:03:14Z"
mit
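The msrest-for-python-12 record above makes dict-to-model conversion classify a plain dict against `_subtype_map` before instantiating it. The following is a minimal, self-contained sketch of that discriminator lookup; the `classify` function and the `Animal`/`Dog`/`Cat` fixtures echo the test patch and are not the library's actual API (the real `_classify` also resolves the REST wire name through `_attribute_map` and walks nested subtype maps).

```python
def classify(data, subtype_map, objects, default_cls):
    """Pick a concrete model class from a dict using its discriminator field."""
    for key, mapping in subtype_map.items():
        value = data.pop(key, None)  # the patch also removes the key from the payload
        if value in mapping:
            return objects[mapping[value]]
    return default_cls

class Animal:
    pass

class Dog(Animal):
    pass

class Cat(Animal):
    pass

objects = {"Dog": Dog, "Cat": Cat}
subtype_map = {"d_type": {"dog": "Dog", "cat": "Cat"}}
cls = classify({"d_type": "dog", "name": "Fido"}, subtype_map, objects, Animal)
assert cls is Dog
```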
Azure__msrest-for-python-167
diff --git a/msrest/pipeline/universal.py b/msrest/pipeline/universal.py index f124ea7..b8dc40c 100644 --- a/msrest/pipeline/universal.py +++ b/msrest/pipeline/universal.py @@ -32,6 +32,7 @@ import os import xml.etree.ElementTree as ET import platform import codecs +import re from typing import Mapping, Any, Optional, AnyStr, Union, IO, cast, TYPE_CHECKING # pylint: disable=unused-import @@ -129,10 +130,9 @@ class HTTPLogger(SansIOHTTPPolicy): class RawDeserializer(SansIOHTTPPolicy): - JSON_MIMETYPES = [ - 'application/json', - 'text/json' # Because we're open minded people... - ] + # Accept "text" because we're open minded people... + JSON_REGEXP = re.compile(r'^(application|text)/([a-z+.]+\+)?json$') + # Name used in context CONTEXT_NAME = "deserialized_data" @@ -165,7 +165,7 @@ class RawDeserializer(SansIOHTTPPolicy): if content_type is None: return data - if content_type in cls.JSON_MIMETYPES: + if cls.JSON_REGEXP.match(content_type): try: return json.loads(data_as_str) except ValueError as err:
Azure/msrest-for-python
c4086bfac4e45b11b6bd4267cff62aa302a51877
diff --git a/tests/test_universal_pipeline.py b/tests/test_universal_pipeline.py index 2568e34..cd92ca7 100644 --- a/tests/test_universal_pipeline.py +++ b/tests/test_universal_pipeline.py @@ -151,6 +151,12 @@ def test_raw_deserializer(): result = response.context["deserialized_data"] assert result["success"] is True + # Simple JSON with complex content_type + response = build_response(b'{"success": true}', content_type="application/vnd.microsoft.appconfig.kv+json") + raw_deserializer.on_response(None, response, stream=False) + result = response.context["deserialized_data"] + assert result["success"] is True + # JSON with UTF-8 BOM response = build_response(b'\xef\xbb\xbf{"success": true}', content_type="application/json; charset=utf-8") raw_deserializer.on_response(None, response, stream=False)
Response content type json parser Currently the deserializer only supports a fixed list of content type values like "application/json" and "text/json". Based on the HTTP spec, "application/<randomtext>+json" is also JSON, so we should have a content type parser that accepts this syntax.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_universal_pipeline.py::test_raw_deserializer" ]
[ "tests/test_universal_pipeline.py::test_no_log", "tests/test_universal_pipeline.py::test_user_agent" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-07-05T19:26:19Z"
mit
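The regular expression below is taken verbatim from the msrest-for-python-167 patch above; the small helper and checks around it are an illustrative sketch, not the library's API, and assume a bare media type with any `; charset=...` parameters already stripped.

```python
import re

# Accept "text" because we're open minded people...
JSON_REGEXP = re.compile(r'^(application|text)/([a-z+.]+\+)?json$')

def is_json_content_type(content_type):
    """True for plain JSON and vendor-suffixed types such as application/*+json."""
    return bool(JSON_REGEXP.match(content_type))

assert is_json_content_type("application/json")
assert is_json_content_type("text/json")
assert is_json_content_type("application/vnd.microsoft.appconfig.kv+json")
assert not is_json_content_type("application/xml")
```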
Azure__msrest-for-python-188
diff --git a/msrest/exceptions.py b/msrest/exceptions.py index 31bedf0..dcd14d2 100644 --- a/msrest/exceptions.py +++ b/msrest/exceptions.py @@ -107,6 +107,13 @@ class ValidationError(ClientException): "type": "must be of type {!r}" } + @staticmethod + def _format_message(rule, reason, value): + if rule == "type" and value.startswith(r"{"): + internal_type = value.strip(r"{}") + value = "dict[str, {}]".format(internal_type) + return reason.format(value) + def __init__(self, rule, target, value, *args, **kwargs): # type: (str, str, str, str, str) -> None self.rule = rule @@ -114,7 +121,7 @@ class ValidationError(ClientException): message = "Parameter {!r} ".format(target) reason = self._messages.get( rule, "failed to meet validation requirement.") - message += reason.format(value) + message += self._format_message(rule, reason, value) super(ValidationError, self).__init__(message, *args, **kwargs)
Azure/msrest-for-python
4a0a44ae6d1a0d8a196a7809d54f2a52c2c27479
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 399d963..00d253c 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -709,6 +709,10 @@ class TestRuntimeSerialized(unittest.TestCase): with self.assertRaises(SerializationError): self.s._serialize(test_obj) + with pytest.raises(ValidationError) as err: + test_obj.validate() + assert "Parameter 'attr_e' must be of type 'dict[str, float]'" in str(err.value) + test_obj.attr_e = {"value": "NotAFloat"} with self.assertRaises(SerializationError): @@ -1290,7 +1294,7 @@ class TestRuntimeSerialized(unittest.TestCase): long_type = long except NameError: long_type = int - + class TestModel(Model): _attribute_map = {'data': {'key': 'data', 'type': 'object'}}
Improve msrest exception message msrest.exceptions : Parameter 'tags' must be of type '{str}'
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple" ]
[ "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", 
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-12-26T21:51:25Z"
mit
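The msrest-for-python-188 patch above rewrites the `type` validation message so a swagger-style `{str}` token reads as `dict[str, str]`. The standalone function below is a sketch of that rewrite for illustration; in the library the logic lives on `ValidationError._format_message` rather than a free function.

```python
def format_type_message(reason, value):
    """Render '{float}' style type tokens as a readable dict type."""
    if value.startswith("{"):
        internal_type = value.strip("{}")
        value = "dict[str, {}]".format(internal_type)
    return reason.format(value)

print(format_type_message("must be of type {!r}", "{float}"))
# must be of type 'dict[str, float]'
print(format_type_message("must be of type {!r}", "str"))
# must be of type 'str'
```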
Azure__msrest-for-python-189
diff --git a/msrest/serialization.py b/msrest/serialization.py index 1da64ae..7abaf70 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -217,6 +217,9 @@ class Model(object): try: debug_name = "{}.{}".format(self.__class__.__name__, attr_name) + # https://github.com/Azure/msrest-for-python/issues/85 + if value is not None and attr_type in Serializer.basic_types.values(): + value = Serializer.serialize_basic(value, attr_type) Serializer.validate(value, debug_name, **self._validation.get(attr_name, {})) except ValidationError as validation_error: validation_result.append(validation_error) @@ -578,6 +581,14 @@ class Serializer(object): raise errors[0] return self._serialize(data, data_type, **kwargs) + def _http_component_validation(self, data, data_type, name, **kwargs): + if self.client_side_validation: + # https://github.com/Azure/msrest-for-python/issues/85 + if data is not None and data_type in self.basic_types.values(): + data = self.serialize_basic(data, data_type, **kwargs) + data = self.validate(data, name, required=True, **kwargs) + return data + def url(self, name, data, data_type, **kwargs): """Serialize data intended for a URL path. @@ -587,8 +598,7 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - if self.client_side_validation: - data = self.validate(data, name, required=True, **kwargs) + data = self._http_component_validation(data, data_type, name, **kwargs) try: output = self.serialize_data(data, data_type, **kwargs) if data_type == 'bool': @@ -612,8 +622,7 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - if self.client_side_validation: - data = self.validate(data, name, required=True, **kwargs) + data = self._http_component_validation(data, data_type, name, **kwargs) try: if data_type in ['[str]']: data = ["" if d is None else d for d in data] @@ -639,8 +648,7 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - if self.client_side_validation: - data = self.validate(data, name, required=True, **kwargs) + data = self._http_component_validation(data, data_type, name, **kwargs) try: if data_type in ['[str]']: data = ["" if d is None else d for d in data] @@ -713,14 +721,16 @@ class Serializer(object): else: return self._serialize(data, **kwargs) - def _get_custom_serializers(self, data_type, **kwargs): + @classmethod + def _get_custom_serializers(cls, data_type, **kwargs): custom_serializer = kwargs.get("basic_types_serializers", {}).get(data_type) if custom_serializer: return custom_serializer if kwargs.get("is_xml", False): - return self._xml_basic_types_serializers.get(data_type) + return cls._xml_basic_types_serializers.get(data_type) - def serialize_basic(self, data, data_type, **kwargs): + @classmethod + def serialize_basic(cls, data, data_type, **kwargs): """Serialize basic builting data type. Serializes objects to str, int, float or bool. @@ -731,14 +741,15 @@ class Serializer(object): :param data: Object to be serialized. :param str data_type: Type of object in the iterable. 
""" - custom_serializer = self._get_custom_serializers(data_type, **kwargs) + custom_serializer = cls._get_custom_serializers(data_type, **kwargs) if custom_serializer: return custom_serializer(data) if data_type == 'str': - return self.serialize_unicode(data) + return cls.serialize_unicode(data) return eval(data_type)(data) - def serialize_unicode(self, data): + @classmethod + def serialize_unicode(cls, data): """Special handling for serializing unicode strings in Py2. Encode to UTF-8 if unicode, otherwise handle as a str.
Azure/msrest-for-python
c2249d459e8af8912c5da4cd728c8201066b9304
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 00d253c..46df7f9 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -245,6 +245,32 @@ class TestRuntimeSerialized(unittest.TestCase): self.s = Serializer({'TestObj': self.TestObj}) return super(TestRuntimeSerialized, self).setUp() + def test_validation_type(self): + # https://github.com/Azure/msrest-for-python/issues/85 + s = Serializer() + + s.query("filter", 186, "int", maximum=666) + s.query("filter", "186", "int", maximum=666) + + class TestValidationObj(Model): + + _attribute_map = { + 'attr_a': {'key':'id', 'type':'int'}, + } + _validation = { + 'attr_a': {'maximum': 4294967295, 'minimum': 1}, + } + + + test_obj = TestValidationObj() + test_obj.attr_a = 186 + errors_found = test_obj.validate() + assert not errors_found + + test_obj.attr_a = '186' + errors_found = test_obj.validate() + assert not errors_found + def test_validation_flag(self): s = Serializer() s.client_side_validation = True
Validation fails if a string holds a valid integer but minimum/maximum constraints are used. Example from @derekbekoe ```python >>> ExpressRouteCircuitPeering(peer_asn='10002').validate() [ValidationError("Parameter 'ExpressRouteCircuitPeering.peer_asn' failed to meet validation requirement.",)] ``` I see no reason to refuse that, and we should take a look.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_validation_type" ]
[ "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", 
"tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs", "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-12-26T22:49:23Z"
mit
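The msrest-for-python-189 patch above runs basic-typed values through `Serializer.serialize_basic` before validation, so a numeric string such as `'186'` passes integer minimum/maximum checks. The sketch below illustrates only that idea with hypothetical `coerce_basic`/`validate_range` helpers; it is not the library's validation code and deliberately covers just `int`, `float`, and `str`. The bounds match the record's test patch.

```python
def coerce_basic(value, data_type):
    """Coerce a value to its declared basic type before range validation."""
    casts = {"int": int, "float": float, "str": str}
    return casts[data_type](value)

def validate_range(value, minimum=None, maximum=None):
    errors = []
    if minimum is not None and value < minimum:
        errors.append("below minimum")
    if maximum is not None and value > maximum:
        errors.append("above maximum")
    return errors

value = coerce_basic("186", "int")  # "186" -> 186
assert validate_range(value, minimum=1, maximum=4294967295) == []
```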
Azure__msrest-for-python-198
diff --git a/msrest/serialization.py b/msrest/serialization.py index 27fb7e9..59187e1 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -1108,6 +1108,11 @@ def rest_key_extractor(attr, attr_desc, data): break working_key = _decode_attribute_map_key(dict_keys[0]) working_data = working_data.get(working_key, data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None key = '.'.join(dict_keys[1:]) return working_data.get(key) @@ -1123,6 +1128,11 @@ def rest_key_case_insensitive_extractor(attr, attr_desc, data): break working_key = _decode_attribute_map_key(dict_keys[0]) working_data = attribute_key_case_insensitive_extractor(working_key, None, working_data) + if working_data is None: + # If at any point while following flatten JSON path see None, it means + # that all properties under are None as well + # https://github.com/Azure/msrest-for-python/issues/197 + return None key = '.'.join(dict_keys[1:]) if working_data:
Azure/msrest-for-python
df4cea0e45976951700e21d5c192da372754443c
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 4b3f537..12a0042 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -1527,6 +1527,46 @@ class TestRuntimeDeserialized(unittest.TestCase): obj = TestObj.from_dict({'name': 'ab'}) self.assertEqual('ab', obj.name) + def test_deserialize_flattening(self): + # https://github.com/Azure/msrest-for-python/issues/197 + + json_body = { + "properties" : { + "properties": None + } + } + + class ComputeResource(Model): + + _attribute_map = { + 'properties': {'key': 'properties', 'type': 'VirtualMachine'}, + } + + def __init__(self, properties=None, **kwargs): + self.properties = properties + + class VirtualMachine(Model): + + _attribute_map = { + 'virtual_machine_size': {'key': 'properties.virtualMachineSize', 'type': 'str'}, + 'ssh_port': {'key': 'properties.sshPort', 'type': 'int'}, + 'address': {'key': 'properties.address', 'type': 'str'}, + 'administrator_account': {'key': 'properties.administratorAccount', 'type': 'VirtualMachineSshCredentials'}, + } + + def __init__(self, **kwargs): + super(VirtualMachine, self).__init__(**kwargs) + self.virtual_machine_size = kwargs.get('virtual_machine_size', None) + self.ssh_port = kwargs.get('ssh_port', None) + self.address = kwargs.get('address', None) + self.administrator_account = kwargs.get('administrator_account', None) + + d = Deserializer({ + 'ComputeResource': ComputeResource, + 'VirtualMachine': VirtualMachine, + }) + response = d(ComputeResource, json.dumps(json_body), 'application/json') + def test_deserialize_storage(self): StorageAccount = storage_models.StorageAccount
Error when serializing flattened properties with null value as the container object I found that msrest reports an error when serializing flattened properties whose container object has a null value; could you help to check? I looked at the msrest code, and it seems the situation where 'data' is null is not handled (see the last screenshot below). Thanks. ![image](https://user-images.githubusercontent.com/6038235/78621314-e6cf1980-78b4-11ea-9316-58bee4dbbb38.png) ![image](https://user-images.githubusercontent.com/6038235/78621324-ef275480-78b4-11ea-94b3-29b6350b7218.png) ![image](https://user-images.githubusercontent.com/6038235/78621335-f64e6280-78b4-11ea-972b-b062f01efeab.png)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_flattening" ]
[ "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_query", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_time", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_type", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared", 
"tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_time", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media" ], "has_test_patch": true, "is_lite": false }
"2020-04-07T17:03:45Z"
mit
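As an aside to the record above: a minimal sketch of the failure the patch guards against. The dict and the dotted key path mirror the model in the test patch; the loop is only a simplified stand-in for msrest's `rest_key_extractor`.

```python
# Walking a flattened ("dotted") key path with dict.get breaks as soon as an
# intermediate value is None instead of a nested dict; the patch adds an
# `is None` guard so the whole flattened subtree deserializes to None.
data = {"properties": {"properties": None}}

working = data
for part in ["properties", "properties", "virtualMachineSize"]:
    if working is None:   # guard added by the patch; without it the next
        break             # iteration calls None.get(...) -> AttributeError
    working = working.get(part)

print(working)  # None
```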
Azure__msrest-for-python-201
diff --git a/msrest/serialization.py b/msrest/serialization.py index 59187e1..0e65d8e 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -28,6 +28,7 @@ from base64 import b64decode, b64encode import calendar import datetime import decimal +import email from enum import Enum import json import logging @@ -78,6 +79,26 @@ class UTC(datetime.tzinfo): """No daylight saving for UTC.""" return datetime.timedelta(hours=1) +class _FixedOffset(datetime.tzinfo): + """Fixed offset in minutes east from UTC. + Copy/pasted from Python doc + :param int offset: offset in minutes + """ + + def __init__(self, offset): + self.__offset = datetime.timedelta(minutes=offset) + + def utcoffset(self, dt): + return self.__offset + + def tzname(self, dt): + return str(self.__offset.total_seconds()/3600) + + def __repr__(self): + return "<FixedOffset {}>".format(self.tzname(None)) + + def dst(self, dt): + return datetime.timedelta(0) try: from datetime import timezone @@ -1814,10 +1835,13 @@ class Deserializer(object): if isinstance(attr, ET.Element): attr = attr.text try: - date_obj = datetime.datetime.strptime( - attr, "%a, %d %b %Y %H:%M:%S %Z") + parsed_date = email.utils.parsedate_tz(attr) + date_obj = datetime.datetime( + *parsed_date[:6], + tzinfo=_FixedOffset((parsed_date[9] or 0)/60) + ) if not date_obj.tzinfo: - date_obj = date_obj.replace(tzinfo=TZ_UTC) + date_obj = date_obj.astimezone(tz=TZ_UTC) except ValueError as err: msg = "Cannot deserialize to rfc datetime object." raise_with_traceback(DeserializationError, msg, err)
Azure/msrest-for-python
9e2e6529ac3cc91454a859c01775493872b20e92
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 12a0042..fffd8a9 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -1974,6 +1974,55 @@ class TestRuntimeDeserialized(unittest.TestCase): self.assertEqual(utc.tm_sec, 52) self.assertEqual(a.microsecond, 780000) + def test_deserialize_datetime_rfc(self): + + a = Deserializer.deserialize_rfc("Mon, 20 Nov 1995 19:12:08 -0500") + utc = a.utctimetuple() + + # UTC: 21 Nov, 00:12:08 + self.assertEqual(utc.tm_year, 1995) + self.assertEqual(utc.tm_mon, 11) + self.assertEqual(utc.tm_mday, 21) + self.assertEqual(utc.tm_hour, 0) + self.assertEqual(utc.tm_min, 12) + self.assertEqual(utc.tm_sec, 8) + self.assertEqual(a.microsecond, 0) + + a = Deserializer.deserialize_rfc("Mon, 20 Nov 1995 19:12:08 CDT") + utc = a.utctimetuple() + + # UTC: 21 Nov, 00:12:08 + self.assertEqual(utc.tm_year, 1995) + self.assertEqual(utc.tm_mon, 11) + self.assertEqual(utc.tm_mday, 21) + self.assertEqual(utc.tm_hour, 0) + self.assertEqual(utc.tm_min, 12) + self.assertEqual(utc.tm_sec, 8) + self.assertEqual(a.microsecond, 0) + + a = Deserializer.deserialize_rfc("Mon, 20 Nov 1995 19:12:08") + utc = a.utctimetuple() + + # UTC: No info is considered UTC + self.assertEqual(utc.tm_year, 1995) + self.assertEqual(utc.tm_mon, 11) + self.assertEqual(utc.tm_mday, 20) + self.assertEqual(utc.tm_hour, 19) + self.assertEqual(utc.tm_min, 12) + self.assertEqual(utc.tm_sec, 8) + self.assertEqual(a.microsecond, 0) + + a = Deserializer.deserialize_rfc("Mon, 20 Nov 1995 19:12:08 GMT") + utc = a.utctimetuple() + + self.assertEqual(utc.tm_year, 1995) + self.assertEqual(utc.tm_mon, 11) + self.assertEqual(utc.tm_mday, 20) + self.assertEqual(utc.tm_hour, 19) + self.assertEqual(utc.tm_min, 12) + self.assertEqual(utc.tm_sec, 8) + self.assertEqual(a.microsecond, 0) + def test_polymorphic_deserialization(self): class Zoo(Model):
Parsing RFC date assumes current locale is English For example, trying to parse `Fri, 28 Feb 2020 19:04:06 GMT` with a Spanish locale will fail, since "Fri" and "Feb" are not Spanish words. This happens because the parser uses `strptime`, which is locale dependent, and Python doesn't offer a safe way to set the locale for a single call. The only reliable fix is to stop using `strptime`, since alternatives like [this one](https://stackoverflow.com/a/24070673/4074838) rely on a thread lock, which I'd rather avoid.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime_rfc" ]
[ "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_time", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_query", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_type", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", 
"tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_time", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2020-05-06T23:57:23Z"
mit
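As an aside to the record above: a minimal sketch of the locale-independent approach the patch takes, using `email.utils.parsedate_tz` from the standard library instead of `strptime`. The helper name is illustrative, not msrest's.

```python
import email.utils
from datetime import datetime, timedelta, timezone

def parse_rfc1123(value):
    # parsedate_tz matches English day/month names regardless of the process
    # locale, unlike strptime with %a/%b.
    parsed = email.utils.parsedate_tz(value)
    if parsed is None:
        raise ValueError("not an RFC-1123 date: %r" % value)
    offset_seconds = parsed[9] or 0  # element 9 is the UTC offset in seconds, or None
    return datetime(*parsed[:6], tzinfo=timezone(timedelta(seconds=offset_seconds)))

print(parse_rfc1123("Fri, 28 Feb 2020 19:04:06 GMT"))    # 2020-02-28 19:04:06+00:00
print(parse_rfc1123("Mon, 20 Nov 1995 19:12:08 -0500"))  # 1995-11-20 19:12:08-05:00
```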
Azure__msrest-for-python-208
diff --git a/msrest/serialization.py b/msrest/serialization.py index b3519f1..a8abd9a 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -79,26 +79,32 @@ class UTC(datetime.tzinfo): """No daylight saving for UTC.""" return datetime.timedelta(hours=1) -class _FixedOffset(datetime.tzinfo): - """Fixed offset in minutes east from UTC. - Copy/pasted from Python doc - :param int offset: offset in minutes - """ +try: + from datetime import timezone as _FixedOffset +except ImportError: # Python 2.7 + class _FixedOffset(datetime.tzinfo): # type: ignore + """Fixed offset in minutes east from UTC. + Copy/pasted from Python doc + :param datetime.timedelta offset: offset in timedelta format + """ - def __init__(self, offset): - self.__offset = datetime.timedelta(minutes=offset) + def __init__(self, offset): + self.__offset = offset - def utcoffset(self, dt): - return self.__offset + def utcoffset(self, dt): + return self.__offset - def tzname(self, dt): - return str(self.__offset.total_seconds()/3600) + def tzname(self, dt): + return str(self.__offset.total_seconds()/3600) - def __repr__(self): - return "<FixedOffset {}>".format(self.tzname(None)) + def __repr__(self): + return "<FixedOffset {}>".format(self.tzname(None)) - def dst(self, dt): - return datetime.timedelta(0) + def dst(self, dt): + return datetime.timedelta(0) + + def __getinitargs__(self): + return (self.__offset,) try: from datetime import timezone @@ -1868,7 +1874,7 @@ class Deserializer(object): parsed_date = email.utils.parsedate_tz(attr) date_obj = datetime.datetime( *parsed_date[:6], - tzinfo=_FixedOffset((parsed_date[9] or 0)/60) + tzinfo=_FixedOffset(datetime.timedelta(minutes=(parsed_date[9] or 0)/60)) ) if not date_obj.tzinfo: date_obj = date_obj.astimezone(tz=TZ_UTC)
Azure/msrest-for-python
053469458820db6a33b2cb55b3f9a5e55a2f9716
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 5991753..cb41372 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -29,6 +29,7 @@ import sys import json import isodate import logging +import pickle from enum import Enum from datetime import datetime, timedelta, date, time import unittest @@ -2058,6 +2059,20 @@ class TestRuntimeDeserialized(unittest.TestCase): self.assertEqual(utc.tm_sec, 8) self.assertEqual(a.microsecond, 0) + def test_rfc_pickable(self): + """Check datetime created by RFC parser are pickable. + + See https://github.com/Azure/msrest-for-python/issues/205 + """ + + datetime_rfc = "Mon, 25 May 2020 11:00:00 GMT" + datetime1 = Deserializer.deserialize_rfc(datetime_rfc) + + pickled = pickle.dumps(datetime1) + datetime2 = pickle.loads(pickled) + + assert datetime1 == datetime2 + def test_polymorphic_deserialization(self): class Zoo(Model):
datetime returned by Deserializer.deserialize_rfc() is not picklable ### Repro ```python import pickle from msrest.serialization import Deserializer datetime_rfc = "Mon, 25 May 2020 11:00:00 GMT" datetime1 = Deserializer.deserialize_rfc(datetime_rfc) print("datetime1: %s" % datetime1) pickled = pickle.dumps(datetime1) datetime2 = pickle.loads(pickled) print("datetime2: %s" % datetime2) ``` ### Output (msrest 0.6.13) ``` datetime1: 2020-05-25 11:00:00+00:00 datetime2: 2020-05-25 11:00:00+00:00 ``` ### Output (msrest 0.6.14) ``` datetime1: 2020-05-25 11:00:00+00:00 Traceback (most recent call last): File "d:\__temp\repro\main.py", line 8, in <module> datetime2 = pickle.loads(pickled) TypeError: __init__() missing 1 required positional argument: 'offset' ``` ### Details This regression was introduced in https://github.com/Azure/msrest-for-python/pull/201. After that change, in the example above `timedate1` is not picklable because `timedate1.tzinfo` contains an instance of [`_FixedOffset`](https://github.com/Azure/msrest-for-python/blob/v0.6.14/msrest/serialization.py#L82,L101) which is not picklable itself. `pickle.dumps(datetime1)` invokes `timedate1.tzinfo.__reduce__()`. `_FixedOffset` class doesn't define the [`__reduce__()`](https://docs.python.org/3/library/pickle.html#object.__reduce__) method and the implementation from its parent class is used. [`tzinfo.__reduce__()`](https://github.com/python/cpython/blob/v3.8.3/Lib/datetime.py#L1193,L1207) assumes that the class implements `__getinitargs__()` method. This is true for `datetime.timezone`, but not for `_FixedOffset`. Eventually, `pickle.loads(pickled)` tries to call `_FixedOffset.__init__()` without the required `offset` argument, resulting in a `TypeError`. In practice the issue happens when trying to pickle/unpickle any object containing a `datetime` generated by `Deserializer.deserialize_rfc()`, e.g. with [`multiprocessing`](https://docs.python.org/3/library/multiprocessing.html). ### Potential solutions 1. Implement `_FixedOffset.__getinitargs__()`. 2. Implement `_FixedOffset.__reduce__()`. 2. Make `_FixedOffset` use the default implementation of `__reduce__()`, instead of one inherited from `datetime.tzinfo`: `__reduce__ = object.__reduce__` Once Python 2.7 compatibility is no longer required, `datetime.timezone` can be used instead of `_FixedOffset`.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeDeserialized::test_rfc_pickable" ]
[ "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_query", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_time", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_type", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared", 
"tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime_rfc", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_time", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_twice_key_scenario", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2020-06-02T20:55:10Z"
mit
Azure__msrest-for-python-222
diff --git a/README.rst b/README.rst index 94950c2..ebce1fb 100644 --- a/README.rst +++ b/README.rst @@ -26,6 +26,7 @@ Release History **Bugfixes** - Fix serialization of random Model object #220 +- Fix serialization of unicode string in Py2 and object mode #221 2020-07-27 Version 0.6.18 diff --git a/msrest/serialization.py b/msrest/serialization.py index ab7a28f..f9037b9 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -951,6 +951,8 @@ class Serializer(object): return self.serialize_basic(attr, self.basic_types[obj_type], **kwargs) if obj_type is _long_type: return self.serialize_long(attr) + if obj_type is unicode_str: + return self.serialize_unicode(attr) # If it's a model or I know this dependency, serialize as a Model elif obj_type in self.dependencies.values() or isinstance(attr, Model):
Azure/msrest-for-python
118735008cab12c4cb2e2d24594d3bd0786b546f
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 99d3980..ab04cfd 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -1366,6 +1366,8 @@ class TestRuntimeSerialized(unittest.TestCase): except NameError: long_type = int + s = Serializer() + assert s.serialize_data(long_type(1), 'object') == long_type(1) class TestModel(Model): _attribute_map = {'data': {'key': 'data', 'type': 'object'}} @@ -1376,6 +1378,23 @@ class TestRuntimeSerialized(unittest.TestCase): 'data': {'id': long_type(1)} } + def test_unicode_as_type_object(self): + """Test irrelevant on Python 3. But still doing it to test regresssion. + https://github.com/Azure/msrest-for-python/issue/221 + """ + + s = Serializer() + assert s.serialize_data(u"\ua015", 'object') == u"\ua015" + + class TestModel(Model): + _attribute_map = {'data': {'key': 'data', 'type': 'object'}} + + m = TestModel(data = {'id': u"\ua015"}) + serialized = m.serialize() + assert serialized == { + 'data': {'id': u"\ua015"} + } + def test_json_with_xml_map(self): basic_json = {'age': 37, 'country': 'france'}
Unable to serialize unicode string as type object on Python 2.7 ``` from msrest.serialization import Serializer s=Serializer() s._serialize(u"\ua015", 'object') ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration" ]
[ "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_time", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_rfc_pickable", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime_rfc", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_twice_key_scenario", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_time", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_custom_model", 
"tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_query", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_type", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_unicode_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2020-09-04T22:52:27Z"
mit
Azure__msrest-for-python-224
diff --git a/msrest/serialization.py b/msrest/serialization.py index f9037b9..378de20 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -953,6 +953,16 @@ class Serializer(object): return self.serialize_long(attr) if obj_type is unicode_str: return self.serialize_unicode(attr) + if obj_type is datetime.datetime: + return self.serialize_iso(attr) + if obj_type is datetime.date: + return self.serialize_date(attr) + if obj_type is datetime.time: + return self.serialize_time(attr) + if obj_type is datetime.timedelta: + return self.serialize_duration(attr) + if obj_type is decimal.Decimal: + return self.serialize_decimal(attr) # If it's a model or I know this dependency, serialize as a Model elif obj_type in self.dependencies.values() or isinstance(attr, Model):
Azure/msrest-for-python
c16e5218fe99742c5bf93d73ce0bb71c9b1c0953
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index ab04cfd..6837bad 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -25,6 +25,7 @@ # #-------------------------------------------------------------------------- +from decimal import Decimal import sys import json import isodate @@ -1395,6 +1396,42 @@ class TestRuntimeSerialized(unittest.TestCase): 'data': {'id': u"\ua015"} } + def test_datetime_types_as_type_object(self): + """https://github.com/Azure/msrest-for-python/issues/223 + """ + + class TestModel(Model): + _attribute_map = {'data': {'key': 'data', 'type': 'object'}} + + m = TestModel(data = { + 'datetime': isodate.parse_datetime('2012-02-24T00:53:52.780Z'), + 'date': date(2019,5,1), + 'time': time(11,12,13), + 'timedelta': timedelta(56) + }) + serialized = m.serialize() + assert serialized['data'] == { + 'datetime': '2012-02-24T00:53:52.780Z', + 'date': '2019-05-01', + 'time': '11:12:13', + 'timedelta': 'P56D' + } + + def test_decimal_types_as_type_object(self): + """https://github.com/Azure/msrest-for-python/issues/223 + """ + + class TestModel(Model): + _attribute_map = {'data': {'key': 'data', 'type': 'object'}} + + m = TestModel(data = { + 'decimal': Decimal('1.1'), + }) + serialized = m.serialize() + assert serialized['data'] == { + 'decimal': 1.1 + } + def test_json_with_xml_map(self): basic_json = {'age': 37, 'country': 'france'}
Serialization for non-standard types yields incorrect output Hi, I am using the event grid client SDK in python to generate custom events. I have come across an issue I can't seem to solve without going away from the event grid SDK. The problem is that the event grid model serializer does not give me the correct output for when including types that are not the basic types. A simple reproducible example: ``` from azure.eventgrid.models import EventGridEvent import datetime import uuid event=EventGridEvent( topic="test", id=uuid.uuid4(), subject="testUpdated", data={"time":datetime.datetime.now().replace(tzinfo=datetime.timezone.utc)}, event_type="test.test", event_time=datetime.datetime.now().replace(tzinfo=datetime.timezone.utc), data_version=2.0, ) print(event.serialize()) ``` This would return ` {'id': '3e02a22c-f327-4f62-af25-b71e0865888b', 'topic': 'product', 'subject': 'ProductUpdated', 'data': {'time': '2020-09-07 10:37:08.348679+00:00'}, 'eventType': 'supplychain.product', 'eventTime': '2020-09-07T10:37:08.348679Z', 'dataVersion': '2.0'} ` the serialize is not called by me in the code I actually use, but it looks like that is what is called behind the scenes when I send off the event. I want the datetime (the key "time" in the above example) to be serialized just as the parent-level "event_time". My problem is not only with datetime as in the current example, but also with decimals. I guess this fits in this repo and not in the EventGrid SDK repo, but feel free to redirect me there.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_datetime_types_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_decimal_types_as_type_object" ]
[ "tests/test_serialization.py::TestRuntimeDeserialized::test_rfc_pickable", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_time", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_missing_info", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime_rfc", "tests/test_serialization.py::TestRuntimeDeserialized::test_array_deserialize", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_twice_key_scenario", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_not_configured", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_date", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_additional_properties_flattening", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_long_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_int_as_iter_with_div", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", 
"tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_manual", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_json_with_xml_map", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_no_send", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_from_dict_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_query", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_type", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_custom_model", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_with_auto_model", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties_declared", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_time", "tests/test_serialization.py::TestRuntimeSerialized::test_additional_properties", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_unicode_as_type_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestModelDeserialization::test_empty_enum_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs_logs", "tests/test_serialization.py::TestModelDeserialization::test_model_kwargs" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2020-09-21T23:26:48Z"
mit
Azure__msrest-for-python-28
diff --git a/msrest/exceptions.py b/msrest/exceptions.py index 85668a6..6ac7dcc 100644 --- a/msrest/exceptions.py +++ b/msrest/exceptions.py @@ -113,7 +113,7 @@ class AuthenticationError(ClientException): class HttpOperationError(ClientException): - """Client request failed due to server-specificed HTTP operation error. + """Client request failed due to server-specified HTTP operation error. Attempts to deserialize response into specific error object. :param Deserializer deserialize: Deserializer with data on custom diff --git a/msrest/service_client.py b/msrest/service_client.py index a204089..ac70211 100644 --- a/msrest/service_client.py +++ b/msrest/service_client.py @@ -138,7 +138,7 @@ class ServiceClient(object): return kwargs - def send_formdata(self, request, headers={}, content={}, **config): + def send_formdata(self, request, headers=None, content=None, **config): """Send data as a multipart form-data request. We only deal with file-like objects or strings at this point. The requests is not yet streamed. @@ -148,11 +148,11 @@ class ServiceClient(object): :param dict content: Dictionary of the fields of the formdata. :param config: Any specific config overrides. """ + if content is None: + content = {} file_data = {f: self._format_data(d) for f, d in content.items()} - try: - del headers['Content-Type'] - except KeyError: - pass + if headers: + headers.pop('Content-Type', None) return self.send(request, headers, None, files=file_data, **config) def send(self, request, headers=None, content=None, **config): @@ -290,7 +290,7 @@ class ServiceClient(object): """ self._headers[header] = value - def get(self, url=None, params={}): + def get(self, url=None, params=None): """Create a GET request object. :param str url: The request URL. @@ -300,7 +300,7 @@ class ServiceClient(object): request.method = 'GET' return request - def put(self, url=None, params={}): + def put(self, url=None, params=None): """Create a PUT request object. :param str url: The request URL. @@ -310,7 +310,7 @@ class ServiceClient(object): request.method = 'PUT' return request - def post(self, url=None, params={}): + def post(self, url=None, params=None): """Create a POST request object. :param str url: The request URL. @@ -320,7 +320,7 @@ class ServiceClient(object): request.method = 'POST' return request - def head(self, url=None, params={}): + def head(self, url=None, params=None): """Create a HEAD request object. :param str url: The request URL. @@ -330,7 +330,7 @@ class ServiceClient(object): request.method = 'HEAD' return request - def patch(self, url=None, params={}): + def patch(self, url=None, params=None): """Create a PATCH request object. :param str url: The request URL. @@ -340,7 +340,7 @@ class ServiceClient(object): request.method = 'PATCH' return request - def delete(self, url=None, params={}): + def delete(self, url=None, params=None): """Create a DELETE request object. :param str url: The request URL. @@ -350,7 +350,7 @@ class ServiceClient(object): request.method = 'DELETE' return request - def merge(self, url=None, params={}): + def merge(self, url=None, params=None): """Create a MERGE request object. :param str url: The request URL.
Azure/msrest-for-python
02b1e35c7cfb045bd4752abe800ad6912282eb6e
diff --git a/test/unittest_client.py b/test/unittest_client.py index c22b13e..4a193ee 100644 --- a/test/unittest_client.py +++ b/test/unittest_client.py @@ -201,7 +201,7 @@ class TestServiceClient(unittest.TestCase): mock_client._format_data.return_value = "formatted" request = ClientRequest('GET') ServiceClient.send_formdata(mock_client, request) - mock_client.send.assert_called_with(request, {}, None, files={}) + mock_client.send.assert_called_with(request, None, None, files={}) ServiceClient.send_formdata(mock_client, request, {'id':'1234'}, {'Test':'Data'}) mock_client.send.assert_called_with(request, {'id':'1234'}, None, files={'Test':'formatted'})
Default argument value is mutable Hi, just want to confirm: I noticed that some methods in `ServiceClient` have a mutable default argument, for instance: `def get(self, url=None, params={}): """Create a GET request object. :param str url: The request URL. :param dict params: Request URL parameters. """ request = self._request(url, params) request.method = 'GET' return request` and the default argument `params` is mutated inside `self._request(url, params)`. Is that by design? It seems a little weird to me. Thanks!
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/unittest_client.py::TestServiceClient::test_client_formdata_send" ]
[ "test/unittest_client.py::TestServiceClient::test_format_url", "test/unittest_client.py::TestServiceClient::test_client_header", "test/unittest_client.py::TestServiceClient::test_client_send", "test/unittest_client.py::TestServiceClient::test_client_request", "test/unittest_client.py::TestServiceClient::test_format_data", "test/unittest_client.py::TestServiceClient::test_client_add_hook" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2017-04-03T22:04:40Z"
mit
Azure__msrest-for-python-43
diff --git a/msrest/serialization.py b/msrest/serialization.py index 6eb8ec9..063f2e6 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -918,6 +918,9 @@ class Deserializer(object): '[]': self.deserialize_iter, '{}': self.deserialize_dict } + self.deserialize_expected_types = { + 'duration': (isodate.Duration, datetime.timedelta) + } self.dependencies = dict(classes) if classes else {} self.key_extractors = [ rest_key_extractor @@ -1080,6 +1083,8 @@ class Deserializer(object): if data_type in self.basic_types.values(): return self.deserialize_basic(data, data_type) if data_type in self.deserialize_type: + if isinstance(data, self.deserialize_expected_types.get(data_type, tuple())): + return data data_val = self.deserialize_type[data_type](data) return data_val
Azure/msrest-for-python
11f19f936f2d2d912782c7280f02f01ed89baf47
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index f70dcbd..787a086 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -138,7 +138,6 @@ class TestRuntimeSerialized(unittest.TestCase): class TestObj(Model): - _validation = {} _attribute_map = { 'attr_a': {'key':'id', 'type':'str'}, 'attr_b': {'key':'AttrB', 'type':'int'}, @@ -147,23 +146,30 @@ class TestRuntimeSerialized(unittest.TestCase): 'attr_e': {'key':'AttrE', 'type': '{float}'}, 'attr_f': {'key':'AttrF', 'type': 'duration'}, 'attr_g': {'key':'properties.AttrG', 'type':'str'}, - } - - def __init__(self): + } - self.attr_a = None - self.attr_b = None - self.attr_c = None - self.attr_d = None - self.attr_e = None - self.attr_f = None - self.attr_g = None + def __init__(self, + attr_a=None, + attr_b=None, + attr_c=None, + attr_d=None, + attr_e=None, + attr_f=None, + attr_g=None): + + self.attr_a = attr_a + self.attr_b = attr_b + self.attr_c = attr_c + self.attr_d = attr_d + self.attr_e = attr_e + self.attr_f = attr_f + self.attr_g = attr_g def __str__(self): return "Test_Object" def setUp(self): - self.s = Serializer() + self.s = Serializer({'TestObj': self.TestObj}) return super(TestRuntimeSerialized, self).setUp() def test_serialize_direct_model(self): @@ -496,6 +502,14 @@ class TestRuntimeSerialized(unittest.TestCase): message = self.s._serialize(test_obj) self.assertEquals("P1D", message["AttrF"]) + test_obj = self.TestObj() + test_obj.attr_f = isodate.parse_duration("P3Y6M4DT12H30M5S") + + message = self.s.body({ + "attr_f": isodate.parse_duration("P3Y6M4DT12H30M5S")}, + 'TestObj') + self.assertEquals("P3Y6M4DT12H30M5S", message["AttrF"]) + def test_attr_list_simple(self): """ Test serializing an object with simple-typed list attributes @@ -657,8 +671,8 @@ class TestRuntimeSerialized(unittest.TestCase): g = self.s.body({"test":{"value":"data"}}, 'object') self.assertEqual(g, {"test":{"value":"data"}}) - h = self.s.serialize_data({"test":self.TestObj()}, 'object') - self.assertEqual(h, {"test":"Test_Object"}) + h = self.s.serialize_data({"test":self.TestObj('id')}, 'object') + self.assertEqual(h, {"test":{'id': 'id'}}) i = self.s.serialize_data({"test":[1,2,3,4,5]}, 'object') self.assertEqual(i, {"test":[1,2,3,4,5]})
Serialization issue if dict syntax and Duration used ```python msrest.exceptions.SerializationError: Unable to build a model: Unable to deserialize response data. Data: 3 years, 6 months, 4 days, 12:30:05, duration, TypeError: Expecting a string isodate.duration.Duration(4, 45005, 0, years=3, months=6), DeserializationError: Unable to deserialize response data. Data: 3 years, 6 months, 4 days, 12:30:05, duration, TypeError: Expecting a string isodate.duration.Duration(4, 45005, 0, years=3, months=6) ``` Regression introduced in 0.4.12
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration" ]
[ "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", 
"tests/test_serialization.py::TestModelDeserialization::test_response" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-08-22T03:53:10Z"
mit
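A minimal illustrative sketch of the "dict syntax + duration" regression described in the Azure__msrest-for-python-43 record above, adapted from that record's test_patch; the model name and attribute map are assumptions made for illustration.

```python
# Sketch adapted from the record's test_patch; TestObj is an assumed name.
import isodate
from msrest.serialization import Model, Serializer

class TestObj(Model):
    _attribute_map = {
        'attr_f': {'key': 'AttrF', 'type': 'duration'},
    }

    def __init__(self, attr_f=None):
        self.attr_f = attr_f

serializer = Serializer({'TestObj': TestObj})
# Before the fix, passing an already-parsed isodate duration through the
# dict syntax raised SerializationError ("Expecting a string"); with the
# patch applied it round-trips cleanly.
body = serializer.body(
    {'attr_f': isodate.parse_duration('P3Y6M4DT12H30M5S')}, 'TestObj')
print(body)  # expected: {'AttrF': 'P3Y6M4DT12H30M5S'}
```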
Azure__msrest-for-python-45
diff --git a/msrest/serialization.py b/msrest/serialization.py index 063f2e6..a3d50cd 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -1200,6 +1200,12 @@ class Deserializer(object): :param str data: response string to be deserialized. :rtype: str or unicode """ + # We might be here because we have an enum modeled as string, + # and we try to deserialize a partial dict with enum inside + if isinstance(data, Enum): + return data + + # Consider this is real string try: if isinstance(data, unicode): return data
Azure/msrest-for-python
07cec915d60e29193935dfca17d5e8a7afd0a3d4
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 787a086..10fb82f 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -353,10 +353,10 @@ class TestRuntimeSerialized(unittest.TestCase): class TestEnum(Enum): val = "Value" - t = test_obj + t = test_obj() t.abc = TestEnum.val - serialized = self.s._serialize(test_obj) + serialized = self.s._serialize(t) expected = { "ABC": "Value" } @@ -374,6 +374,31 @@ class TestRuntimeSerialized(unittest.TestCase): with self.assertRaises(SerializationError): serializer._serialize(t) + serializer = Serializer({ + 'TestEnumObj': test_obj, + 'TestEnum': TestEnum + }) + serialized = serializer.body({ + 'abc': TestEnum.val + }, 'TestEnumObj') + expected = { + 'ABC': 'Value' + } + self.assertEqual(expected, serialized) + + # model-as-string=True + test_obj._attribute_map = { + "abc":{"key":"ABC", "type":"str"} + } + serialized = serializer.body({ + 'abc': TestEnum.val + }, 'TestEnumObj') + expected = { + 'ABC': 'Value' + } + self.assertEqual(expected, serialized) + + def test_attr_none(self): """ Test serializing an object with None attributes.
v0.4.12 breaks mixed dict with enum if model-as-string=true This breaks: ``` python async_security_rule = self.network_client.security_rules.create_or_update( self.group_name, security_group_name, new_security_rule_name, { 'access':azure.mgmt.network.models.SecurityRuleAccess.allow, 'description':'New Test security rule', 'destination_address_prefix':'*', 'destination_port_range':'123-3500', 'direction':azure.mgmt.network.models.SecurityRuleDirection.outbound, 'priority':400, 'protocol':azure.mgmt.network.models.SecurityRuleProtocol.tcp, 'source_address_prefix':'*', 'source_port_range':'655', } ) ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum" ]
[ "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", 
"tests/test_serialization.py::TestModelDeserialization::test_response" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2017-08-23T16:10:17Z"
mit
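A minimal sketch of the enum-in-dict case from the Azure__msrest-for-python-45 record above, adapted from its test_patch: an Enum member is passed inside a plain dict while the attribute is modeled as a string (model-as-string=true). The class names are assumptions.

```python
# Sketch adapted from the record's test_patch; names are placeholders.
from enum import Enum
from msrest.serialization import Model, Serializer

class TestEnum(Enum):
    val = "Value"

class TestEnumObj(Model):
    # model-as-string: the enum-typed field is declared as plain 'str'
    _attribute_map = {'abc': {'key': 'ABC', 'type': 'str'}}

    def __init__(self, abc=None):
        self.abc = abc

serializer = Serializer({'TestEnumObj': TestEnumObj, 'TestEnum': TestEnum})
# Passing the Enum member inside a dict used to fail in 0.4.12; after the
# fix the serialized body carries the enum's value.
print(serializer.body({'abc': TestEnum.val}, 'TestEnumObj'))  # {'ABC': 'Value'}
```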
Azure__msrest-for-python-53
diff --git a/msrest/configuration.py b/msrest/configuration.py index 7b47b39..4b46b56 100644 --- a/msrest/configuration.py +++ b/msrest/configuration.py @@ -44,6 +44,19 @@ from .pipeline import ( from .version import msrest_version +def default_session_configuration_callback(session, global_config, local_config, **kwargs): + """Configuration callback if you need to change default session configuration. + + :param requests.Session session: The session. + :param Configuration global_config: The global configuration. + :param dict local_config: The on-the-fly configuration passed on the call. + :param dict kwargs: The current computed values for session.request method. + :return: Must return kwargs, to be passed to session.request. If None is return, initial kwargs will be used. + :rtype: dict + """ + return kwargs + + class Configuration(object): """Client configuration. @@ -79,6 +92,8 @@ class Configuration(object): # - kwargs['msrest']['session'] with the current session self.hooks = [] + self.session_configuration_callback = default_session_configuration_callback + self._config = configparser.ConfigParser() self._config.optionxform = str diff --git a/msrest/serialization.py b/msrest/serialization.py index 1902ca2..cc03063 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -350,6 +350,7 @@ class Serializer(object): } self.dependencies = dict(classes) if classes else {} self.key_transformer = full_restapi_key_transformer + self.client_side_validation = True def _serialize(self, target_obj, data_type=None, **kwargs): """Serialize data into a string according to type. @@ -440,9 +441,10 @@ class Serializer(object): raise_with_traceback( SerializationError, "Unable to build a model: "+str(err), err) - errors = _recursive_validate(data_type, data) - if errors: - raise errors[0] + if self.client_side_validation: + errors = _recursive_validate(data_type, data) + if errors: + raise errors[0] return self._serialize(data, data_type, **kwargs) def url(self, name, data, data_type, **kwargs): @@ -454,7 +456,8 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - data = self.validate(data, name, required=True, **kwargs) + if self.client_side_validation: + data = self.validate(data, name, required=True, **kwargs) try: output = self.serialize_data(data, data_type, **kwargs) if data_type == 'bool': @@ -478,7 +481,8 @@ class Serializer(object): :raises: TypeError if serialization fails. :raises: ValueError if data is None """ - data = self.validate(data, name, required=True, **kwargs) + if self.client_side_validation: + data = self.validate(data, name, required=True, **kwargs) try: if data_type in ['[str]']: data = ["" if d is None else d for d in data] @@ -504,7 +508,8 @@ class Serializer(object): :raises: TypeError if serialization fails. 
:raises: ValueError if data is None """ - data = self.validate(data, name, required=True, **kwargs) + if self.client_side_validation: + data = self.validate(data, name, required=True, **kwargs) try: if data_type in ['[str]']: data = ["" if d is None else d for d in data] diff --git a/msrest/service_client.py b/msrest/service_client.py index d0e6fdb..eed50c5 100644 --- a/msrest/service_client.py +++ b/msrest/service_client.py @@ -145,6 +145,11 @@ class ServiceClient(object): for protocol in self._protocols: session.mount(protocol, requests.adapters.HTTPAdapter(max_retries=max_retries)) + + output_kwargs = self.config.session_configuration_callback(session, self.config, config, **kwargs) + if output_kwargs is not None: + kwargs = output_kwargs + return kwargs def send_formdata(self, request, headers=None, content=None, **config):
Azure/msrest-for-python
bad8585bcbe5f92f3b2c892c8b373ee367dff70f
diff --git a/tests/test_client.py b/tests/test_client.py index 8e73444..ee10d48 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -50,6 +50,22 @@ class TestServiceClient(unittest.TestCase): self.creds = mock.create_autospec(OAuthTokenAuthentication) return super(TestServiceClient, self).setUp() + def test_session_callback(self): + + client = ServiceClient(self.creds, self.cfg) + local_session = requests.Session() + + def callback(session, global_config, local_config, **kwargs): + self.assertIs(session, local_session) + self.assertIs(global_config, self.cfg) + self.assertTrue(local_config["test"]) + return {'used_callback': True} + + self.cfg.session_configuration_callback = callback + + output_kwargs = client._configure_session(local_session, **{"test": True}) + self.assertTrue(output_kwargs['used_callback']) + def test_client_request(self): client = ServiceClient(self.creds, self.cfg) diff --git a/tests/test_serialization.py b/tests/test_serialization.py index c6cf07d..4d6a80a 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -172,6 +172,32 @@ class TestRuntimeSerialized(unittest.TestCase): self.s = Serializer({'TestObj': self.TestObj}) return super(TestRuntimeSerialized, self).setUp() + def test_validation_flag(self): + s = Serializer() + s.client_side_validation = True + + with self.assertRaises(ValidationError): + s.query("filter", "", "str", min_length=666) + with self.assertRaises(ValidationError): + s.url("filter", "", "str", min_length=666) + with self.assertRaises(ValidationError): + s.header("filter", "", "str", min_length=666) + + test_obj = self.TestObj() + self.TestObj._validation = { + 'attr_b': {'required': True}, + } + test_obj.attr_b = None + + with self.assertRaises(ValidationError): + self.s.body(test_obj, 'TestObj') + + s.client_side_validation = False + s.query("filter", "", "str", min_length=666) + s.url("filter", "", "str", min_length=666) + s.header("filter", "", "str", min_length=666) + s.body(test_obj, 'TestObj') + def test_serialize_direct_model(self): testobj = self.TestObj() testobj.attr_a = "myid"
Add support to disable validation on the runtime side There should be a flag in the configuration to disable runtime validation. This flag should default to the `client-side-validation` value from Autorest: https://github.com/Azure/autorest/issues/1583#issuecomment-311142690 FYI @matthchr
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_client.py::TestServiceClient::test_session_callback", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag" ]
[ "tests/test_client.py::TestServiceClient::test_client_formdata_send", "tests/test_client.py::TestServiceClient::test_client_header", "tests/test_client.py::TestServiceClient::test_format_url", "tests/test_client.py::TestServiceClient::test_client_request", "tests/test_client.py::TestServiceClient::test_client_send", "tests/test_client.py::TestServiceClient::test_format_data", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", 
"tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2017-09-28T17:58:23Z"
mit
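A minimal sketch of the runtime-validation switch added by the Azure__msrest-for-python-53 record above, mirroring the calls in its test_patch.

```python
# Sketch mirroring the record's test_patch: toggling client-side validation.
from msrest.serialization import Serializer
from msrest.exceptions import ValidationError

s = Serializer()

s.client_side_validation = True
try:
    s.query("filter", "", "str", min_length=666)   # empty string violates min_length
except ValidationError:
    print("rejected while validation is enabled")

s.client_side_validation = False
print(s.query("filter", "", "str", min_length=666))  # accepted: validation skipped
```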
Azure__msrest-for-python-54
diff --git a/msrest/serialization.py b/msrest/serialization.py index cc03063..61e811f 100644 --- a/msrest/serialization.py +++ b/msrest/serialization.py @@ -623,6 +623,8 @@ class Serializer(object): in the iterable into a combined string. Default is 'None'. :rtype: list, str """ + if isinstance(data, str): + raise SerializationError("Refuse str type as a valid iter type.") serialized = [] for d in data: try:
Azure/msrest-for-python
36172c1011c1a6b62eb57f7608ef571b71747a1a
diff --git a/tests/test_serialization.py b/tests/test_serialization.py index 4d6a80a..39f2878 100644 --- a/tests/test_serialization.py +++ b/tests/test_serialization.py @@ -736,6 +736,10 @@ class TestRuntimeSerialized(unittest.TestCase): b = self.s.serialize_iter([], 'int') self.assertEqual(b, []) + def test_serialize_str_as_iter(self): + with self.assertRaises(SerializationError): + self.s.serialize_iter("I am a string", 'str') + def test_serialize_json_obj(self): class ComplexId(Model):
Should refuse a string as a valid list of strings Ends up in the portal as ['a','b','c'] if we use `Model('abc')` instead of `Model(['abc'])`. This should fail; accepting a string for a list of strings is likely an error and not a feature. See https://github.com/Azure/azure-sdk-for-python/issues/1376#issuecomment-323409463
Should refuse a string as a valid list of strings Ends up in the portal as ['a','b','c'] if we use `Model('abc')` instead of `Model(['abc'])`. This should fail; accepting a string for a list of strings is likely an error and not a feature. See https://github.com/Azure/azure-sdk-for-python/issues/1376#issuecomment-323409463
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_str_as_iter" ]
[ "tests/test_serialization.py::TestModelDeserialization::test_response", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_dict_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_duration", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_enum", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_int", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_none", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_sequence", "tests/test_serialization.py::TestRuntimeSerialized::test_attr_str", "tests/test_serialization.py::TestRuntimeSerialized::test_empty_list", "tests/test_serialization.py::TestRuntimeSerialized::test_key_type", "tests/test_serialization.py::TestRuntimeSerialized::test_model_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_serialize_none", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_with_mismatched_map", "tests/test_serialization.py::TestRuntimeSerialized::test_obj_without_attr_map", "tests/test_serialization.py::TestRuntimeSerialized::test_polymorphic_serialization", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_datetime", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_direct_model", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_empty_iter", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_json_obj", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_object", "tests/test_serialization.py::TestRuntimeSerialized::test_serialize_primitive_types", "tests/test_serialization.py::TestRuntimeSerialized::test_validate", "tests/test_serialization.py::TestRuntimeSerialized::test_validation_flag", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_bool", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_int", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_complex", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_in_list", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_list_simple", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_none", "tests/test_serialization.py::TestRuntimeDeserialized::test_attr_str", "tests/test_serialization.py::TestRuntimeDeserialized::test_basic_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_cls_method_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_datetime", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_object", "tests/test_serialization.py::TestRuntimeDeserialized::test_deserialize_storage", "tests/test_serialization.py::TestRuntimeDeserialized::test_invalid_json", "tests/test_serialization.py::TestRuntimeDeserialized::test_non_obj_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_malformed_map", "tests/test_serialization.py::TestRuntimeDeserialized::test_obj_with_no_attr", "tests/test_serialization.py::TestRuntimeDeserialized::test_personalize_deserialization", "tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization", 
"tests/test_serialization.py::TestRuntimeDeserialized::test_polymorphic_deserialization_with_escape", "tests/test_serialization.py::TestRuntimeDeserialized::test_robust_deserialization", "tests/test_serialization.py::TestModelInstanceEquality::test_model_instance_equality" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2017-09-28T18:30:24Z"
mit
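A minimal sketch of the behavior introduced by the Azure__msrest-for-python-54 record above, taken almost verbatim from its test_patch: a bare string is no longer accepted where a list of strings is expected.

```python
# Sketch mirroring the record's test_patch.
from msrest.serialization import Serializer
from msrest.exceptions import SerializationError

s = Serializer()
try:
    s.serialize_iter("I am a string", 'str')   # a str is not a valid '[str]'
except SerializationError:
    print("rejected, as the fix intends")
```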
Azure__msrest-for-python-55
diff --git a/msrest/exceptions.py b/msrest/exceptions.py index 6ac7dcc..3b8788e 100644 --- a/msrest/exceptions.py +++ b/msrest/exceptions.py @@ -136,7 +136,13 @@ class HttpOperationError(ClientException): self.error = deserialize(resp_type, response) if self.error is None: self.error = deserialize.dependencies[resp_type]() - self.message = self.error.message + # ARM uses OData v4 + # http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + # Code and Message are REQUIRED + self.message = "({}) {}".format( + self.error.error.code, + self.error.error.message + ) except (DeserializationError, AttributeError, KeyError): pass
Azure/msrest-for-python
bb876c23427448d293c793e16d415971eb1753bc
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py new file mode 100644 index 0000000..2995593 --- /dev/null +++ b/tests/test_exceptions.py @@ -0,0 +1,92 @@ +#-------------------------------------------------------------------------- +# +# Copyright (c) Microsoft Corporation. All rights reserved. +# +# The MIT License (MIT) +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the ""Software""), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. +# +#-------------------------------------------------------------------------- +import json +import unittest +try: + from unittest import mock +except ImportError: + import mock + +import requests + +from msrest.serialization import Model, Deserializer +from msrest.exceptions import HttpOperationError + + +class TestExceptions(unittest.TestCase): + + def test_custom_exception(self): + + class ErrorResponse(Model): + _attribute_map = { + 'error': {'key': 'error', 'type': 'ErrorDetails'}, + } + def __init__(self, error=None): + self.error = error + + + class ErrorResponseException(HttpOperationError): + def __init__(self, deserialize, response, *args): + super(ErrorResponseException, self).__init__(deserialize, response, 'ErrorResponse', *args) + + class ErrorDetails(Model): + _validation = { + 'code': {'readonly': True}, + 'message': {'readonly': True}, + 'target': {'readonly': True}, + } + + _attribute_map = { + 'code': {'key': 'code', 'type': 'str'}, + 'message': {'key': 'message', 'type': 'str'}, + 'target': {'key': 'target', 'type': 'str'}, + } + + def __init__(self): + self.code = None + self.message = None + self.target = None + + deserializer = Deserializer({ + 'ErrorResponse': ErrorResponse, + 'ErrorDetails': ErrorDetails + }) + + response = mock.create_autospec(requests.Response) + response.text = json.dumps( + { + "error": { + "code": "NotOptedIn", + "message": "You are not allowed to download invoices. Please contact your account administrator (ptvsazure@outlook.com) to turn on access in the management portal for allowing to download invoices through the API." + } + } + ) + response.headers = {"content-type": "application/json; charset=utf8"} + + excep = ErrorResponseException(deserializer, response) + + self.assertIn("NotOptedIn", str(excep)) + self.assertIn("You are not allowed to download invoices", str(excep))
Improve exception string for custom exceptions Example, with this: https://github.com/Azure/azure-sdk-for-python/blob/master/azure-mgmt-billing/azure/mgmt/billing/models/error_response.py And this answer: ```python { "error": { "code": "NotOptedIn", "message": "You are not allowed to download invoices. Please contact your account administrator (ptvsazure@outlook.com) to turn on access in the management portal for allowing to download invoices through the API." } } ``` We should have something better than: ```python D:\VEnvs\AzureCli\Lib\site-packages\azure\mgmt\billing\operations\invoices_operations.py in internal_paging(next_link, raw) 110 111 if response.status_code not in [200]: --> 112 raise models.ErrorResponseException(self._deserialize, response) 113 114 return response ErrorResponseException: Operation returned an invalid status code 'Unauthorized' ``` We lose the message
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_exceptions.py::TestExceptions::test_custom_exception" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks" ], "has_test_patch": true, "is_lite": false }
"2017-09-28T19:26:51Z"
mit
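A minimal sketch of the improved exception string from the Azure__msrest-for-python-55 record above, condensed from its test_patch; with the patch applied, the exception text surfaces the OData v4 error code and message instead of the generic status-code text. The model classes below are trimmed versions of the ones in that test.

```python
# Sketch condensed from the record's test_patch.
import json
from unittest import mock

import requests
from msrest.serialization import Model, Deserializer
from msrest.exceptions import HttpOperationError

class ErrorDetails(Model):
    _attribute_map = {
        'code': {'key': 'code', 'type': 'str'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self):
        self.code = None
        self.message = None

class ErrorResponse(Model):
    _attribute_map = {'error': {'key': 'error', 'type': 'ErrorDetails'}}

    def __init__(self, error=None):
        self.error = error

class ErrorResponseException(HttpOperationError):
    def __init__(self, deserialize, response, *args):
        super(ErrorResponseException, self).__init__(
            deserialize, response, 'ErrorResponse', *args)

deserializer = Deserializer({'ErrorResponse': ErrorResponse,
                             'ErrorDetails': ErrorDetails})
response = mock.create_autospec(requests.Response)
response.text = json.dumps(
    {"error": {"code": "NotOptedIn",
               "message": "You are not allowed to download invoices."}})
response.headers = {"content-type": "application/json; charset=utf8"}

print(str(ErrorResponseException(deserializer, response)))
# expected to contain: (NotOptedIn) You are not allowed to download invoices.
```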
Azure__msrest-for-python-62
diff --git a/msrest/authentication.py b/msrest/authentication.py index 48c90ae..8d21624 100644 --- a/msrest/authentication.py +++ b/msrest/authentication.py @@ -79,6 +79,14 @@ class BasicTokenAuthentication(Authentication): self.scheme = 'Bearer' self.token = token + def set_token(self): + """Should be used to define the self.token attribute. + + In this implementation, does nothing since the token is statically provided + at creation. + """ + pass + def signed_session(self): """Create requests session with any required auth headers applied. @@ -91,7 +99,7 @@ class BasicTokenAuthentication(Authentication): return session -class OAuthTokenAuthentication(Authentication): +class OAuthTokenAuthentication(BasicTokenAuthentication): """OAuth Token Authentication. Requires that supplied token contains an expires_in field.
Azure/msrest-for-python
a34b2d5521e9fa12f3b9d40c42ed783c06afedf1
diff --git a/tests/test_auth.py b/tests/test_auth.py index e573a02..32f67f5 100644 --- a/tests/test_auth.py +++ b/tests/test_auth.py @@ -39,6 +39,7 @@ except ImportError: from msrest.authentication import ( BasicAuthentication, + BasicTokenAuthentication, OAuthTokenAuthentication) from requests import Request @@ -71,6 +72,19 @@ class TestAuthentication(unittest.TestCase): self.assertTrue('Authorization' in req.headers) self.assertTrue(req.headers['Authorization'].startswith('Basic ')) + def test_basic_token_auth(self): + + token = { + 'access_token': '123456789' + } + basic = BasicTokenAuthentication(token) + basic.set_token() # Just check that this does not raise + session = basic.signed_session() + + req = session.prepare_request(self.request) + self.assertTrue('Authorization' in req.headers) + self.assertEquals(req.headers['Authorization'], 'Bearer 123456789') + def test_token_auth(self): token = {"my_token":123}
Make BasicTokenAuthentication the new base class of OAuthTokenAuthentication To enable some KV scenarios related to MSI
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_auth.py::TestAuthentication::test_basic_token_auth" ]
[ "tests/test_auth.py::TestAuthentication::test_basic_auth", "tests/test_auth.py::TestAuthentication::test_token_auth" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2017-10-12T21:12:34Z"
mit
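A minimal sketch of the change in the Azure__msrest-for-python-62 record above, mirroring its test_patch: `BasicTokenAuthentication` gains a no-op `set_token()`, and `OAuthTokenAuthentication` now inherits from it. The endpoint URL is a placeholder.

```python
# Sketch mirroring the record's test_patch.
from requests import Request
from msrest.authentication import BasicTokenAuthentication

creds = BasicTokenAuthentication({'access_token': '123456789'})
creds.set_token()                      # no-op: the token was supplied statically
session = creds.signed_session()
req = session.prepare_request(Request('GET', 'https://my_endpoint.com'))
print(req.headers['Authorization'])    # Bearer 123456789
```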
Azure__msrest-for-python-67
diff --git a/msrest/service_client.py b/msrest/service_client.py index eed50c5..d86fcbb 100644 --- a/msrest/service_client.py +++ b/msrest/service_client.py @@ -164,10 +164,15 @@ class ServiceClient(object): """ if content is None: content = {} - file_data = {f: self._format_data(d) for f, d in content.items()} - if headers: - headers.pop('Content-Type', None) - return self.send(request, headers, None, files=file_data, **config) + content_type = headers.pop('Content-Type', None) if headers else None + + if content_type and content_type.lower() == 'application/x-www-form-urlencoded': + # Do NOT use "add_content" that assumes input is JSON + request.data = {f: d for f, d in content.items() if d is not None} + return self.send(request, headers, None, **config) + else: # Assume "multipart/form-data" + file_data = {f: self._format_data(d) for f, d in content.items() if d is not None} + return self.send(request, headers, None, files=file_data, **config) def send(self, request, headers=None, content=None, **config): """Prepare and send request object according to configuration.
Azure/msrest-for-python
24deba7a7a9e335314058ec2d0b39a710f61be60
diff --git a/tests/test_client.py b/tests/test_client.py index ee10d48..650eac5 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -204,6 +204,17 @@ class TestServiceClient(unittest.TestCase): ServiceClient.send_formdata(mock_client, request, {'Content-Type':'1234'}, {'1':'1', '2':'2'}) mock_client.send.assert_called_with(request, {}, None, files={'1':'formatted', '2':'formatted'}) + ServiceClient.send_formdata(mock_client, request, {'Content-Type':'1234'}, {'1':'1', '2':None}) + mock_client.send.assert_called_with(request, {}, None, files={'1':'formatted'}) + + ServiceClient.send_formdata(mock_client, request, {'Content-Type':'application/x-www-form-urlencoded'}, {'1':'1', '2':'2'}) + mock_client.send.assert_called_with(request, {}, None) + self.assertEqual(request.data, {'1':'1', '2':'2'}) + + ServiceClient.send_formdata(mock_client, request, {'Content-Type':'application/x-www-form-urlencoded'}, {'1':'1', '2':None}) + mock_client.send.assert_called_with(request, {}, None) + self.assertEqual(request.data, {'1':'1'}) + def test_format_data(self): mock_client = mock.create_autospec(ServiceClient)
Optional formData parameters crash msrest If a parameter that is supposed to be formData is optional, we give `None` to requests: ```python files = [('Text', (None, 'cognituve services')), ('Mode', (None, None)), ('PreContextText', (None, None)), ('PostContextText', (None, None))] data = {} @staticmethod def _encode_files(files, data): """Build the body for a multipart/form-data request. Will successfully encode files when passed as a dict or a list of tuples. Order is retained if data is a list of tuples but arbitrary if parameters are supplied as a dict. The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype) or 4-tuples (filename, fileobj, contentype, custom_headers). """ if (not files): raise ValueError("Files must be provided.") elif isinstance(data, basestring): raise ValueError("Data must not be a string.") new_fields = [] fields = to_key_val_list(data or {}) files = to_key_val_list(files or {}) for field, val in fields: if isinstance(val, basestring) or not hasattr(val, '__iter__'): val = [val] for v in val: if v is not None: # Don't call str() on bytestrings: in Py3 it all goes wrong. if not isinstance(v, bytes): v = str(v) new_fields.append( (field.decode('utf-8') if isinstance(field, bytes) else field, v.encode('utf-8') if isinstance(v, str) else v)) for (k, v) in files: # support for explicit filename ft = None fh = None if isinstance(v, (tuple, list)): if len(v) == 2: fn, fp = v elif len(v) == 3: fn, fp, ft = v else: fn, fp, ft, fh = v else: fn = guess_filename(v) or k fp = v if isinstance(fp, (str, bytes, bytearray)): fdata = fp else: > fdata = fp.read() E AttributeError: 'NoneType' object has no attribute 'read' ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_client.py::TestServiceClient::test_client_formdata_send" ]
[ "tests/test_client.py::TestServiceClient::test_client_header", "tests/test_client.py::TestServiceClient::test_format_url", "tests/test_client.py::TestServiceClient::test_session_callback", "tests/test_client.py::TestServiceClient::test_format_data", "tests/test_client.py::TestServiceClient::test_client_request", "tests/test_client.py::TestServiceClient::test_client_send" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-11-20T21:05:32Z"
mit
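A minimal sketch of the idea behind the Azure__msrest-for-python-67 fix above: None-valued form fields are dropped before the content is handed to requests (for both multipart/form-data and application/x-www-form-urlencoded bodies), instead of wrapping None in a file tuple that requests then tries to `.read()`. The field names below are placeholders.

```python
# Sketch of the filtering performed by the patched send_formdata.
content = {'Text': 'cognitive services', 'Mode': None, 'PreContextText': None}
form_fields = {name: value for name, value in content.items() if value is not None}
print(form_fields)  # {'Text': 'cognitive services'}
```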
Azure__msrestazure-for-python-55
diff --git a/msrestazure/azure_exceptions.py b/msrestazure/azure_exceptions.py index bb85333..5b4792c 100644 --- a/msrestazure/azure_exceptions.py +++ b/msrestazure/azure_exceptions.py @@ -30,6 +30,15 @@ from msrest.exceptions import ClientException from msrest.serialization import Deserializer from msrest.exceptions import DeserializationError +class CloudErrorRoot(object): + """Just match the "error" key at the root of a OdataV4 JSON. + """ + _validation = {} + _attribute_map = { + 'error': {'key': 'error', 'type': 'CloudErrorData'}, + } + def __init__(self, error): + self.error = error class CloudErrorData(object): """Cloud Error Data object, deserialized from error data returned @@ -47,7 +56,7 @@ class CloudErrorData(object): def __init__(self, *args, **kwargs): self.error = kwargs.get('error') - self._message = kwargs.get('message') + self.message = kwargs.get('message') self.request_id = None self.error_time = None self.target = kwargs.get('target') @@ -122,7 +131,10 @@ class CloudError(ClientException): """ def __init__(self, response, error=None, *args, **kwargs): - self.deserializer = Deserializer({'CloudErrorData': CloudErrorData}) + self.deserializer = Deserializer({ + 'CloudErrorRoot': CloudErrorRoot, + 'CloudErrorData': CloudErrorData + }) self.error = None self.message = None self.response = response @@ -149,13 +161,7 @@ class CloudError(ClientException): def _build_error_data(self, response): try: - data = response.json() - except ValueError: - data = response - else: - data = data.get('error', data) - try: - self.error = self.deserializer(CloudErrorData(), data) + self.error = self.deserializer('CloudErrorRoot', response).error except DeserializationError: self.error = None else: @@ -178,7 +184,10 @@ class CloudError(ClientException): except ValueError: message = "none" else: - message = data.get("message", self._get_state(data)) + try: + message = data.get("message", self._get_state(data)) + except AttributeError: # data is not a dict, but is a requests.Response parsable as JSON + message = str(response.text) try: response.raise_for_status() except RequestException as err:
Azure/msrestazure-for-python
005f5a4320385930ba82d4c0e13ce90506884b27
diff --git a/tests/test_exceptions.py b/tests/test_exceptions.py index 2506a9c..45a4770 100644 --- a/tests/test_exceptions.py +++ b/tests/test_exceptions.py @@ -125,20 +125,6 @@ class TestCloudException(unittest.TestCase): response.headers = {"content-type": "application/json; charset=utf8"} response.reason = 'BadRequest' - message = { - 'code': '500', - 'message': {'value': 'Bad Request\nRequest:34875\nTime:1999-12-31T23:59:59-23:59'}, - 'values': {'invalid_attribute':'data'} - } - - response.text = json.dumps(message) - response.json = lambda: json.loads(response.text) - - error = CloudError(response) - self.assertEqual(error.message, 'Bad Request') - self.assertEqual(error.status_code, 400) - self.assertIsInstance(error.error, CloudErrorData) - message = { 'error': { 'code': '500', 'message': {'value': 'Bad Request\nRequest:34875\nTime:1999-12-31T23:59:59-23:59'}, @@ -146,6 +132,7 @@ class TestCloudException(unittest.TestCase): }} response.text = json.dumps(message) + response.json = lambda: json.loads(response.text) error = CloudError(response) self.assertEqual(error.message, 'Bad Request') self.assertEqual(error.status_code, 400) @@ -175,9 +162,9 @@ class TestCloudException(unittest.TestCase): response.text = '{\r\n "odata.metadata":"https://account.region.batch.azure.com/$metadata#Microsoft.Azure.Batch.Protocol.Entities.Container.errors/@Element","code":"InvalidHeaderValue","message":{\r\n "lang":"en-US","value":"The value for one of the HTTP headers is not in the correct format.\\nRequestId:5f4c1f05-603a-4495-8e80-01f776310bbd\\nTime:2016-01-04T22:12:33.9245931Z"\r\n },"values":[\r\n {\r\n "key":"HeaderName","value":"Content-Type"\r\n },{\r\n "key":"HeaderValue","value":"application/json; odata=minimalmetadata; charset=utf-8"\r\n }\r\n ]\r\n}' error = CloudError(response) - self.assertIsInstance(error.error, CloudErrorData) + self.assertIn("The value for one of the HTTP headers is not in the correct format", error.message) - response.text = '{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","target":null,"details":[{"message":"The maximum number of Free ServerFarms allowed in a Subscription is 10."},{"code":"Conflict"},{"errorentity":{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","extendedCode":"59301","messageTemplate":"The maximum number of {0} ServerFarms allowed in a Subscription is {1}.","parameters":["Free","10"],"innerErrors":null}}],"innererror":null}' + response.text = '{"error":{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","target":null,"details":[{"message":"The maximum number of Free ServerFarms allowed in a Subscription is 10."},{"code":"Conflict"},{"errorentity":{"code":"Conflict","message":"The maximum number of Free ServerFarms allowed in a Subscription is 10.","extendedCode":"59301","messageTemplate":"The maximum number of {0} ServerFarms allowed in a Subscription is {1}.","parameters":["Free","10"],"innerErrors":null}}],"innererror":null}}' error = CloudError(response) self.assertIsInstance(error.error, CloudErrorData) self.assertEqual(error.error.error, "Conflict") @@ -199,6 +186,11 @@ class TestCloudException(unittest.TestCase): self.assertIsInstance(error.error, CloudErrorData) self.assertEqual(error.error.error, "BadArgument") + # See https://github.com/Azure/msrestazure-for-python/issues/54 + response.text = '"{\\"error\\": {\\"code\\": \\"ResourceGroupNotFound\\", \\"message\\": \\"Resource group 
\'res_grp\' could not be found.\\"}}"' + error = CloudError(response) + self.assertIn(response.text, error.message) + if __name__ == '__main__': unittest.main()
CloudError parsing should be resilient if input type is string In some (messy) scenarios, we don't receive a dict (from a JSON), but a string. We should be robust to that and print the whole string as the error message: ```python msrest.http_logger : b'"{\\"error\\":{\\"code\\":\\"ResourceGroupNotFound\\",\\"message\\":\\"Resource group \'res_grp\' could not be found.\\"}}"' 'str' object has no attribute 'get' Traceback (most recent call last): File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\main.py", line 36, in main cmd_result = APPLICATION.execute(args) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\application.py", line 212, in execute result = expanded_arg.func(params) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\commands\__init__.py", line 377, in __call__ return self.handler(*args, **kwargs) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\commands\__init__.py", line 620, in _execute_command reraise(*sys.exc_info()) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\six.py", line 693, in reraise raise value File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\cli\core\commands\__init__.py", line 614, in _execute_command return list(result) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrest\paging.py", line 109, in __next__ self.advance_page() File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrest\paging.py", line 95, in advance_page self._response = self._get_next(self.next_link) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\azure\mgmt\compute\v2017_03_30\operations\disks_operations.py", line 441, in internal_paging exp = CloudError(response) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrestazure\azure_exceptions.py", line 136, in __init__ self._build_error_data(response) File "C:\Users\lmazuel\Git\AzureCli\lib\site-packages\msrestazure\azure_exceptions.py", line 156, in _build_error_data data = data.get('error', data) AttributeError: 'str' object has no attribute 'get' ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_exceptions.py::TestCloudException::test_cloud_error" ]
[ "tests/test_exceptions.py::TestCloudException::test_cloud_exception" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2017-10-30T22:28:54Z"
mit
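A minimal sketch of the failure mode in the Azure__msrestazure-for-python-55 record above: the response body is a JSON *string* that wraps the OData error document, so a single `json.loads()` yields a `str` and the old `data.get('error', data)` call raised `AttributeError`. The payload below mirrors the one in the logged traceback.

```python
# Sketch of the double-encoded error payload from the record above.
import json

body = ('"{\\"error\\": {\\"code\\": \\"ResourceGroupNotFound\\", '
        '\\"message\\": \\"Resource group \'res_grp\' could not be found.\\"}}"')
data = json.loads(body)
print(type(data).__name__)                 # str -- not a dict
print(json.loads(data)['error']['code'])   # ResourceGroupNotFound
```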
Azure__pykusto-20
diff --git a/pykusto/client.py b/pykusto/client.py index 1d708c7..2bc47e4 100644 --- a/pykusto/client.py +++ b/pykusto/client.py @@ -1,5 +1,8 @@ from typing import Union, List, Tuple +# noinspection PyProtectedMember +from urllib.parse import urlparse + # noinspection PyProtectedMember from azure.kusto.data._response import KustoResponseDataSet from azure.kusto.data.request import KustoClient, KustoConnectionStringBuilder, ClientRequestProperties @@ -12,6 +15,7 @@ class PyKustoClient: Handle to a Kusto cluster """ _client: KustoClient + _cluster_name: str def __init__(self, client_or_cluster: Union[str, KustoClient]) -> None: """ @@ -22,8 +26,11 @@ class PyKustoClient: """ if isinstance(client_or_cluster, KustoClient): self._client = client_or_cluster + # noinspection PyProtectedMember + self._cluster_name = urlparse(client_or_cluster._query_endpoint).netloc # TODO neater way else: self._client = self._get_client_for_cluster(client_or_cluster) + self._cluster_name = client_or_cluster def execute(self, database: str, query: KQL, properties: ClientRequestProperties = None) -> KustoResponseDataSet: return self._client.execute(database, query, properties) @@ -35,6 +42,9 @@ class PyKustoClient: def __getitem__(self, database_name: str) -> 'Database': return Database(self, database_name) + def get_cluster_name(self) -> str: + return self._cluster_name + @staticmethod def _get_client_for_cluster(cluster: str) -> KustoClient: return KustoClient(KustoConnectionStringBuilder.with_aad_device_authentication(cluster)) @@ -70,7 +80,7 @@ class Table: Handle to a Kusto table """ database: Database - table: KQL + tables: Union[str, List[str], Tuple[str, ...]] def __init__(self, database: Database, tables: Union[str, List[str], Tuple[str, ...]]) -> None: """ @@ -82,19 +92,31 @@ class Table: """ self.database = database - - if isinstance(tables, (List, Tuple)): - self.table = KQL(', '.join(tables)) + self.tables = [tables] if isinstance(tables, str) else tables + + def get_table(self) -> KQL: + result = KQL(', '.join(self.tables)) + if '*' in result or ',' in result: + result = KQL('union ' + result) + return result + + def get_full_table(self) -> KQL: + assert len(self.tables) > 0 + if len(self.tables) == 1 and not any('*' in t for t in self.tables): + return self._format_full_table_name(self.tables[0]) else: - self.table = KQL(tables) - if '*' in self.table or ',' in self.table: - self.table = KQL('union ' + self.table) + return KQL("union " + ", ".join(self._format_full_table_name(t) for t in self.tables)) + + def _format_full_table_name(self, table): + table_format_str = 'cluster("{}").database("{}").table("{}")' + return KQL( + table_format_str.format(self.database.client.get_cluster_name(), self.database.name, table)) def execute(self, rendered_query: KQL) -> KustoResponseDataSet: return self.database.execute(rendered_query) def show_columns(self) -> Tuple[Tuple[str, str], ...]: - res: KustoResponseDataSet = self.execute(KQL('.show table {}'.format(self.table))) + res: KustoResponseDataSet = self.execute(KQL('.show table {}'.format(self.get_table()))) return tuple( ( r[0], # Column name diff --git a/pykusto/query.py b/pykusto/query.py index 547c1c9..42caba4 100644 --- a/pykusto/query.py +++ b/pykusto/query.py @@ -156,14 +156,18 @@ class Query: def _compile(self) -> KQL: pass - def _compile_all(self) -> KQL: + def _compile_all(self, use_full_table_name) -> KQL: if self._head is None: if self._table is None: return KQL("") else: - return self._table.table + table = self._table + if use_full_table_name: + 
return table.get_full_table() + else: + return table.get_table() else: - return KQL("{} | {}".format(self._head._compile_all(), self._compile())) + return KQL("{} | {}".format(self._head._compile_all(use_full_table_name), self._compile())) def get_table(self) -> Table: if self._head is None: @@ -171,8 +175,8 @@ class Query: else: return self._head.get_table() - def render(self) -> KQL: - result = self._compile_all() + def render(self, use_full_table_name: bool = False) -> KQL: + result = self._compile_all(use_full_table_name) logger.debug("Complied query: " + result) return result @@ -180,7 +184,7 @@ class Query: if self.get_table() is None: if table is None: raise RuntimeError("No table supplied") - rendered_query = table.table + self.render() + rendered_query = table.get_table() + self.render() else: if table is not None: raise RuntimeError("This table is already bound to a query") @@ -412,7 +416,7 @@ class JoinQuery(Query): return KQL("join {} ({}) on {}".format( "" if self._kind is None else "kind={}".format(self._kind.value), - self._joined_query.render(), + self._joined_query.render(use_full_table_name=True), ", ".join([self._compile_on_attribute(attr) for attr in self._on_attributes])))
Azure/pykusto
13fc2f12a84ca19b0cc8c6d61d2c67da858fdae0
diff --git a/test/test_query.py b/test/test_query.py index b4ed016..a228e03 100644 --- a/test/test_query.py +++ b/test/test_query.py @@ -69,7 +69,7 @@ class TestQuery(TestBase): table = PyKustoClient(mock_kusto_client)['test_db']['test_table'] self.assertEqual( - " | where foo > 4 | take 5 | join kind=inner (test_table) on col0, $left.col1==$right.col2", + ' | where foo > 4 | take 5 | join kind=inner (cluster("test_cluster.kusto.windows.net").database("test_db").table("test_table")) on col0, $left.col1==$right.col2', Query().where(col.foo > 4).take(5).join( Query(table), kind=JoinKind.INNER).on(col.col0).on(col.col1, col.col2).render(), ) @@ -79,7 +79,7 @@ class TestQuery(TestBase): table = PyKustoClient(mock_kusto_client)['test_db']['test_table'] self.assertEqual( - " | where foo > 4 | take 5 | join kind=inner (test_table | where bla == 2 | take 6) on col0, " + ' | where foo > 4 | take 5 | join kind=inner (cluster("test_cluster.kusto.windows.net").database("test_db").table("test_table") | where bla == 2 | take 6) on col0, ' "$left.col1==$right.col2", Query().where(col.foo > 4).take(5).join( Query(table).where(col.bla == 2).take(6), kind=JoinKind.INNER).on(col.col0).on(col.col1, diff --git a/test/test_table.py b/test/test_table.py index d7a81b6..755acb2 100644 --- a/test/test_table.py +++ b/test/test_table.py @@ -1,8 +1,10 @@ from typing import List, Tuple from unittest.mock import patch +from urllib.parse import urljoin from azure.kusto.data.request import KustoClient, ClientRequestProperties +from pykusto.expressions import column_generator as col from pykusto.client import PyKustoClient from pykusto.query import Query from test.test_base import TestBase @@ -12,8 +14,9 @@ from test.test_base import TestBase class MockKustoClient(KustoClient): executions: List[Tuple[str, str, ClientRequestProperties]] - def __init__(self): + def __init__(self, cluster="https://test_cluster.kusto.windows.net"): self.executions = [] + self._query_endpoint = urljoin(cluster, "/v2/rest/query") def execute(self, database: str, rendered_query: str, properties: ClientRequestProperties = None): self.executions.append((database, rendered_query, properties)) @@ -85,3 +88,43 @@ class TestTable(TestBase): [('test_db', 'test_table | take 5', None)], mock_kusto_client.executions, ) + + def test_cross_cluster_join(self): + mock_kusto_client_1 = MockKustoClient("https://one.kusto.windows.net") + mock_kusto_client_2 = MockKustoClient("https://two.kusto.windows.net") + + table1 = PyKustoClient(mock_kusto_client_1)['test_db_1']['test_table_1'] + table2 = PyKustoClient(mock_kusto_client_2)['test_db_2']['test_table_2'] + Query(table1).take(5).join(Query(table2).take(6)).on(col.foo).execute() + self.assertEqual( + [('test_db_1', 'test_table_1 | take 5 | join (cluster("two.kusto.windows.net").database("test_db_2").table("test_table_2") | take 6) on foo', None)], + mock_kusto_client_1.executions, + ) + + def test_cross_cluster_join_with_union(self): + mock_kusto_client_1 = MockKustoClient("https://one.kusto.windows.net") + mock_kusto_client_2 = MockKustoClient("https://two.kusto.windows.net") + + table1 = PyKustoClient(mock_kusto_client_1)['test_db_1']['test_table_1'] + table2 = PyKustoClient(mock_kusto_client_2)['test_db_2'].get_tables('test_table_2_*') + Query(table1).take(5).join(Query(table2).take(6)).on(col.foo).execute() + self.assertEqual( + [('test_db_1', + 'test_table_1 | take 5 | join (union cluster("two.kusto.windows.net").database("test_db_2").table("test_table_2_*") | take 6) on foo', + None)], + 
mock_kusto_client_1.executions, + ) + + def test_cross_cluster_join_with_union_2(self): + mock_kusto_client_1 = MockKustoClient("https://one.kusto.windows.net") + mock_kusto_client_2 = MockKustoClient("https://two.kusto.windows.net") + + table1 = PyKustoClient(mock_kusto_client_1)['test_db_1']['test_table_1'] + table2 = PyKustoClient(mock_kusto_client_2)['test_db_2'].get_tables('test_table_2_*', 'test_table_3_*') + Query(table1).take(5).join(Query(table2).take(6)).on(col.foo).execute() + self.assertEqual( + [('test_db_1', + 'test_table_1 | take 5 | join (union cluster("two.kusto.windows.net").database("test_db_2").table("test_table_2_*"), cluster("two.kusto.windows.net").database("test_db_2").table("test_table_3_*") | take 6) on foo', + None)], + mock_kusto_client_1.executions, + )
Add support for cross-cluster join If the joined table is from a different cluster, render it using the following syntax: cluster("a").database("b").table("c")
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_query.py::TestQuery::test_join_with_table", "test/test_query.py::TestQuery::test_join_with_table_and_query", "test/test_table.py::TestTable::test_cross_cluster_join", "test/test_table.py::TestTable::test_cross_cluster_join_with_union", "test/test_table.py::TestTable::test_cross_cluster_join_with_union_2" ]
[ "test/test_query.py::TestQuery::test_add_queries", "test/test_query.py::TestQuery::test_count", "test/test_query.py::TestQuery::test_custom", "test/test_query.py::TestQuery::test_distinct", "test/test_query.py::TestQuery::test_distinct_all", "test/test_query.py::TestQuery::test_extend", "test/test_query.py::TestQuery::test_extend_assign_to_multiple_columns", "test/test_query.py::TestQuery::test_extend_generate_column_name", "test/test_query.py::TestQuery::test_join_no_joined_table", "test/test_query.py::TestQuery::test_join_no_on", "test/test_query.py::TestQuery::test_limit", "test/test_query.py::TestQuery::test_mv_expand", "test/test_query.py::TestQuery::test_mv_expand_args", "test/test_query.py::TestQuery::test_mv_expand_no_args", "test/test_query.py::TestQuery::test_order", "test/test_query.py::TestQuery::test_order_expression_in_arg", "test/test_query.py::TestQuery::test_project", "test/test_query.py::TestQuery::test_project_assign_to_multiple_columns", "test/test_query.py::TestQuery::test_project_away", "test/test_query.py::TestQuery::test_project_away_wildcard", "test/test_query.py::TestQuery::test_project_rename", "test/test_query.py::TestQuery::test_project_unspecified_column", "test/test_query.py::TestQuery::test_project_with_expression", "test/test_query.py::TestQuery::test_sample", "test/test_query.py::TestQuery::test_sanity", "test/test_query.py::TestQuery::test_sort", "test/test_query.py::TestQuery::test_sort_multiple_cols", "test/test_query.py::TestQuery::test_summarize", "test/test_query.py::TestQuery::test_summarize_by", "test/test_query.py::TestQuery::test_summarize_by_expression", "test/test_query.py::TestQuery::test_take", "test/test_query.py::TestQuery::test_top", "test/test_query.py::TestQuery::test_udf", "test/test_query.py::TestQuery::test_where", "test/test_table.py::TestTable::test_default_authentication", "test/test_table.py::TestTable::test_execute_already_bound", "test/test_table.py::TestTable::test_execute_no_table", "test/test_table.py::TestTable::test_single_table", "test/test_table.py::TestTable::test_single_table_on_execute", "test/test_table.py::TestTable::test_union_table", "test/test_table.py::TestTable::test_union_table_with_wildcard" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-08-12T08:54:35Z"
mit
Azure__pykusto-22
diff --git a/pykusto/query.py b/pykusto/query.py index 42caba4..272bbaf 100644 --- a/pykusto/query.py +++ b/pykusto/query.py @@ -53,7 +53,12 @@ class Query: self._table = head if isinstance(head, Table) else None def __add__(self, other: 'Query'): - other._head = self + other_base = other + while other_base._head is not None: + if other_base._head._head is None: + break + other_base = other_base._head + other_base._head = self return other def where(self, predicate: BooleanType) -> 'WhereQuery':
Azure/pykusto
aff79137b6d310f33a2085ece2fbe41886c50c11
diff --git a/test/test_query.py b/test/test_query.py index a228e03..034feec 100644 --- a/test/test_query.py +++ b/test/test_query.py @@ -15,9 +15,21 @@ class TestQuery(TestBase): ) def test_add_queries(self): - query = Query().where(col.foo > 4) + Query().take(5) + Query().sort_by(col.bar, Order.ASC, Nulls.LAST) + query = (Query().where(col.foo > 4) + + Query().take(5) + + Query().where(col.foo > 1).sort_by(col.bar, Order.ASC, Nulls.LAST)) self.assertEqual( - " | where foo > 4 | take 5 | sort by bar asc nulls last", + " | where foo > 4 | take 5 | where foo > 1 | sort by bar asc nulls last", + query.render(), + ) + + def test_add_queries_with_table(self): + mock_kusto_client = MockKustoClient() + table = PyKustoClient(mock_kusto_client)['test_db']['test_table'] + b = Query().take(5).take(2).sort_by(col.bar, Order.ASC, Nulls.LAST) + query = Query(table).where(col.foo > 4) + b + self.assertEqual( + "test_table | where foo > 4 | take 5 | take 2 | sort by bar asc nulls last", query.render(), )
query.__add__ only handles short queries
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_query.py::TestQuery::test_add_queries", "test/test_query.py::TestQuery::test_add_queries_with_table" ]
[ "test/test_query.py::TestQuery::test_count", "test/test_query.py::TestQuery::test_custom", "test/test_query.py::TestQuery::test_distinct", "test/test_query.py::TestQuery::test_distinct_all", "test/test_query.py::TestQuery::test_extend", "test/test_query.py::TestQuery::test_extend_assign_to_multiple_columns", "test/test_query.py::TestQuery::test_extend_generate_column_name", "test/test_query.py::TestQuery::test_join_no_joined_table", "test/test_query.py::TestQuery::test_join_no_on", "test/test_query.py::TestQuery::test_join_with_table", "test/test_query.py::TestQuery::test_join_with_table_and_query", "test/test_query.py::TestQuery::test_limit", "test/test_query.py::TestQuery::test_mv_expand", "test/test_query.py::TestQuery::test_mv_expand_args", "test/test_query.py::TestQuery::test_mv_expand_no_args", "test/test_query.py::TestQuery::test_order", "test/test_query.py::TestQuery::test_order_expression_in_arg", "test/test_query.py::TestQuery::test_project", "test/test_query.py::TestQuery::test_project_assign_to_multiple_columns", "test/test_query.py::TestQuery::test_project_away", "test/test_query.py::TestQuery::test_project_away_wildcard", "test/test_query.py::TestQuery::test_project_rename", "test/test_query.py::TestQuery::test_project_unspecified_column", "test/test_query.py::TestQuery::test_project_with_expression", "test/test_query.py::TestQuery::test_sample", "test/test_query.py::TestQuery::test_sanity", "test/test_query.py::TestQuery::test_sort", "test/test_query.py::TestQuery::test_sort_multiple_cols", "test/test_query.py::TestQuery::test_summarize", "test/test_query.py::TestQuery::test_summarize_by", "test/test_query.py::TestQuery::test_summarize_by_expression", "test/test_query.py::TestQuery::test_take", "test/test_query.py::TestQuery::test_top", "test/test_query.py::TestQuery::test_udf", "test/test_query.py::TestQuery::test_where" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-08-12T14:53:39Z"
mit
Azure__pykusto-34
diff --git a/.gitignore b/.gitignore index 456846f..78112ac 100644 --- a/.gitignore +++ b/.gitignore @@ -334,3 +334,4 @@ ASALocalRun/ /.idea/checkstyle-idea.xml *.iml /.idea/misc.xml +/venv diff --git a/pykusto/functions.py b/pykusto/functions.py index 8450ef8..210958f 100644 --- a/pykusto/functions.py +++ b/pykusto/functions.py @@ -120,6 +120,8 @@ def ceiling(expr: NumberType) -> NumberExpression: def cos(expr: NumberType) -> NumberExpression: return expr.cos() + + # # def cot(self): return @@ -257,12 +259,12 @@ def hourofday(expr: DatetimeType) -> NumberExpression: return expr.hourofday() -def iif(predicate: BooleanType, if_true: ExpressionType, if_false: ExpressionType) -> BaseExpression: - return BaseExpression(KQL('iif({}, {}, {})'.format(predicate, if_true, if_false))) +def iff(predicate: BooleanType, if_true: ExpressionType, if_false: ExpressionType) -> BaseExpression: + return BaseExpression(KQL('iff({}, {}, {})'.format(predicate, _subexpr_to_kql(if_true), _subexpr_to_kql(if_false)))) -def iff(predicate: BooleanType, if_true: ExpressionType, if_false: ExpressionType) -> BaseExpression: - return BaseExpression(KQL('iff({}, {}, {})'.format(predicate, if_true, if_false))) +def iif(predicate: BooleanType, if_true: ExpressionType, if_false: ExpressionType) -> BaseExpression: + return iff(predicate, if_true, if_false) # diff --git a/pykusto/query.py b/pykusto/query.py index 5025977..b7157fb 100644 --- a/pykusto/query.py +++ b/pykusto/query.py @@ -12,7 +12,7 @@ from pykusto.expressions import BooleanType, ExpressionType, AggregationExpressi StringType, AssignmentBase, AssignmentFromAggregationToColumn, AssignmentToSingleColumn, Column, BaseExpression, \ AssignmentFromColumnToColumn from pykusto.udf import stringify_python_func -from pykusto.utils import KQL, logger +from pykusto.utils import KQL, logger, to_kql class Order(Enum): @@ -137,7 +137,10 @@ class Query: else: assignments.append(arg) for column_name, expression in kwargs.items(): - assignments.append(expression.assign_to(Column(column_name))) + if isinstance(expression, BaseExpression): + assignments.append(expression.assign_to(Column(column_name))) + else: + assignments.append(BaseExpression(to_kql(expression)).assign_to(Column(column_name))) return ExtendQuery(self, *assignments) def summarize(self, *args: Union[AggregationExpression, AssignmentFromAggregationToColumn], diff --git a/setup.py b/setup.py index 605de64..b31e771 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages setup( name='pykusto', - version='0.0.4', + version='0.0.5', packages=find_packages(exclude=['test']), url='https://github.com/Azure/pykusto', license='MIT License',
Azure/pykusto
a6f85c8015b59226608bafa1d2f5d862d1917ef4
diff --git a/test/test_expressions.py b/test/test_expressions.py index 3074ef4..7c42e8a 100644 --- a/test/test_expressions.py +++ b/test/test_expressions.py @@ -55,6 +55,16 @@ class TestExpressions(TestBase): " | extend ['foo.bar'] = (shoo * 2)", Query().extend((col.shoo * 2).assign_to(col.foo.bar)).render(), ) + self.assertEqual( + " | extend foo = (shoo * 2)", + Query().extend(foo=(col.shoo * 2)).render(), + ) + + def test_extend_const(self): + self.assertEqual( + " | extend foo = (5), bar = (\"bar\"), other_col = other", + Query().extend(foo=5, bar="bar", other_col=col.other).render(), + ) def test_between_timespan(self): self.assertEqual( diff --git a/test/test_functions.py b/test/test_functions.py index 247142d..55fc998 100644 --- a/test/test_functions.py +++ b/test/test_functions.py @@ -7,7 +7,7 @@ from pykusto.query import Query from test.test_base import TestBase -# TODO dcount_hll, iif +# TODO dcount_hll class TestFunction(TestBase): def test_acos(self): @@ -629,3 +629,16 @@ class TestFunction(TestBase): " | summarize active_days = dcount(bin(timestamp, time(1.0:0:0.0)))", Query().summarize(active_days=f.dcount(f.bin(col.timestamp, datetime.timedelta(1)))).render() ) + + def test_iff(self): + self.assertEqual( + " | project foo = (iff(foo > (ago(time(2.0:0:0.0))), time(3.0:0:0.0), time(4.0:0:0.0)))", + Query().project(foo=f.iff(col.foo > f.ago(datetime.timedelta(2)), datetime.timedelta(3), datetime.timedelta(4))).render() + ) + + def test_iif(self): + # iif is just an alias to iff + self.assertEqual( + " | project foo = (iff(foo > (ago(time(2.0:0:0.0))), time(3.0:0:0.0), time(4.0:0:0.0)))", + Query().project(foo=f.iif(col.foo > f.ago(datetime.timedelta(2)), datetime.timedelta(3), datetime.timedelta(4))).render() + )
iff doesn't support sub-expressions. The following syntax is not working: .extend(timeDelta=iff(col.day-col.day1 == 0, timedelta(DAYS_BACK_DIFF), col.day-col.day1))
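A minimal sketch of the usage this issue describes, modelled on the expression quoted above and on the imports used in the repository's tests; `col.day`, `col.day1`, and `DAYS_BACK_DIFF` are placeholders taken from the issue, not names defined here.

```python
# Sketch only: mirrors the expression from the issue; assumes pykusto is installed and
# that col.day / col.day1 are columns of the queried table (placeholders from the issue).
import datetime

from pykusto import functions as f
from pykusto.expressions import column_generator as col
from pykusto.query import Query

DAYS_BACK_DIFF = 3  # placeholder value; the issue does not specify it

# With the fix, iff() renders its non-expression arguments (here a timedelta and a
# column-arithmetic sub-expression) into valid KQL instead of producing malformed output.
query = Query().extend(
    timeDelta=f.iff(
        col.day - col.day1 == 0,
        datetime.timedelta(DAYS_BACK_DIFF),
        col.day - col.day1,
    )
)
print(query.render())
```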
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_expressions.py::TestExpressions::test_extend_const", "test/test_functions.py::TestFunction::test_iff", "test/test_functions.py::TestFunction::test_iif" ]
[ "test/test_expressions.py::TestExpressions::test_array_access", "test/test_expressions.py::TestExpressions::test_array_access_expression_index", "test/test_expressions.py::TestExpressions::test_assign_to", "test/test_expressions.py::TestExpressions::test_between_timespan", "test/test_expressions.py::TestExpressions::test_column_generator", "test/test_expressions.py::TestExpressions::test_column_generator_2", "test/test_expressions.py::TestExpressions::test_contains", "test/test_expressions.py::TestExpressions::test_has", "test/test_expressions.py::TestExpressions::test_is_empty", "test/test_expressions.py::TestExpressions::test_is_in", "test/test_expressions.py::TestExpressions::test_mapping_access", "test/test_expressions.py::TestExpressions::test_mapping_access_expression_index", "test/test_expressions.py::TestExpressions::test_method_does_not_exist", "test/test_expressions.py::TestExpressions::test_not_contains", "test/test_functions.py::TestFunction::test_acos", "test/test_functions.py::TestFunction::test_ago", "test/test_functions.py::TestFunction::test_any", "test/test_functions.py::TestFunction::test_arg_max", "test/test_functions.py::TestFunction::test_arg_min", "test/test_functions.py::TestFunction::test_array_length", "test/test_functions.py::TestFunction::test_avgif", "test/test_functions.py::TestFunction::test_bag_keys", "test/test_functions.py::TestFunction::test_bin", "test/test_functions.py::TestFunction::test_bin_at", "test/test_functions.py::TestFunction::test_bin_auto", "test/test_functions.py::TestFunction::test_case", "test/test_functions.py::TestFunction::test_ceiling", "test/test_functions.py::TestFunction::test_cos", "test/test_functions.py::TestFunction::test_count", "test/test_functions.py::TestFunction::test_countif", "test/test_functions.py::TestFunction::test_dcount", "test/test_functions.py::TestFunction::test_endofday", "test/test_functions.py::TestFunction::test_endofmonth", "test/test_functions.py::TestFunction::test_endofweek", "test/test_functions.py::TestFunction::test_exp", "test/test_functions.py::TestFunction::test_exp10", "test/test_functions.py::TestFunction::test_exp2", "test/test_functions.py::TestFunction::test_floor", "test/test_functions.py::TestFunction::test_format_datetime", "test/test_functions.py::TestFunction::test_format_timespan", "test/test_functions.py::TestFunction::test_getmonth", "test/test_functions.py::TestFunction::test_gettype", "test/test_functions.py::TestFunction::test_getyear", "test/test_functions.py::TestFunction::test_hash", "test/test_functions.py::TestFunction::test_hash_sha256", "test/test_functions.py::TestFunction::test_hourofday", "test/test_functions.py::TestFunction::test_isempty", "test/test_functions.py::TestFunction::test_isfinite", "test/test_functions.py::TestFunction::test_isinf", "test/test_functions.py::TestFunction::test_isnan", "test/test_functions.py::TestFunction::test_isnotempty", "test/test_functions.py::TestFunction::test_isnotnull", "test/test_functions.py::TestFunction::test_isnull", "test/test_functions.py::TestFunction::test_isutf8", "test/test_functions.py::TestFunction::test_log", "test/test_functions.py::TestFunction::test_log10", "test/test_functions.py::TestFunction::test_log2", "test/test_functions.py::TestFunction::test_loggamma", "test/test_functions.py::TestFunction::test_make_bag", "test/test_functions.py::TestFunction::test_make_datetime", "test/test_functions.py::TestFunction::test_make_list", "test/test_functions.py::TestFunction::test_make_set", 
"test/test_functions.py::TestFunction::test_max", "test/test_functions.py::TestFunction::test_min", "test/test_functions.py::TestFunction::test_nesting", "test/test_functions.py::TestFunction::test_now", "test/test_functions.py::TestFunction::test_percentile", "test/test_functions.py::TestFunction::test_percentiles", "test/test_functions.py::TestFunction::test_pow", "test/test_functions.py::TestFunction::test_round", "test/test_functions.py::TestFunction::test_sign", "test/test_functions.py::TestFunction::test_sqrt", "test/test_functions.py::TestFunction::test_startofday", "test/test_functions.py::TestFunction::test_startofmonth", "test/test_functions.py::TestFunction::test_startofweek", "test/test_functions.py::TestFunction::test_startofyear", "test/test_functions.py::TestFunction::test_stdev", "test/test_functions.py::TestFunction::test_stdevif", "test/test_functions.py::TestFunction::test_stdevp", "test/test_functions.py::TestFunction::test_strcat", "test/test_functions.py::TestFunction::test_strcat_array", "test/test_functions.py::TestFunction::test_strcat_delim", "test/test_functions.py::TestFunction::test_strcmp", "test/test_functions.py::TestFunction::test_string_size", "test/test_functions.py::TestFunction::test_strlen", "test/test_functions.py::TestFunction::test_strrep", "test/test_functions.py::TestFunction::test_substring", "test/test_functions.py::TestFunction::test_sum", "test/test_functions.py::TestFunction::test_sumif", "test/test_functions.py::TestFunction::test_tobool", "test/test_functions.py::TestFunction::test_toboolean", "test/test_functions.py::TestFunction::test_todouble", "test/test_functions.py::TestFunction::test_variance", "test/test_functions.py::TestFunction::test_varianceif", "test/test_functions.py::TestFunction::test_variancep" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-12-29T10:57:59Z"
mit
Azure__pykusto-53
diff --git a/pykusto/query.py b/pykusto/query.py index f5a89ba..b2f77f3 100644 --- a/pykusto/query.py +++ b/pykusto/query.py @@ -5,6 +5,8 @@ from itertools import chain from types import FunctionType from typing import Tuple, List, Union, Optional +# noinspection PyProtectedMember +from azure.kusto.data._response import KustoResponseDataSet from azure.kusto.data.helpers import dataframe_from_result_table from pykusto.client import Table @@ -13,6 +15,7 @@ from pykusto.expressions import BooleanType, ExpressionType, AggregationExpressi AssignmentFromColumnToColumn, AnyExpression, to_kql, ColumnToType from pykusto.kql_converters import KQL from pykusto.logger import logger +from pykusto.type_utils import TypeName from pykusto.udf import stringify_python_func @@ -41,6 +44,12 @@ class JoinKind(Enum): RIGHTSEMI = "rightsemi" +class Distribution(Enum): + SINGLE = 'single' + PER_NODE = 'per_node' + PER_SHARD = 'per_shard' + + class BagExpansion(Enum): BAG = "bag" ARRAY = "array" @@ -54,7 +63,7 @@ class Query: self._head = head if isinstance(head, Query) else None self._table = head if isinstance(head, Table) else None - def __add__(self, other: 'Query'): + def __add__(self, other: 'Query') -> 'Query': self_copy = deepcopy(self) other_copy = deepcopy(other) @@ -66,7 +75,7 @@ class Query: other_base._head = self_copy return other_copy - def __deepcopy__(self, memo): + def __deepcopy__(self, memo) -> 'Query': new_object = copy(self) if self._head is not None: new_object._head = self._head.__deepcopy__(memo) @@ -96,7 +105,7 @@ class Query: def top(self, num_rows: int, col: Column, order: Order = None, nulls: Nulls = None) -> 'TopQuery': return TopQuery(self, num_rows, col, order, nulls) - def join(self, query: 'Query', kind: JoinKind = None): + def join(self, query: 'Query', kind: JoinKind = None) -> 'JoinQuery': return JoinQuery(self, query, kind) def project(self, *args: Union[Column, AssignmentBase, BaseExpression], **kwargs: ExpressionType) -> 'ProjectQuery': @@ -121,13 +130,13 @@ class Query: assignments.append(AssignmentFromColumnToColumn(Column(column_name), column)) return ProjectRenameQuery(self, assignments) - def project_away(self, *columns: StringType): + def project_away(self, *columns: StringType) -> 'ProjectAwayQuery': return ProjectAwayQuery(self, columns) - def distinct(self, *columns: BaseExpression): + def distinct(self, *columns: BaseExpression) -> 'DistinctQuery': return DistinctQuery(self, columns) - def distinct_all(self): + def distinct_all(self) -> 'DistinctQuery': return DistinctQuery(self, (AnyExpression(KQL("*")),)) def extend(self, *args: Union[BaseExpression, AssignmentBase], **kwargs: ExpressionType) -> 'ExtendQuery': @@ -159,17 +168,29 @@ class Query: return SummarizeQuery(self, assignments) def mv_expand(self, *columns: Union[Column, ColumnToType], bag_expansion: BagExpansion = None, with_item_index: Column = None, - limit: int = None): + limit: int = None) -> 'MvExpandQuery': if len(columns) == 0: raise ValueError("Please specify one or more columns for mv-expand") return MvExpandQuery(self, columns, bag_expansion, with_item_index, limit) - def custom(self, custom_query: str): + def custom(self, custom_query: str) -> 'CustomQuery': return CustomQuery(self, custom_query) - # TODO convert python types to kusto types - def evaluate(self, udf: FunctionType, type_spec_str: str): - return EvaluatePythonQuery(self, udf, type_spec_str) + def evaluate(self, plugin_name, *args: ExpressionType, distribution: Distribution = None) -> 'EvaluateQuery': + return EvaluateQuery(self, 
plugin_name, *args, distribution=distribution) + + def evaluate_udf(self, udf: FunctionType, extend: bool = True, distribution: Distribution = None, **type_specs: TypeName) -> 'EvaluateQuery': + return EvaluateQuery( + self, 'python', + BaseExpression(KQL('typeof({})'.format(('*, ' if extend else '') + ', '.join(field_name + ':' + type_name.value for field_name, type_name in type_specs.items())))), + stringify_python_func(udf), + distribution=distribution + ) + + def bag_unpack(self, col: Column, prefix: str = None) -> 'EvaluateQuery': + if prefix is None: + return EvaluateQuery(self, 'bag_unpack', col) + return EvaluateQuery(self, 'bag_unpack', col, prefix) @abstractmethod def _compile(self) -> KQL: @@ -199,7 +220,7 @@ class Query: logger.debug("Complied query: " + result) return result - def execute(self, table: Table = None): + def execute(self, table: Table = None) -> KustoResponseDataSet: if self.get_table() is None: if table is None: raise RuntimeError("No table supplied") @@ -510,17 +531,20 @@ class CustomQuery(Query): return KQL(self._custom_query) -class EvaluatePythonQuery(Query): - _udf: FunctionType - _type_specs: str +class EvaluateQuery(Query): + _plugin_name: str + _args: Tuple[ExpressionType] + _distribution: Distribution - def __init__(self, head: Query, udf: FunctionType, type_specs: str): - super(EvaluatePythonQuery, self).__init__(head) - self._udf = udf - self._type_specs = type_specs + def __init__(self, head: Query, plugin_name: str, *args: ExpressionType, distribution: Distribution = None): + super().__init__(head) + self._plugin_name = plugin_name + self._args = args + self._distribution = distribution def _compile(self) -> KQL: - return KQL('evaluate python({},"{}")'.format( - self._type_specs, - stringify_python_func(self._udf) + return KQL('evaluate {}{}({})'.format( + '' if self._distribution is None else 'hint.distribution={} '.format(self._distribution.value), + self._plugin_name, + ', '.join(to_kql(arg) for arg in self._args), ))
Azure/pykusto
bd061e1ddeb7631200b9ee8ff042ed3619b97a10
diff --git a/test/test_query.py b/test/test_query.py index ba09eaa..e5911ea 100644 --- a/test/test_query.py +++ b/test/test_query.py @@ -1,10 +1,11 @@ from pykusto import functions as f from pykusto.client import PyKustoClient from pykusto.expressions import column_generator as col -from pykusto.query import Query, Order, Nulls, JoinKind, JoinException, BagExpansion +from pykusto.query import Query, Order, Nulls, JoinKind, JoinException, BagExpansion, Distribution from pykusto.type_utils import TypeName from test.test_base import TestBase from test.test_table import MockKustoClient +from test.udf import func, STRINGIFIED class TestQuery(TestBase): @@ -290,14 +291,38 @@ class TestQuery(TestBase): Query().distinct_all().render(), ) + def test_evaluate(self): + self.assertEqual( + " | evaluate some_plugin(foo, 3)", + Query().evaluate('some_plugin', col.foo, 3).render(), + ) + + def test_evaluate_with_distribution(self): + self.assertEqual( + " | evaluate hint.distribution=per_shard some_plugin(foo, 3)", + Query().evaluate('some_plugin', col.foo, 3, distribution=Distribution.PER_SHARD).render(), + ) + def test_udf(self): - # noinspection PyGlobalUndefined - def func(): - global result - global df + self.assertEqual( + " | evaluate python(typeof(*, StateZone:string), {})".format(STRINGIFIED), + Query().evaluate_udf(func, StateZone=TypeName.STRING).render(), + ) - result = df - result['StateZone'] = result["State"] + result["Zone"] + def test_udf_no_extend(self): + self.assertEqual( + " | evaluate python(typeof(StateZone:string), {})".format(STRINGIFIED), + Query().evaluate_udf(func, extend=False, StateZone=TypeName.STRING).render(), + ) - # TODO assert - Query().evaluate(func, "typeof(*, StateZone: string)").render() + def test_bag_unpack(self): + self.assertEqual( + " | evaluate bag_unpack(foo)", + Query().bag_unpack(col.foo).render(), + ) + + def test_bag_unpack_with_prefix(self): + self.assertEqual( + ' | evaluate bag_unpack(foo, "bar_")', + Query().bag_unpack(col.foo, 'bar_').render(), + ) diff --git a/test/udf.py b/test/udf.py new file mode 100644 index 0000000..c0c7b02 --- /dev/null +++ b/test/udf.py @@ -0,0 +1,17 @@ +# Function for testing python evaluation plugin, with the result of stringification given below. +# Kept in a separate file because any change to the function (even whitespace) might cause the stringified text to change. + + +# noinspection PyGlobalUndefined +def func(): + global result + global df + + result = df + result['StateZone'] = result["State"] + result["Zone"] + + +STRINGIFIED = "\"from types import CodeType\\n" \ + "code=CodeType(0,0,0,3,67,b't\\\\x00a\\\\x01t\\\\x01d\\\\x01\\\\x19\\\\x00t\\\\x01d\\\\x02\\\\x19\\\\x00\\\\x17\\\\x00t\\\\x01d\\\\x03<\\\\x00d\\\\x00S\\\\x00'," \ + "(None, 'State', 'Zone', 'StateZone'),('df', 'result'),(),'{}','func',6,b'\\\\x00\\\\x04\\\\x04\\\\x01',(),())\\n" \ + "exec(code)\\n\"".format(__file__)
Allow using "evaluate" operator for plugins other than python
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_query.py::TestQuery::test_add_queries", "test/test_query.py::TestQuery::test_add_queries_with_table", "test/test_query.py::TestQuery::test_bag_unpack", "test/test_query.py::TestQuery::test_bag_unpack_with_prefix", "test/test_query.py::TestQuery::test_count", "test/test_query.py::TestQuery::test_custom", "test/test_query.py::TestQuery::test_distinct", "test/test_query.py::TestQuery::test_distinct_all", "test/test_query.py::TestQuery::test_evaluate", "test/test_query.py::TestQuery::test_evaluate_with_distribution", "test/test_query.py::TestQuery::test_extend", "test/test_query.py::TestQuery::test_extend_assign_to_multiple_columns", "test/test_query.py::TestQuery::test_extend_generate_column_name", "test/test_query.py::TestQuery::test_join_no_joined_table", "test/test_query.py::TestQuery::test_join_no_on", "test/test_query.py::TestQuery::test_join_with_table", "test/test_query.py::TestQuery::test_join_with_table_and_query", "test/test_query.py::TestQuery::test_limit", "test/test_query.py::TestQuery::test_mv_expand", "test/test_query.py::TestQuery::test_mv_expand_args", "test/test_query.py::TestQuery::test_mv_expand_no_args", "test/test_query.py::TestQuery::test_mv_expand_to_type", "test/test_query.py::TestQuery::test_no_params_for_sort", "test/test_query.py::TestQuery::test_order", "test/test_query.py::TestQuery::test_order_expression_in_arg", "test/test_query.py::TestQuery::test_project", "test/test_query.py::TestQuery::test_project_assign_to_multiple_columns", "test/test_query.py::TestQuery::test_project_away", "test/test_query.py::TestQuery::test_project_away_wildcard", "test/test_query.py::TestQuery::test_project_rename", "test/test_query.py::TestQuery::test_project_unspecified_column", "test/test_query.py::TestQuery::test_project_with_expression", "test/test_query.py::TestQuery::test_sample", "test/test_query.py::TestQuery::test_sanity", "test/test_query.py::TestQuery::test_sort", "test/test_query.py::TestQuery::test_sort_multiple_cols", "test/test_query.py::TestQuery::test_summarize", "test/test_query.py::TestQuery::test_summarize_by", "test/test_query.py::TestQuery::test_summarize_by_expression", "test/test_query.py::TestQuery::test_take", "test/test_query.py::TestQuery::test_top", "test/test_query.py::TestQuery::test_udf", "test/test_query.py::TestQuery::test_udf_no_extend", "test/test_query.py::TestQuery::test_where" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-02-12T10:37:03Z"
mit
BQSKit__bqskit-215
diff --git a/bqskit/ir/gates/composed/controlled.py b/bqskit/ir/gates/composed/controlled.py index 3ed8a15..0056e18 100644 --- a/bqskit/ir/gates/composed/controlled.py +++ b/bqskit/ir/gates/composed/controlled.py @@ -286,6 +286,33 @@ class ControlledGate(ComposedGate, DifferentiableUnitary): ctrl_U = np.kron(self.ctrl, U) + self.ihalf self._utry = UnitaryMatrix(ctrl_U, self.radixes) + @property + def qasm_name(self) -> str: + """ + Override default `Gate.qasm_name` method. + + If the core gate is a standard gate, this function will output + qasm in the form 'c+<gate_qasm>'. Otherwise an error will be raised. + + Raises: + ValueError: If the core gate is non-standard in OpenQASM 2.0. + """ + _core_gate = self.gate.qasm_name + if self.num_controls <= 2: + _controls = 'c' * self.num_controls + else: + _controls = f'c{self.num_controls}' + qasm_name = _controls + _core_gate + supported_gates = ('cu1', 'cu2', 'cu3', 'cswap', 'c3x', 'c4x') + if qasm_name not in supported_gates: + raise ValueError( + f'Controlled gate {_core_gate} with {self.num_controls} ' + 'controls is not a standard OpenQASM 2.0 identifier. ' + 'To encode this gate, try decomposing it into gates with' + 'standard identifiers.', + ) + return qasm_name + def get_unitary(self, params: RealVector = []) -> UnitaryMatrix: """Return the unitary for this gate, see :class:`Unitary` for more.""" if hasattr(self, '_utry'):
BQSKit/bqskit
1ee59f11da206c3b18667c7691aded816016c8ed
diff --git a/tests/ir/lang/test_controlled_qasm.py b/tests/ir/lang/test_controlled_qasm.py new file mode 100644 index 0000000..d399494 --- /dev/null +++ b/tests/ir/lang/test_controlled_qasm.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from bqskit.ir.lang.qasm2 import OPENQASM2Language + + +class TestControlledQASM: + def test_cu1(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[2];\n' + 'cu1(3.1415) q[0], q[1];\n' + ) + circuit = OPENQASM2Language().decode(input_qasm) + + output_qasm = circuit.to('qasm') + + assert input_qasm == output_qasm + + def test_cu2(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[2];\n' + 'cu2(3.1415, 0.0) q[0], q[1];\n' + ) + circuit = OPENQASM2Language().decode(input_qasm) + + output_qasm = circuit.to('qasm') + + assert input_qasm == output_qasm + + def test_cu3(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[2];\n' + 'cu3(3.1415, 0.0, -4.0) q[0], q[1];\n' + ) + circuit = OPENQASM2Language().decode(input_qasm) + + output_qasm = circuit.to('qasm') + + assert input_qasm == output_qasm + + def test_cswap(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[3];\n' + 'cswap q[0], q[1], q[2];\n' + ) + circuit = OPENQASM2Language().decode(input_qasm) + + output_qasm = circuit.to('qasm') + + assert input_qasm == output_qasm + + def test_c3x(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[4];\n' + 'c3x q[0], q[1], q[2], q[3];\n' + ) + circuit = OPENQASM2Language().decode(input_qasm) + + output_qasm = circuit.to('qasm') + + assert input_qasm == output_qasm + + def test_c4x(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[5];\n' + 'c4x q[0], q[1], q[2], q[3], q[4];\n' + ) + circuit = OPENQASM2Language().decode(input_qasm) + + output_qasm = circuit.to('qasm') + + assert input_qasm == output_qasm + + def test_ch(self) -> None: + + input_qasm = ( + 'OPENQASM 2.0;\n' + 'include "qelib1.inc";\n' + 'qreg q[2];\n' + 'ch q[0], q[1];\n' + ) + try: + OPENQASM2Language().decode(input_qasm) + except ValueError: + assert True
Is it possible to get the QASM string representation of a BQSKit circuit?
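A minimal round-trip sketch based on the test patch above; it assumes BQSKit is installed and uses only the calls the tests exercise (`OPENQASM2Language().decode` and `Circuit.to('qasm')`).

```python
# Round-trip sketch: parse an OpenQASM 2.0 string into a BQSKit Circuit, then
# serialize it back to a QASM string; both calls appear in the test patch above.
from bqskit.ir.lang.qasm2 import OPENQASM2Language

input_qasm = (
    'OPENQASM 2.0;\n'
    'include "qelib1.inc";\n'
    'qreg q[2];\n'
    'cu1(3.1415) q[0], q[1];\n'
)

circuit = OPENQASM2Language().decode(input_qasm)  # QASM -> Circuit
output_qasm = circuit.to('qasm')                  # Circuit -> QASM string
print(output_qasm)
```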
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_cu1", "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_cu2", "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_cu3", "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_cswap", "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_c3x", "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_c4x" ]
[ "tests/ir/lang/test_controlled_qasm.py::TestControlledQASM::test_ch" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2024-01-17T20:21:45Z"
bsd-3-clause
Bachmann1234__diff_cover-210
diff --git a/diff_cover/diff_cover_tool.py b/diff_cover/diff_cover_tool.py index 2894013..e74a453 100644 --- a/diff_cover/diff_cover_tool.py +++ b/diff_cover/diff_cover_tool.py @@ -60,9 +60,7 @@ def parse_coverage_args(argv): parser.add_argument("coverage_xml", type=str, help=COVERAGE_XML_HELP, nargs="+") - output_format = parser.add_mutually_exclusive_group() - - output_format.add_argument( + parser.add_argument( "--html-report", metavar="FILENAME", type=str, @@ -70,7 +68,7 @@ def parse_coverage_args(argv): help=HTML_REPORT_HELP, ) - output_format.add_argument( + parser.add_argument( "--json-report", metavar="FILENAME", type=str, @@ -78,7 +76,7 @@ def parse_coverage_args(argv): help=JSON_REPORT_HELP, ) - output_format.add_argument( + parser.add_argument( "--markdown-report", metavar="FILENAME", type=str, @@ -86,7 +84,7 @@ def parse_coverage_args(argv): help=MARKDOWN_REPORT_HELP, ) - output_format.add_argument( + parser.add_argument( "--show-uncovered", action="store_true", default=False, help=SHOW_UNCOVERED ) @@ -199,12 +197,12 @@ def generate_coverage_report( with open(css_file, "wb") as output_file: reporter.generate_css(output_file) - elif json_report is not None: + if json_report is not None: reporter = JsonReportGenerator(coverage, diff) with open(json_report, "wb") as output_file: reporter.generate_report(output_file) - elif markdown_report is not None: + if markdown_report is not None: reporter = MarkdownReportGenerator(coverage, diff) with open(markdown_report, "wb") as output_file: reporter.generate_report(output_file)
Bachmann1234/diff_cover
5f7aeea8b95441f8286a38524ce0234e1716e304
diff --git a/diff_cover/tests/test_diff_cover_tool.py b/diff_cover/tests/test_diff_cover_tool.py index afe8f77..b6f26f7 100644 --- a/diff_cover/tests/test_diff_cover_tool.py +++ b/diff_cover/tests/test_diff_cover_tool.py @@ -7,27 +7,47 @@ from diff_cover.diff_cover_tool import parse_coverage_args def test_parse_with_html_report(): argv = ["reports/coverage.xml", "--html-report", "diff_cover.html"] - arg_dict = parse_coverage_args(argv) assert arg_dict.get("coverage_xml") == ["reports/coverage.xml"] - assert arg_dict.get("html_report") == "diff_cover.html" + assert arg_dict.get("markdown_report") is None + assert arg_dict.get("json_report") is None assert not arg_dict.get("ignore_unstaged") -def test_parse_with_no_html_report(): +def test_parse_with_no_report(): argv = ["reports/coverage.xml"] + arg_dict = parse_coverage_args(argv) + + assert arg_dict.get("coverage_xml") == ["reports/coverage.xml"] + assert arg_dict.get("html_report") is None + assert arg_dict.get("markdown_report") is None + assert arg_dict.get("json_report") is None + assert not arg_dict.get("ignore_unstaged") + +def test_parse_with_multiple_reports(): + argv = [ + "reports/coverage.xml", + "--html-report", + "report.html", + "--markdown-report", + "report.md", + ] arg_dict = parse_coverage_args(argv) + assert arg_dict.get("coverage_xml") == ["reports/coverage.xml"] + assert arg_dict.get("html_report") == "report.html" + assert arg_dict.get("markdown_report") == "report.md" + assert arg_dict.get("json_report") is None assert not arg_dict.get("ignore_unstaged") def test_parse_with_ignored_unstaged(): argv = ["reports/coverage.xml", "--ignore-unstaged"] - arg_dict = parse_coverage_args(argv) + assert arg_dict.get("ignore_unstaged") @@ -46,11 +66,9 @@ def test_parse_with_exclude(): assert arg_dict.get("exclude") is None argv = ["reports/coverage.xml", "--exclude", "noneed/*.py"] - arg_dict = parse_coverage_args(argv) assert arg_dict.get("exclude") == ["noneed/*.py"] argv = ["reports/coverage.xml", "--exclude", "noneed/*.py", "other/**/*.py"] - arg_dict = parse_coverage_args(argv) assert arg_dict.get("exclude") == ["noneed/*.py", "other/**/*.py"]
Why are HTML & JSON reports prohibited from being generated at the same time? Want to know why they are made mutually exclusive: `output_format = parser.add_mutually_exclusive_group()`
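The mutual exclusion comes from argparse itself; a standalone sketch (independent of diff-cover's real CLI wiring) of how `add_mutually_exclusive_group` rejects combined flags, which is what the patch above removes.

```python
# Standalone argparse illustration (not diff-cover's actual code): flags registered on a
# mutually exclusive group cannot be supplied together; parse_args() exits with an error.
import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument("--html-report", metavar="FILENAME")
group.add_argument("--json-report", metavar="FILENAME")

print(parser.parse_args(["--html-report", "report.html"]))  # accepted
# parser.parse_args(["--html-report", "a.html", "--json-report", "b.json"])
# -> error: argument --json-report: not allowed with argument --html-report
```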
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "diff_cover/tests/test_diff_cover_tool.py::test_parse_with_multiple_reports" ]
[ "diff_cover/tests/test_diff_cover_tool.py::test_parse_with_no_report", "diff_cover/tests/test_diff_cover_tool.py::test_parse_with_exclude", "diff_cover/tests/test_diff_cover_tool.py::test_parse_with_html_report", "diff_cover/tests/test_diff_cover_tool.py::test_parse_invalid_arg", "diff_cover/tests/test_diff_cover_tool.py::test_parse_with_ignored_unstaged" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-06-29T18:54:17Z"
apache-2.0
Bachmann1234__diff_cover-235
diff --git a/README.rst b/README.rst index 82cc9b5..1dd50fc 100644 --- a/README.rst +++ b/README.rst @@ -246,6 +246,40 @@ It can be enabled by using the ``-q``/``--quiet`` flag: If enabled, the tool will only print errors and failures but no information or warning messages. +Configuration files +------------------- +Both tools allow users to specify the options in a configuration file with `--config-file`/`-c`: + +.. code:: bash + + diff-cover coverage.xml --config-file myconfig.toml + diff-quality --violations=pycodestyle --config-file myconfig.toml + +Currently, only TOML files are supported. +Please note, that only non-mandatory options are supported. +If an option is specified in the configuration file and over the command line, the value of the +command line is used. + +TOML configuration +~~~~~~~~~~~~~~~~~~ + +The parser will only react to configuration files ending with `.toml`. +To use it, install `diff-cover` with the extra requirement `toml`. + +The option names are the same as on the command line, but all dashes should be underscores. +If an option can be specified multiple times, the configuration value should be specified as a list. + +.. code:: toml + + [tool.diff_quality] + compare_branch = "origin/feature" + quiet = true + + [tool.diff_quality] + compare_branch = "origin/feature" + ignore_staged = true + + Troubleshooting ---------------------- diff --git a/diff_cover/config_parser.py b/diff_cover/config_parser.py new file mode 100644 index 0000000..2621f4a --- /dev/null +++ b/diff_cover/config_parser.py @@ -0,0 +1,82 @@ +import abc +import enum + +try: + import tomli + + _HAS_TOML = True +except ImportError: # pragma: no cover + _HAS_TOML = False + + +class Tool(enum.Enum): + DIFF_COVER = enum.auto() + DIFF_QUALITY = enum.auto() + + +class ParserError(Exception): + pass + + +class ConfigParser(abc.ABC): + def __init__(self, file_name, tool): + self._file_name = file_name + self._tool = tool + + @abc.abstractmethod + def parse(self): + """Returns a dict of the parsed data or None if the file cannot be handled.""" + + +class TOMLParser(ConfigParser): + def __init__(self, file_name, tool): + super().__init__(file_name, tool) + self._section = "diff_cover" if tool == Tool.DIFF_COVER else "diff_quality" + + def parse(self): + if not self._file_name.endswith(".toml"): + return None + + if not _HAS_TOML: + raise ParserError("tomli is not installed") + + with open(self._file_name, "rb") as file_handle: + config = tomli.load(file_handle) + + config = config.get("tool", {}).get(self._section, {}) + if not config: + raise ParserError(f"No 'tool.{self._section}' configuration available") + return config + + +_PARSERS = [TOMLParser] + + +def _parse_config_file(file_name, tool): + for parser_class in _PARSERS: + parser = parser_class(file_name, tool) + config = parser.parse() + if config: + return config + + raise ParserError(f"No config parser could handle {file_name}") + + +def get_config(parser, argv, defaults, tool): + cli_config = vars(parser.parse_args(argv)) + if cli_config["config_file"]: + file_config = _parse_config_file(cli_config["config_file"], tool) + else: + file_config = {} + + config = defaults + for config_dict in [file_config, cli_config]: + for key, value in config_dict.items(): + if value is None: + # if the value is None, it's a default one; only override if not present + config.setdefault(key, value) + else: + # else just override the existing value + config[key] = value + + return config diff --git a/diff_cover/diff_cover_tool.py b/diff_cover/diff_cover_tool.py 
index fc660ae..1e88187 100644 --- a/diff_cover/diff_cover_tool.py +++ b/diff_cover/diff_cover_tool.py @@ -6,6 +6,7 @@ import sys import xml.etree.ElementTree as etree from diff_cover import DESCRIPTION, VERSION +from diff_cover.config_parser import Tool, get_config from diff_cover.diff_reporter import GitDiffReporter from diff_cover.git_diff import GitDiffTool from diff_cover.git_path import GitPathTool @@ -37,6 +38,7 @@ DIFF_RANGE_NOTATION_HELP = ( QUIET_HELP = "Only print errors and failures" SHOW_UNCOVERED = "Show uncovered lines on the console" INCLUDE_UNTRACKED_HELP = "Include untracked files" +CONFIG_FILE_HELP = "The configuration file to use" LOGGER = logging.getLogger(__name__) @@ -65,7 +67,6 @@ def parse_coverage_args(argv): "--html-report", metavar="FILENAME", type=str, - default=None, help=HTML_REPORT_HELP, ) @@ -73,7 +74,6 @@ def parse_coverage_args(argv): "--json-report", metavar="FILENAME", type=str, - default=None, help=JSON_REPORT_HELP, ) @@ -81,19 +81,17 @@ def parse_coverage_args(argv): "--markdown-report", metavar="FILENAME", type=str, - default=None, help=MARKDOWN_REPORT_HELP, ) parser.add_argument( - "--show-uncovered", action="store_true", default=False, help=SHOW_UNCOVERED + "--show-uncovered", action="store_true", default=None, help=SHOW_UNCOVERED ) parser.add_argument( "--external-css-file", metavar="FILENAME", type=str, - default=None, help=CSS_FILE_HELP, ) @@ -101,7 +99,6 @@ def parse_coverage_args(argv): "--compare-branch", metavar="BRANCH", type=str, - default="origin/main", help=COMPARE_BRANCH_HELP, ) @@ -110,20 +107,20 @@ def parse_coverage_args(argv): ) parser.add_argument( - "--ignore-staged", action="store_true", default=False, help=IGNORE_STAGED_HELP + "--ignore-staged", action="store_true", default=None, help=IGNORE_STAGED_HELP ) parser.add_argument( "--ignore-unstaged", action="store_true", - default=False, + default=None, help=IGNORE_UNSTAGED_HELP, ) parser.add_argument( "--include-untracked", action="store_true", - default=False, + default=None, help=INCLUDE_UNTRACKED_HELP, ) @@ -136,7 +133,6 @@ def parse_coverage_args(argv): metavar="DIRECTORY", type=str, nargs="+", - default=["src/main/java", "src/test/java"], help=SRC_ROOTS_HELP, ) @@ -144,7 +140,6 @@ def parse_coverage_args(argv): "--diff-range-notation", metavar="RANGE_NOTATION", type=str, - default="...", choices=["...", ".."], help=DIFF_RANGE_NOTATION_HELP, ) @@ -154,13 +149,32 @@ def parse_coverage_args(argv): parser.add_argument( "--ignore-whitespace", action="store_true", - default=False, + default=None, help=IGNORE_WHITESPACE, ) - parser.add_argument("-q", "--quiet", action="store_true", help=QUIET_HELP) + parser.add_argument( + "-q", "--quiet", action="store_true", default=None, help=QUIET_HELP + ) + + parser.add_argument( + "-c", "--config-file", help=CONFIG_FILE_HELP, metavar="CONFIG_FILE" + ) - return vars(parser.parse_args(argv)) + defaults = { + "show_uncovered": False, + "compare_branch": "origin/main", + "fail_under": "0", + "ignore_staged": False, + "ignore_unstaged": False, + "ignore_untracked": False, + "src_roots": ["src/main/java", "src/test/java"], + "ignore_whitespace": False, + "diff_range_notation": "...", + "quiet": False, + } + + return get_config(parser=parser, argv=argv, defaults=defaults, tool=Tool.DIFF_COVER) def generate_coverage_report( diff --git a/diff_cover/diff_quality_tool.py b/diff_cover/diff_quality_tool.py index b9d3e37..29663e5 100644 --- a/diff_cover/diff_quality_tool.py +++ b/diff_cover/diff_quality_tool.py @@ -12,8 +12,10 @@ import pluggy import 
diff_cover from diff_cover import hookspecs +from diff_cover.config_parser import Tool, get_config from diff_cover.diff_cover_tool import ( COMPARE_BRANCH_HELP, + CONFIG_FILE_HELP, CSS_FILE_HELP, DIFF_RANGE_NOTATION_HELP, EXCLUDE_HELP, @@ -103,7 +105,6 @@ def parse_quality_args(argv): "--html-report", metavar="FILENAME", type=str, - default=None, help=HTML_REPORT_HELP, ) @@ -111,7 +112,6 @@ def parse_quality_args(argv): "--json-report", metavar="FILENAME", type=str, - default=None, help=JSON_REPORT_HELP, ) @@ -119,7 +119,6 @@ def parse_quality_args(argv): "--markdown-report", metavar="FILENAME", type=str, - default=None, help=MARKDOWN_REPORT_HELP, ) @@ -127,7 +126,6 @@ def parse_quality_args(argv): "--external-css-file", metavar="FILENAME", type=str, - default=None, help=CSS_FILE_HELP, ) @@ -135,37 +133,32 @@ def parse_quality_args(argv): "--compare-branch", metavar="BRANCH", type=str, - default="origin/main", help=COMPARE_BRANCH_HELP, ) - parser.add_argument( - "input_reports", type=str, nargs="*", default=[], help=INPUT_REPORTS_HELP - ) + parser.add_argument("input_reports", type=str, nargs="*", help=INPUT_REPORTS_HELP) - parser.add_argument( - "--options", type=str, nargs="?", default=None, help=OPTIONS_HELP - ) + parser.add_argument("--options", type=str, nargs="?", help=OPTIONS_HELP) parser.add_argument( - "--fail-under", metavar="SCORE", type=float, default="0", help=FAIL_UNDER_HELP + "--fail-under", metavar="SCORE", type=float, help=FAIL_UNDER_HELP ) parser.add_argument( - "--ignore-staged", action="store_true", default=False, help=IGNORE_STAGED_HELP + "--ignore-staged", action="store_true", default=None, help=IGNORE_STAGED_HELP ) parser.add_argument( "--ignore-unstaged", action="store_true", - default=False, + default=None, help=IGNORE_UNSTAGED_HELP, ) parser.add_argument( "--include-untracked", action="store_true", - default=False, + default=None, help=INCLUDE_UNTRACKED_HELP, ) @@ -181,7 +174,6 @@ def parse_quality_args(argv): "--diff-range-notation", metavar="RANGE_NOTATION", type=str, - default="...", help=DIFF_RANGE_NOTATION_HELP, ) @@ -193,13 +185,33 @@ def parse_quality_args(argv): parser.add_argument( "--ignore-whitespace", action="store_true", - default=False, + default=None, help=IGNORE_WHITESPACE, ) - parser.add_argument("-q", "--quiet", action="store_true", help=QUIET_HELP) + parser.add_argument( + "-q", "--quiet", action="store_true", default=None, help=QUIET_HELP + ) + + parser.add_argument( + "-c", "--config-file", help=CONFIG_FILE_HELP, metavar="CONFIG_FILE" + ) - return vars(parser.parse_args(argv)) + defaults = { + "ignore_whitespace": False, + "compare_branch": "origin/main", + "diff_range_notation": "...", + "input_reports": [], + "fail_under": 0, + "ignore_staged": False, + "ignore_unstaged": False, + "ignore_untracked": False, + "quiet": False, + } + + return get_config( + parser=parser, argv=argv, defaults=defaults, tool=Tool.DIFF_QUALITY + ) def generate_quality_report( diff --git a/poetry.lock b/poetry.lock index 9b11ba7..98838a3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -37,10 +37,10 @@ optional = false python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" [package.extras] -dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit"] docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", 
"pytest-mypy-plugins", "zope.interface"] tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins"] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit"] [[package]] name = "black" @@ -62,10 +62,10 @@ typed-ast = {version = ">=1.4.2", markers = "python_version < \"3.8\""} typing-extensions = {version = ">=3.7.4", markers = "python_version < \"3.8\""} [package.extras] -colorama = ["colorama (>=0.4.3)"] -d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"] python2 = ["typed-ast (>=1.4.2)"] +colorama = ["colorama (>=0.4.3)"] uvloop = ["uvloop (>=0.15.2)"] +d = ["aiohttp (>=3.6.0)", "aiohttp-cors (>=0.4.0)"] [[package]] name = "chardet" @@ -164,8 +164,8 @@ zipp = ">=0.5" [package.extras] docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] -perf = ["ipython"] testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pep517", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy", "importlib-resources (>=1.3)"] +perf = ["ipython"] [[package]] name = "inflect" @@ -196,9 +196,9 @@ optional = false python-versions = ">=3.6.1,<4.0" [package.extras] +colors = ["colorama (>=0.4.3,<0.5.0)"] pipfile_deprecated_finder = ["pipreqs", "requirementslib"] requirements_deprecated_finder = ["pipreqs", "pip-api"] -colors = ["colorama (>=0.4.3,<0.5.0)"] plugins = ["setuptools"] [[package]] @@ -535,10 +535,13 @@ python-versions = ">=3.6" docs = ["sphinx", "jaraco.packaging (>=8.2)", "rst.linker (>=1.9)"] testing = ["pytest (>=4.6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy"] +[extras] +toml = ["tomli"] + [metadata] lock-version = "1.1" python-versions = "^3.6.2" -content-hash = "0b95c193832e8a28f469376c38f6d26f7a7cad6ce35bf557ffc5084fb6c30d1e" +content-hash = "adaa725397777a11ed97ecaafb0f963c2ea1c246359ba729fbc2c79c8d752b37" [metadata.files] appdirs = [ diff --git a/pyproject.toml b/pyproject.toml index 1e4aa60..37cd898 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -62,6 +62,9 @@ black = "^21.7b0" isort = "^5.9.3" doc8 = "0.9.0" +[tool.poetry.extras] +toml = ["tomli"] + [build-system] requires = ["poetry-core>=1.0.0"] build-backend = "poetry.core.masonry.api"
Bachmann1234/diff_cover
d409f0ed01b47bd10522d9f55d450b5689945d1a
diff --git a/tests/test_config_parser.py b/tests/test_config_parser.py new file mode 100644 index 0000000..eb0e300 --- /dev/null +++ b/tests/test_config_parser.py @@ -0,0 +1,100 @@ +import pytest + +from diff_cover import config_parser +from diff_cover.config_parser import ParserError, TOMLParser, Tool, get_config + +tools = pytest.mark.parametrize("tool", list(Tool)) + + +class TestTOMLParser: + @tools + def test_parse_no_toml_file(self, tool): + parser = TOMLParser("myfile", tool) + assert parser.parse() is None + + @tools + def test_parse_but_no_tomli_installed(self, tool, mocker): + mocker.patch.object(config_parser, "_HAS_TOML", False) + parser = TOMLParser("myfile.toml", tool) + with pytest.raises(ParserError): + parser.parse() + + @pytest.mark.parametrize( + "tool,content", + [ + (Tool.DIFF_COVER, ""), + (Tool.DIFF_COVER, "[tool.diff_quality]"), + (Tool.DIFF_QUALITY, ""), + (Tool.DIFF_COVER, "[tool.diff_cover]"), + ], + ) + def test_parse_but_no_data(self, tool, content, tmp_path): + toml_file = tmp_path / "foo.toml" + toml_file.write_text(content) + + parser = TOMLParser(str(toml_file), tool) + with pytest.raises(ParserError): + parser.parse() + + @pytest.mark.parametrize( + "tool,content,expected", + [ + (Tool.DIFF_COVER, "[tool.diff_cover]\nquiet=true", {"quiet": True}), + (Tool.DIFF_QUALITY, "[tool.diff_quality]\nquiet=true", {"quiet": True}), + ], + ) + def test_parse(self, tool, content, tmp_path, expected): + toml_file = tmp_path / "foo.toml" + toml_file.write_text(content) + + parser = TOMLParser(str(toml_file), tool) + assert parser.parse() == expected + + +@tools +def test_get_config_unrecognized_file(mocker, tool): + parser = mocker.Mock() + parser.parse_args().__dict__ = {"config_file": "foo.bar"} + with pytest.raises(ParserError): + get_config(parser, argv=[], defaults={}, tool=tool) + + +@pytest.mark.parametrize( + "tool,cli_config,defaults,file_content,expected", + [ + ( + Tool.DIFF_COVER, + {"a": 2, "b": None, "c": None}, + {"a": 4, "b": 3}, + None, + {"a": 2, "b": 3, "c": None}, + ), + ( + Tool.DIFF_QUALITY, + {"a": 2, "b": None, "c": None}, + {"a": 4, "b": 3}, + None, + {"a": 2, "b": 3, "c": None}, + ), + ( + Tool.DIFF_COVER, + {"a": 2, "b": None, "c": None, "d": None}, + {"a": 4, "b": 3}, + "[tool.diff_cover]\na=1\nd=6", + {"a": 2, "b": 3, "c": None, "d": 6}, + ), + ], +) +def test_get_config( + mocker, tmp_path, tool, cli_config, defaults, file_content, expected +): + if file_content: + toml_file = tmp_path / "foo.toml" + toml_file.write_text(file_content) + cli_config["config_file"] = expected["config_file"] = str(toml_file) + else: + cli_config["config_file"] = expected["config_file"] = None + + parser = mocker.Mock() + parser.parse_args().__dict__ = cli_config + assert get_config(parser, argv=[], defaults=defaults, tool=tool) == expected
Support configuration files ### Current state All options are given over the CLI which works fine for the most use cases ### My suggestion In some use cases it's better to use configuration files to define options. In addition, these options can be overwritten with options given to the CLI. ### Implementation Personally, I would first create a configuration file reader (abstract so it can handle other files in the future). Then I would add a TOML parser (pyproject.toml), because this seems to be the go-to way. Example toml ```toml [tool.diff_cover] compare_branch = "origin/foo" fail_under = 42 [tool.diff_quality] fail_under = 23 include = [ "project/foo/**" ] ``` So, the toml file: - uses both tools with their name - same options as on CLI, but with underscores - if an option can be specified multiple times, the value is a list ### Flow 1. Check if the user specifies a configuration file over CLI; if yes, use that (`--config myfile.toml`) 2. If no, then check if a pyproject.toml exists and check if it contains any section which belongs to us **(do we want to have a magic file name?)** 3. Afterwards merge all configuration values with the given CLI parameters (CLI overwrite config) 4. Proceed as normal
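A minimal sketch of the TOML lookup this flow implies, along the lines of the `TOMLParser` added in the patch; it assumes `tomli` is installed and that the file carries a `[tool.diff_cover]` table like the example above.

```python
# Sketch of reading diff-cover options from a TOML file; section and key names follow
# the example in the issue, and CLI values are assumed to take precedence afterwards.
import tomli

with open("pyproject.toml", "rb") as fh:  # tomli requires binary mode
    data = tomli.load(fh)

file_config = data.get("tool", {}).get("diff_cover", {})
print(file_config)  # e.g. {'compare_branch': 'origin/foo', 'fail_under': 42}
```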
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_config_parser.py::TestTOMLParser::test_parse_no_toml_file[Tool.DIFF_COVER]", "tests/test_config_parser.py::TestTOMLParser::test_parse_no_toml_file[Tool.DIFF_QUALITY]", "tests/test_config_parser.py::TestTOMLParser::test_parse_but_no_tomli_installed[Tool.DIFF_COVER]", "tests/test_config_parser.py::TestTOMLParser::test_parse_but_no_tomli_installed[Tool.DIFF_QUALITY]", "tests/test_config_parser.py::TestTOMLParser::test_parse_but_no_data[Tool.DIFF_COVER-]", "tests/test_config_parser.py::TestTOMLParser::test_parse_but_no_data[Tool.DIFF_COVER-[tool.diff_quality]]", "tests/test_config_parser.py::TestTOMLParser::test_parse_but_no_data[Tool.DIFF_QUALITY-]", "tests/test_config_parser.py::TestTOMLParser::test_parse_but_no_data[Tool.DIFF_COVER-[tool.diff_cover]]", "tests/test_config_parser.py::TestTOMLParser::test_parse[Tool.DIFF_COVER-[tool.diff_cover]\\nquiet=true-expected0]", "tests/test_config_parser.py::TestTOMLParser::test_parse[Tool.DIFF_QUALITY-[tool.diff_quality]\\nquiet=true-expected1]", "tests/test_config_parser.py::test_get_config_unrecognized_file[Tool.DIFF_COVER]", "tests/test_config_parser.py::test_get_config_unrecognized_file[Tool.DIFF_QUALITY]", "tests/test_config_parser.py::test_get_config[Tool.DIFF_COVER-cli_config0-defaults0-None-expected0]", "tests/test_config_parser.py::test_get_config[Tool.DIFF_QUALITY-cli_config1-defaults1-None-expected1]", "tests/test_config_parser.py::test_get_config[Tool.DIFF_COVER-cli_config2-defaults2-[tool.diff_cover]\\na=1\\nd=6-expected2]" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2021-08-10T12:14:09Z"
apache-2.0
Bachmann1234__diff_cover-277
diff --git a/diff_cover/violationsreporters/violations_reporter.py b/diff_cover/violationsreporters/violations_reporter.py index c61aff6..75ae51f 100644 --- a/diff_cover/violationsreporters/violations_reporter.py +++ b/diff_cover/violationsreporters/violations_reporter.py @@ -402,7 +402,7 @@ class PylintDriver(QualityDriver): # Match lines of the form: # path/to/file.py:123: [C0111] Missing docstring # path/to/file.py:456: [C0111, Foo.bar] Missing docstring - self.multi_line_violation_regex = re.compile(r"==(\w|.+):(.*)") + self.multi_line_violation_regex = re.compile(r"==((?:\w|\.)+?):\[?(\d+)") self.dupe_code_violation_regex = re.compile(r"Similar lines in (\d+) files") def _process_dupe_code_violation(self, lines, current_line, message):
Bachmann1234/diff_cover
d9efc6c0e5f1c94a25d0772d149d08ecb2542aa1
diff --git a/tests/test_violations_reporter.py b/tests/test_violations_reporter.py index a4d4adc..927acbd 100644 --- a/tests/test_violations_reporter.py +++ b/tests/test_violations_reporter.py @@ -1279,6 +1279,11 @@ class TestPylintQualityReporterTest: import json import logging import random + file2.py:170: [R0801] Similar lines in 2 files + ==file1:[170:172] + ==student.views:[4:6] + import foo + import bar path/to/file2.py:100: [W0212, openid_login_complete] Access to a protected member """ ) @@ -1300,6 +1305,7 @@ class TestPylintQualityReporterTest: ), Violation(149, "C0324: Foo.__dict__: Comma not followed by a space"), Violation(162, "R0801: Similar lines in 2 files"), + Violation(170, "R0801: Similar lines in 2 files"), Violation(113, "W0613: cache_relation.clear_pk: Unused argument 'cls'"), ]
diff-quality fails with ValueError: invalid literal for int() with base 10: '470]' When running `diff-quality` tool I have the following exception: ``` Traceback (most recent call last): File "/usr/local/bin/diff-quality", line 8, in <module> sys.exit(main()) File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/diff_quality_tool.py", line 348, in main percent_passing = generate_quality_report( File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/diff_quality_tool.py", line 261, in generate_quality_report reporter.generate_report(output_file) File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/report_generator.py", line 265, in generate_report report = template.render(self._context()) File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/report_generator.py", line 314, in _context context = super().report_dict() File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/report_generator.py", line 187, in report_dict src_stats = {src: self._src_path_stats(src) for src in self.src_paths()} File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/report_generator.py", line 84, in src_paths for src, summary in self._diff_violations().items() File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/report_generator.py", line 176, in _diff_violations self._diff_violations_dict = { File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/report_generator.py", line 178, in <dictcomp> self._violations.violations(src_path), File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/violationsreporters/base.py", line 159, in violations self.violations_dict.update(self.driver.parse_reports([output])) File "/home/jenkins/.local/lib/python3.8/site-packages/diff_cover/violationsreporters/violations_reporter.py", line 469, in parse_reports violation = Violation(int(line_number), error_str) ValueError: invalid literal for int() with base 10: '470]' ``` The reason for this is the following: `violations_reporter` expects, that the output for duplicating lines looks like: ``` file1.py:162: [R0801] Similar lines in 2 files ==file1:162 ==student.views:4 ``` but it can look like: ``` file1.py:162: [R0801] Similar lines in 2 files ==file1:[162:165] ==student.views:[4:7] ``` And the [`multi_line_violation_regex`](https://github.com/Bachmann1234/diff_cover/blob/61a0ae286a54e7fcbacf063d339ddace6fd84155/diff_cover/violationsreporters/violations_reporter.py#L405) regular expression `==(\w|.+):(.*)` cannot parse it correctly. The correct regular expression is `==((?:\w|\.)+?):\[?(\d+)`.
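A quick reproduction of the parsing difference described in the issue; the two patterns are the old and the proposed regular expressions, and the sample lines are the two pylint duplicate-code output formats quoted above.

```python
# Reproduces the failure mode: with the old pattern the second group captures '7]',
# which int() cannot parse; the fixed pattern captures the bare line number.
import re

old_pattern = re.compile(r"==(\w|.+):(.*)")
new_pattern = re.compile(r"==((?:\w|\.)+?):\[?(\d+)")

for line in ["==student.views:4", "==student.views:[4:7]"]:
    print(line, old_pattern.search(line).groups(), new_pattern.search(line).groups())

# ==student.views:4      ('student.views', '4')       ('student.views', '4')
# ==student.views:[4:7]  ('student.views:[4', '7]')   ('student.views', '4')
```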
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_quality" ]
[ "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_violations", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_non_python_violations", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_non_python_violations_empty_path", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_two_inputs_first_violate", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_two_inputs_second_violate", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_three_inputs", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_different_files_in_inputs", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_empty_violations", "tests/test_violations_reporter.py::TestXmlCoverageReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_violations", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_two_inputs_first_violate", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_two_inputs_second_violate", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_three_inputs", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_different_files_in_inputs", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_empty_violations", "tests/test_violations_reporter.py::TestCloverXmlCoverageReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_violations", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_two_inputs_first_violate", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_two_inputs_second_violate", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_three_inputs", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_different_files_in_inputs", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_empty_violations", "tests/test_violations_reporter.py::TestJacocoXmlCoverageReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_quality", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_no_quality_issues_newline", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_no_quality_issues_emptystring", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_quality_error", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_no_python_file", "tests/test_violations_reporter.py::TestPycodestyleQualityReporterTest::test_quality_pregenerated_report", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_quality", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_no_quality_issues_newline", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_no_quality_issues_emptystring", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_quality_error", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_no_python_file", "tests/test_violations_reporter.py::TestPyflakesQualityReporterTest::test_quality_pregenerated_report", 
"tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_quality", "tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_no_quality_issues_newline", "tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_no_quality_issues_emptystring", "tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_quality_error", "tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_no_python_file", "tests/test_violations_reporter.py::TestFlake8QualityReporterTest::test_quality_pregenerated_report", "tests/test_violations_reporter.py::TestPydocstlyeQualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestPydocstlyeQualityReporterTest::test_no_python_file", "tests/test_violations_reporter.py::TestPydocstlyeQualityReporterTest::test_quality", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_no_python_file", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_unicode", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_unicode_continuation_char", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_non_integer_line_num", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_quality_deprecation_warning", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_quality_error", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_no_quality_issues_newline", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_no_quality_issues_emptystring", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_quality_pregenerated_report", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_quality_pregenerated_report_continuation_char", "tests/test_violations_reporter.py::TestPylintQualityReporterTest::test_windows_paths", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_quality", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_no_quality_issues_newline", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_no_quality_issues_emptystring", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_quality_error", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_no_js_file", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_quality_pregenerated_report", "tests/test_violations_reporter.py::TestJsHintQualityReporterTest::test_not_installed", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_quality", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_no_quality_issues_newline", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_no_quality_issues_emptystring", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_quality_error", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_no_such_file", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_no_js_file", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_quality_pregenerated_report", "tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_not_installed", 
"tests/test_violations_reporter.py::TestESLintQualityReporterTest::test_report_root_path", "tests/test_violations_reporter.py::TestSimpleCommandTestCase::test_run_simple_failure", "tests/test_violations_reporter.py::TestSimpleCommandTestCase::test_run_simple_success", "tests/test_violations_reporter.py::TestSubprocessErrorTestCase::test_quality_reporter", "tests/test_violations_reporter.py::TestCppcheckQualityDriverTest::test_parse_report" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2022-04-14T15:38:43Z"
apache-2.0
Bachmann1234__diff_cover-300
diff --git a/README.rst b/README.rst
index f184bc5..6c8180b 100644
--- a/README.rst
+++ b/README.rst
@@ -64,7 +64,8 @@ To install the development version:
 
     git clone https://github.com/Bachmann1234/diff-cover.git
     cd diff-cover
-    python setup.py install
+    poetry install
+    poetry shell
 
 
 Getting Started
diff --git a/diff_cover/report_generator.py b/diff_cover/report_generator.py
index b77a182..35fa06f 100644
--- a/diff_cover/report_generator.py
+++ b/diff_cover/report_generator.py
@@ -172,15 +172,29 @@ class BaseReportGenerator(ABC):
 
         To make this efficient, we cache and reuse the result.
         """
+        src_paths_changed = self._diff.src_paths_changed()
         if not self._diff_violations_dict:
-            self._diff_violations_dict = {
-                src_path: DiffViolations(
-                    self._violations.violations(src_path),
-                    self._violations.measured_lines(src_path),
-                    self._diff.lines_changed(src_path),
+            try:
+                violations = self._violations.violations_batch(
+                    src_paths_changed
                 )
-                for src_path in self._diff.src_paths_changed()
-            }
+                self._diff_violations_dict = {
+                    src_path: DiffViolations(
+                        violations.get(src_path, []),
+                        self._violations.measured_lines(src_path),
+                        self._diff.lines_changed(src_path),
+                    )
+                    for src_path in src_paths_changed
+                }
+            except NotImplementedError:
+                self._diff_violations_dict = {
+                    src_path: DiffViolations(
+                        self._violations.violations(src_path),
+                        self._violations.measured_lines(src_path),
+                        self._diff.lines_changed(src_path),
+                    )
+                    for src_path in src_paths_changed
+                }
         return self._diff_violations_dict
 
     def report_dict(self):
diff --git a/diff_cover/violationsreporters/base.py b/diff_cover/violationsreporters/base.py
index f0d7000..3f1a6ca 100644
--- a/diff_cover/violationsreporters/base.py
+++ b/diff_cover/violationsreporters/base.py
@@ -34,6 +34,19 @@ class BaseViolationReporter(ABC):
         Return a list of Violations recorded in `src_path`.
         """
 
+    def violations_batch(self, src_paths):
+        """
+        Return a dict of Violations recorded in `src_paths`.
+
+        src_paths: Sequence[str] - sequence of paths to source files
+
+        Returns a Dict[str, List[Violation]]. Keys are paths to source files.
+
+        If a subclass does not implement this function, violations() will be
+        called instead, once for each src_path in src_paths.
+        """
+        raise NotImplementedError
+
     def measured_lines(self, src_path):
         """
         Return a list of the lines in src_path that were measured
Bachmann1234/diff_cover
72722598401aa2f4c0996c50841c560ad6492a40
diff --git a/tests/test_report_generator.py b/tests/test_report_generator.py
index 49b92ed..97d81b9 100644
--- a/tests/test_report_generator.py
+++ b/tests/test_report_generator.py
@@ -1,5 +1,6 @@
 # pylint: disable=attribute-defined-outside-init,not-callable
 
+import copy
 import json
 from io import BytesIO
 from textwrap import dedent
@@ -53,7 +54,9 @@ class BaseReportGeneratorTest:
     @pytest.fixture(autouse=True)
     def base_setup(self, mocker):
         # Create mocks of the dependencies
-        self.coverage = mocker.MagicMock(BaseViolationReporter)
+        self.coverage = mocker.MagicMock(
+            BaseViolationReporter,
+        )
         self.diff = mocker.MagicMock(BaseDiffReporter)
 
         # Patch snippet loading to always return the same string
@@ -81,6 +84,8 @@ class BaseReportGeneratorTest:
 
         self._violations_dict = dict()
         self.coverage.violations.side_effect = self._violations_dict.get
 
+        self.coverage.violations_batch.side_effect = NotImplementedError
+
         self._measured_dict = dict()
         self.coverage.measured_lines.side_effect = self._measured_dict.get
@@ -539,3 +544,26 @@ class TestMarkdownReportGenerator(BaseReportGeneratorTest):
         # Verify that we got the expected string
         expected = load_fixture("markdown_report_two_snippets.md").strip()
         self.assert_report(expected)
+
+
+class TestSimpleReportGeneratorWithBatchViolationReporter(BaseReportGeneratorTest):
+    REPORT_GENERATOR_CLASS = SimpleReportGenerator
+
+    @pytest.fixture(autouse=True)
+    def setup(self):
+        self.use_default_values()
+        # Have violations_batch() return the violations.
+        self.coverage.violations_batch.side_effect = None
+        self.coverage.violations_batch.return_value = copy.deepcopy(
+            self._violations_dict
+        )
+        # Have violations() return an empty list to ensure violations_batch()
+        # is used.
+        for src in self.SRC_PATHS:
+            self.set_violations(src, [])
+
+    def test_violation_lines(self):
+        # By construction, each file has the same coverage information
+        expected = [10, 11]
+        for src_path in self.SRC_PATHS:
+            assert self.report.violation_lines(src_path) == expected
Add support for report generator plugins that process modified files as a batch, rather than individually
SQLFluff implements the `diff-quality` plugin protocol. SQLFluff users have requested that the `diff-quality` integration should take advantage of SQLFluff's multiprocessing capability (processing files in parallel across multiple processes).

In order to do this, `BaseReportGenerator._diff_violations()` would need to be modified to call a different function if provided by the plugin, e.g. `violations_batch()` rather than `violations()`. If `violations_batch()` is not implemented, fall back to using `violations()`.

I'm happy to provide a PR to implement this, but wanted to ask if this is a reasonable feature and if there are any concerns with the proposed implementation. πŸ™πŸ½
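The dispatch being proposed is small; the sketch below illustrates it with stand-in classes rather than diff-cover's real ones (the names `PerFileReporter`, `BatchReporter`, and `collect` are hypothetical), so it shows the shape of the fallback rather than the actual implementation.

```python
# A batch-capable reporter answers for all files in one call; any other
# reporter raises NotImplementedError and is queried one path at a time.

class PerFileReporter:
    def violations(self, src_path):
        return [f"violation in {src_path}"]

    def violations_batch(self, src_paths):
        raise NotImplementedError


class BatchReporter(PerFileReporter):
    def violations_batch(self, src_paths):
        # A real plugin could lint all of src_paths in parallel here.
        return {path: [f"violation in {path}"] for path in src_paths}


def collect(reporter, src_paths):
    try:
        return reporter.violations_batch(src_paths)
    except NotImplementedError:
        return {path: reporter.violations(path) for path in src_paths}


print(collect(PerFileReporter(), ["a.py", "b.py"]))
print(collect(BatchReporter(), ["a.py", "b.py"]))
```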
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_report_generator.py::TestSimpleReportGenerator::test_total_num_lines", "tests/test_report_generator.py::TestSimpleReportGenerator::test_total_num_missing", "tests/test_report_generator.py::TestSimpleReportGenerator::test_percent_covered", "tests/test_report_generator.py::TestSimpleReportGenerator::test_src_paths", "tests/test_report_generator.py::TestSimpleReportGenerator::test_diff_name", "tests/test_report_generator.py::TestSimpleReportGenerator::test_violation_lines", "tests/test_report_generator.py::TestSimpleReportGenerator::test_src_with_no_info", "tests/test_report_generator.py::TestSimpleReportGenerator::test_total_percent_covered", "tests/test_report_generator.py::TestSimpleReportGenerator::test_coverage_name", "tests/test_report_generator.py::TestSimpleReportGenerator::test_src_paths_not_measured", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_generate_report", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_hundred_percent", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_empty_report", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_multiple_snippets", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_one_snippet", "tests/test_report_generator.py::TestTemplateReportGenerator::test_empty_list", "tests/test_report_generator.py::TestTemplateReportGenerator::test_combine_adjacent_lines_no_adjacent", "tests/test_report_generator.py::TestTemplateReportGenerator::test_one_number", "tests/test_report_generator.py::TestTemplateReportGenerator::test_combine_adjacent_lines", "tests/test_report_generator.py::TestSimpleReportGeneratorWithBatchViolationReporter::test_violation_lines", "tests/test_report_generator.py::TestJsonReportGenerator::test_empty_report", "tests/test_report_generator.py::TestJsonReportGenerator::test_hundred_percent", "tests/test_report_generator.py::TestJsonReportGenerator::test_generate_report", "tests/test_report_generator.py::TestStringReportGenerator::test_generate_report", "tests/test_report_generator.py::TestStringReportGenerator::test_empty_report", "tests/test_report_generator.py::TestStringReportGenerator::test_hundred_percent", "tests/test_report_generator.py::TestHtmlReportGenerator::test_empty_report", "tests/test_report_generator.py::TestHtmlReportGenerator::test_one_snippet", "tests/test_report_generator.py::TestHtmlReportGenerator::test_generate_report", "tests/test_report_generator.py::TestHtmlReportGenerator::test_multiple_snippets" ]
[]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2022-11-26T22:01:06Z"
apache-2.0
Bachmann1234__diff_cover-325
diff --git a/diff_cover/report_generator.py b/diff_cover/report_generator.py
index ad2e58a..0262838 100644
--- a/diff_cover/report_generator.py
+++ b/diff_cover/report_generator.py
@@ -105,6 +105,25 @@ class BaseReportGenerator(ABC):
 
         return None
 
+    def covered_lines(self, src_path):
+        """
+        Returns a list of lines covered in measured lines (integers)
+        in `src_path` that were changed.
+
+        If we have no coverage information for
+        `src_path`, returns an empty list.
+        """
+        diff_violations = self._diff_violations().get(src_path)
+
+        if diff_violations is None:
+            return []
+
+        return sorted(
+            set(diff_violations.measured_lines).difference(
+                set(self.violation_lines(src_path))
+            )
+        )
+
     def violation_lines(self, src_path):
         """
         Return a list of lines in violation (integers)
@@ -213,6 +232,8 @@ class BaseReportGenerator(ABC):
 
         Return a dict of statistics for the source file at `src_path`.
         """
+        covered_lines = self.covered_lines(src_path)
+
         # Find violation lines
         violation_lines = self.violation_lines(src_path)
         violations = sorted(self._diff_violations()[src_path].violations)
@@ -220,6 +241,7 @@ class BaseReportGenerator(ABC):
 
         return {
             "percent_covered": self.percent_covered(src_path),
             "violation_lines": violation_lines,
+            "covered_lines": covered_lines,
             "violations": violations,
         }
Bachmann1234/diff_cover
7d5b22ea8aa5f129e8c3c970d736a4c52cbf8d02
diff --git a/tests/test_report_generator.py b/tests/test_report_generator.py
index e7e7180..926d82b 100644
--- a/tests/test_report_generator.py
+++ b/tests/test_report_generator.py
@@ -280,11 +280,13 @@ class TestJsonReportGenerator(BaseReportGeneratorTest):
             "diff_name": "main",
             "src_stats": {
                 "file1.py": {
+                    "covered_lines": [2, 3, 4, 15],
                     "percent_covered": 66.66666666666667,
                     "violation_lines": [10, 11],
                     "violations": [[10, None], [11, None]],
                 },
                 "subdir/file2.py": {
+                    "covered_lines": [2, 3, 4, 15],
                     "percent_covered": 66.66666666666667,
                     "violation_lines": [10, 11],
                     "violations": [[10, None], [11, None]],
@@ -312,6 +314,7 @@ class TestJsonReportGenerator(BaseReportGeneratorTest):
             "diff_name": "main",
             "src_stats": {
                 "file.py": {
+                    "covered_lines": [2],
                     "percent_covered": 100.0,
                     "violation_lines": [],
                     "violations": [],
any ways to get all measured lines' numbers from the json report
# background
I am developing a tool that collects diff coverage based on the diff_cover JSON output, for rendering the source code in a web page. For this, both the measured lines (all lines that should be covered by testing) and the violation lines of each source file are important.

Are there any options to output the measured (tracked) lines in the JSON report? Or any other output that makes them easy to compute, such as covered lines (covered lines + violation lines = tracked lines)?

I use this command:
`diff-cover coverage.xml --json-report report.json`

e.g. currently, for a git diff like:

```
diff --git a/my_module/src/main/java/com/zsmallx/example/java_calculator/JCalculator.java b/my_module/src/main/java/com/zsmallx/example/java_calculator/JCalculator.java
index 1633ede..52b785e 100644
--- a/my_module/src/main/java/com/zsmallx/example/java_calculator/JCalculator.java
+++ b/my_module/src/main/java/com/zsmallx/example/java_calculator/JCalculator.java
@@ -9,6 +9,10 @@ public class JCalculator {
         if (b == 0) {
             throw new ArithmeticException("Divide zero!");
         }
+        // neg divide
+        if (b < 0) {
+            throw new ArithmeticException("Divide neg!");
+        }
         return a / b;
     }
 }
\ No newline at end of file
```

the current JSON report is:

```json
{
    "report_name": "XML",
    "diff_name": "origin/master...HEAD, staged and unstaged changes",
    "src_stats": {
        "my_module/src/main/java/com/zsmallx/example/java_calculator/JCalculator.java": {
            "percent_covered": 50.0,
            "violation_lines": [
                14
            ],
            "violations": [
                [
                    14,
                    null
                ]
            ]
        }
    },
    "total_num_lines": 2,
    "total_num_violations": 1,
    "total_percent_covered": 50,
    "num_changed_lines": 4
}
```

# expected
For the source file `my_module/src/main/java/com/zsmallx/example/java_calculator/JCalculator.java`, the output would look like:

```
"violation_lines": [
    14
],
"measured_lines": [
    13, 14
]
```
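Once the report carries a per-file list of covered lines next to `violation_lines` (the field names below are the ones discussed in this issue, not guaranteed output of any released version), a consumer can recover the tracked lines as the union of the two sets; a minimal sketch:

```python
import json

# Stand-in for a report that already contains the requested field.
report = json.loads("""
{
    "src_stats": {
        "JCalculator.java": {
            "covered_lines": [13],
            "violation_lines": [14]
        }
    }
}
""")

for path, stats in report["src_stats"].items():
    # tracked (measured) lines = covered lines + violation lines
    tracked = sorted(set(stats["covered_lines"]) | set(stats["violation_lines"]))
    print(path, "tracked lines:", tracked)
```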
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_report_generator.py::TestJsonReportGenerator::test_hundred_percent", "tests/test_report_generator.py::TestJsonReportGenerator::test_generate_report" ]
[ "tests/test_report_generator.py::TestJsonReportGenerator::test_empty_report", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_empty_report", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_generate_report", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_hundred_percent", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_one_snippet", "tests/test_report_generator.py::TestMarkdownReportGenerator::test_multiple_snippets", "tests/test_report_generator.py::TestTemplateReportGenerator::test_one_number", "tests/test_report_generator.py::TestTemplateReportGenerator::test_empty_list", "tests/test_report_generator.py::TestTemplateReportGenerator::test_combine_adjacent_lines_no_adjacent", "tests/test_report_generator.py::TestTemplateReportGenerator::test_combine_adjacent_lines", "tests/test_report_generator.py::TestStringReportGenerator::test_generate_report", "tests/test_report_generator.py::TestStringReportGenerator::test_hundred_percent", "tests/test_report_generator.py::TestStringReportGenerator::test_empty_report", "tests/test_report_generator.py::TestHtmlReportGenerator::test_empty_report", "tests/test_report_generator.py::TestHtmlReportGenerator::test_generate_report", "tests/test_report_generator.py::TestHtmlReportGenerator::test_one_snippet", "tests/test_report_generator.py::TestHtmlReportGenerator::test_multiple_snippets", "tests/test_report_generator.py::TestSimpleReportGenerator::test_total_num_missing", "tests/test_report_generator.py::TestSimpleReportGenerator::test_src_paths_not_measured", "tests/test_report_generator.py::TestSimpleReportGenerator::test_src_with_no_info", "tests/test_report_generator.py::TestSimpleReportGenerator::test_violation_lines", "tests/test_report_generator.py::TestSimpleReportGenerator::test_percent_covered", "tests/test_report_generator.py::TestSimpleReportGenerator::test_total_percent_covered", "tests/test_report_generator.py::TestSimpleReportGenerator::test_src_paths", "tests/test_report_generator.py::TestSimpleReportGenerator::test_coverage_name", "tests/test_report_generator.py::TestSimpleReportGenerator::test_diff_name", "tests/test_report_generator.py::TestSimpleReportGenerator::test_total_num_lines", "tests/test_report_generator.py::TestSimpleReportGeneratorWithBatchViolationReporter::test_violation_lines" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2023-02-21T12:34:13Z"
apache-2.0
Backblaze__B2_Command_Line_Tool-173
diff --git a/b2/sync.py b/b2/sync.py index c3c4ad9..cffdc81 100644 --- a/b2/sync.py +++ b/b2/sync.py @@ -67,12 +67,15 @@ class SyncReport(object): self.closed = False self.lock = threading.Lock() self._update_progress() + self.warnings = [] def close(self): with self.lock: if not self.no_progress: self._print_line('', False) self.closed = True + for warning in self.warnings: + self._print_line(warning, True) def __enter__(self): return self @@ -185,6 +188,9 @@ class SyncReport(object): self.transfer_bytes += byte_delta self._update_progress() + def local_access_error(self, path): + self.warnings.append('WARNING: %s could not be accessed (broken symlink?)' % (path,)) + class SyncFileReporter(AbstractProgressListener): """ @@ -453,13 +459,17 @@ class AbstractFolder(object): """ @abstractmethod - def all_files(self): + def all_files(self, reporter): """ Returns an iterator over all of the files in the folder, in the order that B2 uses. No matter what the folder separator on the local file system is, "/" is used in the returned file names. + + If a file is found, but does not exist (for example due to + a broken symlink or a race), reporter will be informed about + each such problem. """ @abstractmethod @@ -494,9 +504,9 @@ class LocalFolder(AbstractFolder): def folder_type(self): return 'local' - def all_files(self): + def all_files(self, reporter): prefix_len = len(self.root) + 1 # include trailing '/' in prefix length - for relative_path in self._walk_relative_paths(prefix_len, self.root): + for relative_path in self._walk_relative_paths(prefix_len, self.root, reporter): yield self._make_file(relative_path) def make_full_path(self, file_name): @@ -514,7 +524,7 @@ class LocalFolder(AbstractFolder): elif not os.path.isdir(self.root): raise Exception('%s is not a directory' % (self.root,)) - def _walk_relative_paths(self, prefix_len, dir_path): + def _walk_relative_paths(self, prefix_len, dir_path, reporter): """ Yields all of the file names anywhere under this folder, in the order they would appear in B2. @@ -535,16 +545,21 @@ class LocalFolder(AbstractFolder): ) full_path = os.path.join(dir_path, name) relative_path = full_path[prefix_len:] - if os.path.isdir(full_path): - name += six.u('/') - dirs.add(name) - names[name] = (full_path, relative_path) + # Skip broken symlinks or other inaccessible files + if not os.path.exists(full_path): + if reporter is not None: + reporter.local_access_error(full_path) + else: + if os.path.isdir(full_path): + name += six.u('/') + dirs.add(name) + names[name] = (full_path, relative_path) # Yield all of the answers for name in sorted(names): (full_path, relative_path) = names[name] if name in dirs: - for rp in self._walk_relative_paths(prefix_len, full_path): + for rp in self._walk_relative_paths(prefix_len, full_path, reporter): yield rp else: yield relative_path @@ -573,7 +588,7 @@ class B2Folder(AbstractFolder): self.bucket = api.get_bucket_by_name(bucket_name) self.prefix = '' if self.folder_name == '' else self.folder_name + '/' - def all_files(self): + def all_files(self, reporter): current_name = None current_versions = [] for (file_version_info, folder_name) in self.bucket.ls( @@ -625,7 +640,7 @@ def next_or_none(iterator): return None -def zip_folders(folder_a, folder_b, exclusions=tuple()): +def zip_folders(folder_a, folder_b, reporter, exclusions=tuple()): """ An iterator over all of the files in the union of two folders, matching file names. 
@@ -637,8 +652,10 @@ def zip_folders(folder_a, folder_b, exclusions=tuple()): :param folder_b: A Folder object. """ - iter_a = (f for f in folder_a.all_files() if not any(ex.match(f.name) for ex in exclusions)) - iter_b = folder_b.all_files() + iter_a = ( + f for f in folder_a.all_files(reporter) if not any(ex.match(f.name) for ex in exclusions) + ) + iter_b = folder_b.all_files(reporter) current_a = next_or_none(iter_a) current_b = next_or_none(iter_b) @@ -810,7 +827,7 @@ def make_folder_sync_actions(source_folder, dest_folder, args, now_millis, repor ('b2', 'local'), ('local', 'b2') ]: raise NotImplementedError("Sync support only local-to-b2 and b2-to-local") - for (source_file, dest_file) in zip_folders(source_folder, dest_folder, exclusions): + for (source_file, dest_file) in zip_folders(source_folder, dest_folder, reporter, exclusions): if source_folder.folder_type() == 'local': if source_file is not None: reporter.update_compare(1) @@ -863,7 +880,9 @@ def count_files(local_folder, reporter): """ Counts all of the files in a local folder. """ - for _ in local_folder.all_files(): + # Don't pass in a reporter to all_files. Broken symlinks will be reported + # during the next pass when the source and dest files are compared. + for _ in local_folder.all_files(None): reporter.update_local(1) reporter.end_local()
Backblaze/B2_Command_Line_Tool
ab2b5b4e3dc2c8b52b28592c7414ebb4646034e2
diff --git a/test/test_sync.py b/test/test_sync.py index ad2b140..9102b6e 100644 --- a/test/test_sync.py +++ b/test/test_sync.py @@ -37,36 +37,58 @@ def write_file(path, contents): f.write(contents) -def create_files(root_dir, relative_paths): - for relative_path in relative_paths: - full_path = os.path.join(root_dir, relative_path) - write_file(full_path, b'') +class TestLocalFolder(unittest.TestCase): + NAMES = [ + six.u('.dot_file'), six.u('hello.'), six.u('hello/a/1'), six.u('hello/a/2'), + six.u('hello/b'), six.u('hello0'), six.u('\u81ea\u7531') + ] + def setUp(self): + self.reporter = MagicMock() + + @classmethod + def _create_files(cls, root_dir, relative_paths): + for relative_path in relative_paths: + full_path = os.path.join(root_dir, relative_path) + write_file(full_path, b'') + + def _prepare_folder(self, root_dir, broken_symlink=False): + self._create_files(root_dir, self.NAMES) + if broken_symlink: + os.symlink( + os.path.join(root_dir, 'non_existant_file'), os.path.join(root_dir, 'bad_symlink') + ) + return LocalFolder(root_dir) -class TestLocalFolder(unittest.TestCase): def test_slash_sorting(self): # '/' should sort between '.' and '0' - names = [ - six.u('.dot_file'), six.u('hello.'), six.u('hello/a/1'), six.u('hello/a/2'), - six.u('hello/b'), six.u('hello0'), six.u('\u81ea\u7531') - ] with TempDir() as tmpdir: - create_files(tmpdir, names) - folder = LocalFolder(tmpdir) - actual_names = list(f.name for f in folder.all_files()) - self.assertEqual(names, actual_names) + folder = self._prepare_folder(tmpdir) + actual_names = list(f.name for f in folder.all_files(self.reporter)) + self.assertEqual(self.NAMES, actual_names) + self.reporter.local_access_error.assert_not_called() + + def test_broken_symlink(self): + with TempDir() as tmpdir: + folder = self._prepare_folder(tmpdir, broken_symlink=True) + for f in folder.all_files(self.reporter): + pass # just generate all the files + self.reporter.local_access_error.assert_called_once_with( + os.path.join(tmpdir, 'bad_symlink') + ) class TestB2Folder(unittest.TestCase): def setUp(self): self.bucket = MagicMock() self.api = MagicMock() + self.reporter = MagicMock() self.api.get_bucket_by_name.return_value = self.bucket self.b2_folder = B2Folder('bucket-name', 'folder', self.api) def test_empty(self): self.bucket.ls.return_value = [] - self.assertEqual([], list(self.b2_folder.all_files())) + self.assertEqual([], list(self.b2_folder.all_files(self.reporter))) def test_multiple_versions(self): # Test two files, to cover the yield within the loop, and @@ -102,7 +124,7 @@ class TestB2Folder(unittest.TestCase): [ "File(a.txt, [FileVersion('a2', 'folder/a.txt', 2000, 'upload'), FileVersion('a1', 'folder/a.txt', 1000, 'upload')])", "File(b.txt, [FileVersion('b2', 'folder/b.txt', 2000, 'upload'), FileVersion('b1', 'folder/b.txt', 1000, 'upload')])", - ], [str(f) for f in self.b2_folder.all_files()] + ], [str(f) for f in self.b2_folder.all_files(self.reporter)] ) @@ -111,7 +133,7 @@ class FakeFolder(AbstractFolder): self.f_type = f_type self.files = files - def all_files(self): + def all_files(self, reporter): return iter(self.files) def folder_type(self): @@ -150,16 +172,19 @@ class TestParseSyncFolder(unittest.TestCase): class TestZipFolders(unittest.TestCase): + def setUp(self): + self.reporter = MagicMock() + def test_empty(self): folder_a = FakeFolder('b2', []) folder_b = FakeFolder('b2', []) - self.assertEqual([], list(zip_folders(folder_a, folder_b))) + self.assertEqual([], list(zip_folders(folder_a, folder_b, self.reporter))) def 
test_one_empty(self): file_a1 = File("a.txt", [FileVersion("a", "a", 100, "upload", 10)]) folder_a = FakeFolder('b2', [file_a1]) folder_b = FakeFolder('b2', []) - self.assertEqual([(file_a1, None)], list(zip_folders(folder_a, folder_b))) + self.assertEqual([(file_a1, None)], list(zip_folders(folder_a, folder_b, self.reporter))) def test_two(self): file_a1 = File("a.txt", [FileVersion("a", "a", 100, "upload", 10)]) @@ -174,9 +199,22 @@ class TestZipFolders(unittest.TestCase): [ (file_a1, None), (file_a2, file_b1), (file_a3, None), (None, file_b2), (file_a4, None) - ], list(zip_folders(folder_a, folder_b)) + ], list(zip_folders(folder_a, folder_b, self.reporter)) ) + def test_pass_reporter_to_folder(self): + """ + Check that the zip_folders() function passes the reporter through + to both folders. + """ + folder_a = MagicMock() + folder_b = MagicMock() + folder_a.all_files = MagicMock(return_value=iter([])) + folder_b.all_files = MagicMock(return_value=iter([])) + self.assertEqual([], list(zip_folders(folder_a, folder_b, self.reporter))) + folder_a.all_files.assert_called_once_with(self.reporter) + folder_b.all_files.assert_called_once_with(self.reporter) + class FakeArgs(object): """ diff --git a/test_b2_command_line.py b/test_b2_command_line.py index 8d23678..0628248 100644 --- a/test_b2_command_line.py +++ b/test_b2_command_line.py @@ -200,6 +200,8 @@ class CommandLine(object): sys.exit(1) if expected_pattern is not None: if re.search(expected_pattern, stdout) is None: + print('STDOUT:') + print(stdout) error_and_exit('did not match pattern: ' + expected_pattern) return stdout @@ -469,8 +471,12 @@ def _sync_test_using_dir(b2_tool, bucket_name, dir_): write_file(p('a'), b'hello') write_file(p('b'), b'hello') write_file(p('c'), b'hello') + os.symlink('broken', p('d')) - b2_tool.should_succeed(['sync', '--noProgress', dir_path, b2_sync_point]) + b2_tool.should_succeed( + ['sync', '--noProgress', dir_path, b2_sync_point], + expected_pattern="/d could not be accessed" + ) file_versions = b2_tool.list_file_versions(bucket_name) should_equal( [
Broken symlink breaks sync
I had this issue where one of my symlinks was broken and the b2 tool broke; this is the stack trace:

```
Traceback (most recent call last):
  File "/usr/local/bin/b2", line 9, in <module>
    load_entry_point('b2==0.5.4', 'console_scripts', 'b2')()
  File "/usr/local/lib/python2.7/dist-packages/b2/console_tool.py", line 861, in main
    exit_status = ct.run_command(decoded_argv)
  File "/usr/local/lib/python2.7/dist-packages/b2/console_tool.py", line 789, in run_command
    return command.run(args)
  File "/usr/local/lib/python2.7/dist-packages/b2/console_tool.py", line 609, in run
    max_workers=max_workers
  File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 877, in sync_folders
    source_folder, dest_folder, args, now_millis, reporter
  File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 777, in make_folder_sync_actions
    for (source_file, dest_file) in zip_folders(source_folder, dest_folder):
  File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 646, in zip_folders
    current_a = next_or_none(iter_a)
  File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 620, in next_or_none
    return six.advance_iterator(iterator)
  File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 499, in all_files
    yield self._make_file(relative_path)
  File "/usr/local/lib/python2.7/dist-packages/b2/sync.py", line 553, in _make_file
    mod_time = int(round(os.path.getmtime(full_path) * 1000))
  File "/usr/lib/python2.7/genericpath.py", line 54, in getmtime
    return os.stat(filename).st_mtime
OSError: [Errno 2] No such file or directory: '/media/2a9074d0-4788-45ab-bfae-fc46427c69fa/PersonalData/some-broken-symlink'
```
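The failure mode is easy to reproduce outside of b2: directory listings still report a broken symlink, but stat-based calls such as `os.path.getmtime()` raise `OSError`. A small, hedged sketch (Python 3 here, while the traceback above is Python 2) of guarding with `os.path.exists()`, which follows symlinks:

```python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    # Create a symlink whose target does not exist.
    bad_link = os.path.join(tmp, "some-broken-symlink")
    os.symlink(os.path.join(tmp, "does-not-exist"), bad_link)

    for name in os.listdir(tmp):
        full_path = os.path.join(tmp, name)
        if not os.path.exists(full_path):
            # Broken symlink (or a file that vanished in a race): skip it
            # instead of letting os.path.getmtime() raise OSError.
            print("skipping", full_path)
            continue
        print(name, os.path.getmtime(full_path))
```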
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_sync.py::TestLocalFolder::test_broken_symlink", "test/test_sync.py::TestLocalFolder::test_slash_sorting", "test/test_sync.py::TestB2Folder::test_empty", "test/test_sync.py::TestB2Folder::test_multiple_versions", "test/test_sync.py::TestZipFolders::test_empty", "test/test_sync.py::TestZipFolders::test_one_empty", "test/test_sync.py::TestZipFolders::test_pass_reporter_to_folder", "test/test_sync.py::TestZipFolders::test_two", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_delete", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_newer", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_older", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_equal", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal_delete", "test/test_sync.py::TestMakeSyncActions::test_delete_b2", "test/test_sync.py::TestMakeSyncActions::test_delete_b2_multiple_versions", "test/test_sync.py::TestMakeSyncActions::test_delete_hide_b2_multiple_versions", "test/test_sync.py::TestMakeSyncActions::test_delete_local", "test/test_sync.py::TestMakeSyncActions::test_empty_b2", "test/test_sync.py::TestMakeSyncActions::test_empty_local", "test/test_sync.py::TestMakeSyncActions::test_file_exclusions", "test/test_sync.py::TestMakeSyncActions::test_file_exclusions_with_delete", "test/test_sync.py::TestMakeSyncActions::test_keep_days_no_change_with_old_file", "test/test_sync.py::TestMakeSyncActions::test_newer_b2", "test/test_sync.py::TestMakeSyncActions::test_newer_b2_clean_old_versions", "test/test_sync.py::TestMakeSyncActions::test_newer_b2_delete_old_versions", "test/test_sync.py::TestMakeSyncActions::test_newer_local", "test/test_sync.py::TestMakeSyncActions::test_no_delete_b2", "test/test_sync.py::TestMakeSyncActions::test_no_delete_local", "test/test_sync.py::TestMakeSyncActions::test_not_there_b2", "test/test_sync.py::TestMakeSyncActions::test_not_there_local", "test/test_sync.py::TestMakeSyncActions::test_older_b2", "test/test_sync.py::TestMakeSyncActions::test_older_b2_replace", "test/test_sync.py::TestMakeSyncActions::test_older_b2_replace_delete", "test/test_sync.py::TestMakeSyncActions::test_older_b2_skip", "test/test_sync.py::TestMakeSyncActions::test_older_local", "test/test_sync.py::TestMakeSyncActions::test_older_local_replace", "test/test_sync.py::TestMakeSyncActions::test_older_local_skip", "test/test_sync.py::TestMakeSyncActions::test_same_b2", "test/test_sync.py::TestMakeSyncActions::test_same_clean_old_versions", "test/test_sync.py::TestMakeSyncActions::test_same_delete_old_versions", "test/test_sync.py::TestMakeSyncActions::test_same_leave_old_versions", "test/test_sync.py::TestMakeSyncActions::test_same_local" ]
[ "test/test_sync.py::TestParseSyncFolder::test_b2_double_slash", "test/test_sync.py::TestParseSyncFolder::test_b2_no_double_slash", "test/test_sync.py::TestParseSyncFolder::test_b2_no_folder", "test/test_sync.py::TestParseSyncFolder::test_b2_trailing_slash", "test/test_sync.py::TestParseSyncFolder::test_local", "test/test_sync.py::TestParseSyncFolder::test_local_trailing_slash", "test/test_sync.py::TestMakeSyncActions::test_illegal_b2_to_b2", "test/test_sync.py::TestMakeSyncActions::test_illegal_delete_and_keep_days", "test/test_sync.py::TestMakeSyncActions::test_illegal_local_to_local", "test/test_sync.py::TestMakeSyncActions::test_illegal_skip_and_replace", "test_b2_command_line.py::TestCommandLine::test_stderr_patterns" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-06-14T20:22:17Z"
mit
Backblaze__B2_Command_Line_Tool-180
diff --git a/b2/account_info.py b/b2/account_info.py
index 9a639c5..5eafba6 100644
--- a/b2/account_info.py
+++ b/b2/account_info.py
@@ -141,6 +141,42 @@ class AbstractAccountInfo(object):
         pass
 
 
+class UploadUrlPool(object):
+    """
+    For each key (either a bucket id or large file id), holds a pool
+    of (url, auth_token) pairs, with thread-safe methods to add and
+    remove them.
+    """
+
+    def __init__(self):
+        self._lock = threading.Lock()
+        self._pool = collections.defaultdict(list)
+
+    def put(self, key, url, auth_token):
+        """
+        Adds the url and auth token to the pool for the given key.
+        """
+        with self._lock:
+            pair = (url, auth_token)
+            self._pool[key].append(pair)
+
+    def take(self, key):
+        """
+        Returns (url, auth_token) if one is available, or (None, None) if not.
+        """
+        with self._lock:
+            pair_list = self._pool[key]
+            if pair_list:
+                return pair_list.pop()
+            else:
+                return (None, None)
+
+    def clear_for_key(self, key):
+        with self._lock:
+            if key in self._pool:
+                del self._pool[key]
+
+
 class SqliteAccountInfo(AbstractAccountInfo):
     """
     Stores account information in an sqlite database, which is
@@ -157,9 +193,8 @@ class SqliteAccountInfo(AbstractAccountInfo):
         with self._get_connection() as conn:
             self._create_tables(conn)
 
-        self._large_file_uploads = collections.defaultdict(
-            list
-        )  # We don't keep large file upload URLs across a reload
+        self._bucket_uploads = UploadUrlPool()
+        self._large_file_uploads = UploadUrlPool()
 
         # this lock controls access to self._large_file_uploads
         self._lock = threading.Lock()
@@ -267,6 +302,9 @@ class SqliteAccountInfo(AbstractAccountInfo):
             );
            """
        )
+        # This table is not used any more.  We may use it again
+        # someday if we save upload URLs across invocations of
+        # the command-line tool.
        conn.execute(
            """
            CREATE TABLE IF NOT EXISTS
@@ -367,48 +405,22 @@ class SqliteAccountInfo(AbstractAccountInfo):
             return None
 
     def put_bucket_upload_url(self, bucket_id, upload_url, upload_auth_token):
-        with self._get_connection() as conn:
-            conn.execute(
-                'INSERT INTO bucket_upload_url (bucket_id, upload_url, upload_auth_token) values (?, ?, ?);',
-                (bucket_id, upload_url, upload_auth_token)
-            )
+        self._bucket_uploads.put(bucket_id, upload_url, upload_auth_token)
 
     def clear_bucket_upload_data(self, bucket_id):
-        with self._get_connection() as conn:
-            conn.execute('DELETE FROM bucket_upload_url WHERE bucket_id = ?;', (bucket_id,))
+        self._bucket_uploads.clear_for_key(bucket_id)
 
     def take_bucket_upload_url(self, bucket_id):
-        try:
-            with self._get_connection() as conn:
-                cursor = conn.execute(
-                    'SELECT upload_url, upload_auth_token FROM bucket_upload_url WHERE bucket_id = ?;',
-                    (bucket_id,)
-                )
-                (upload_url, upload_auth_token) = cursor.fetchone()
-                conn.execute(
-                    'DELETE FROM bucket_upload_url WHERE upload_auth_token = ?;',
-                    (upload_auth_token,)
-                )
-                return (upload_url, upload_auth_token)
-        except:
-            return (None, None)
+        return self._bucket_uploads.take(bucket_id)
 
     def put_large_file_upload_url(self, file_id, upload_url, upload_auth_token):
-        with self._lock:
-            self._large_file_uploads[file_id].append((upload_url, upload_auth_token))
+        self._large_file_uploads.put(file_id, upload_url, upload_auth_token)
 
     def take_large_file_upload_url(self, file_id):
-        with self._lock:
-            url_list = self._large_file_uploads.get(file_id, [])
-            if len(url_list) == 0:
-                return (None, None)
-            else:
-                return url_list.pop()
+        return self._large_file_uploads.take(file_id)
 
     def clear_large_file_upload_urls(self, file_id):
-        with self._lock:
-            if file_id in self._large_file_uploads:
-                del self._large_file_uploads[file_id]
+        self._large_file_uploads.clear_for_key(file_id)
 
 
 class StubAccountInfo(AbstractAccountInfo):
Backblaze/B2_Command_Line_Tool
01c4e89f63f38b9efa6a6fa63f54cd556a0b5305
diff --git a/test/test_account_info.py b/test/test_account_info.py index 2a52183..68a6f22 100644 --- a/test/test_account_info.py +++ b/test/test_account_info.py @@ -14,7 +14,7 @@ import unittest import six -from b2.account_info import SqliteAccountInfo +from b2.account_info import SqliteAccountInfo, UploadUrlPool from b2.exception import CorruptAccountInfo, MissingAccountData try: @@ -23,6 +23,32 @@ except: import mock +class TestUploadUrlPool(unittest.TestCase): + def setUp(self): + self.pool = UploadUrlPool() + + def test_take_empty(self): + self.assertEqual((None, None), self.pool.take('a')) + + def test_put_and_take(self): + self.pool.put('a', 'url_a1', 'auth_token_a1') + self.pool.put('a', 'url_a2', 'auth_token_a2') + self.pool.put('b', 'url_b1', 'auth_token_b1') + self.assertEqual(('url_a2', 'auth_token_a2'), self.pool.take('a')) + self.assertEqual(('url_a1', 'auth_token_a1'), self.pool.take('a')) + self.assertEqual((None, None), self.pool.take('a')) + self.assertEqual(('url_b1', 'auth_token_b1'), self.pool.take('b')) + self.assertEqual((None, None), self.pool.take('b')) + + def test_clear(self): + self.pool.put('a', 'url_a1', 'auth_token_a1') + self.pool.clear_for_key('a') + self.pool.put('b', 'url_b1', 'auth_token_b1') + self.assertEqual((None, None), self.pool.take('a')) + self.assertEqual(('url_b1', 'auth_token_b1'), self.pool.take('b')) + self.assertEqual((None, None), self.pool.take('b')) + + class TestSqliteAccountInfo(unittest.TestCase): FILE_NAME = '/tmp/test_b2_account_info' @@ -99,20 +125,6 @@ class TestSqliteAccountInfo(unittest.TestCase): except MissingAccountData: pass - def test_bucket_upload_data(self): - account_info = self._make_info() - account_info.put_bucket_upload_url('bucket-0', 'http://bucket-0', 'bucket-0_auth') - self.assertEqual( - ('http://bucket-0', 'bucket-0_auth'), account_info.take_bucket_upload_url('bucket-0') - ) - self.assertEqual((None, None), self._make_info().take_bucket_upload_url('bucket-0')) - account_info.put_bucket_upload_url('bucket-0', 'http://bucket-0', 'bucket-0_auth') - self.assertEqual( - ('http://bucket-0', 'bucket-0_auth'), - self._make_info().take_bucket_upload_url('bucket-0') - ) - self.assertEqual((None, None), account_info.take_bucket_upload_url('bucket-0')) - def test_clear_bucket_upload_data(self): account_info = self._make_info() account_info.put_bucket_upload_url('bucket-0', 'http://bucket-0', 'bucket-0_auth')
Using a lot of threads with sync causes sqlite errors
I'm attempting to use the sync command to upload 1 million files using 200 threads. In the log, I'm seeing database errors that prevent an individual file from being uploaded:

`b2_upload(/Volumes/Library/Data/1m-files/file100352.txt, file100352.txt, 1463692063000): OperationalError('database is locked',) database is locked`

When that thread fails, the file is left out of the sync and is never retried.

In addition, the B2 CLI infrequently crashes out completely because the auth credentials couldn't be retrieved:

`ERROR: Missing account data: database is locked  Use: b2 authorize_account`
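The contention comes from every upload thread reading and writing upload URLs through the shared sqlite file. The patch recorded above for this instance moves those URLs into an in-memory, lock-protected pool instead; a condensed, standalone sketch of that idea:

```python
import collections
import threading

class UploadUrlPool(object):
    """Keep per-bucket upload URLs in process memory behind one lock."""

    def __init__(self):
        self._lock = threading.Lock()
        self._pool = collections.defaultdict(list)

    def put(self, key, url, auth_token):
        with self._lock:
            self._pool[key].append((url, auth_token))

    def take(self, key):
        with self._lock:
            pairs = self._pool[key]
            return pairs.pop() if pairs else (None, None)


pool = UploadUrlPool()
pool.put("bucket-0", "https://pod-000.example.com/upload", "token-1")
print(pool.take("bucket-0"))   # reuses the stored pair
print(pool.take("bucket-0"))   # (None, None) once the pool is drained
```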
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_account_info.py::TestSqliteAccountInfo::test_corrupted", "test/test_account_info.py::TestSqliteAccountInfo::test_clear_large_file_upload_urls", "test/test_account_info.py::TestSqliteAccountInfo::test_convert_from_json", "test/test_account_info.py::TestSqliteAccountInfo::test_refresh_bucket", "test/test_account_info.py::TestSqliteAccountInfo::test_clear_bucket_upload_data", "test/test_account_info.py::TestSqliteAccountInfo::test_bucket", "test/test_account_info.py::TestSqliteAccountInfo::test_large_file_upload_urls", "test/test_account_info.py::TestSqliteAccountInfo::test_account_info", "test/test_account_info.py::TestSqliteAccountInfo::test_clear", "test/test_account_info.py::TestUploadUrlPool::test_clear", "test/test_account_info.py::TestUploadUrlPool::test_take_empty", "test/test_account_info.py::TestUploadUrlPool::test_put_and_take" ]
[]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-07-01T16:30:50Z"
mit
Backblaze__B2_Command_Line_Tool-302
diff --git a/b2/console_tool.py b/b2/console_tool.py
index 0e7c7c7..bf15d7b 100644
--- a/b2/console_tool.py
+++ b/b2/console_tool.py
@@ -35,7 +35,7 @@ from .exception import (B2Error, BadFileInfo)
 from .file_version import (FileVersionInfo)
 from .parse_args import parse_arg_list
 from .progress import (make_progress_listener)
-from .raw_api import (test_raw_api)
+from .raw_api import (SRC_LAST_MODIFIED_MILLIS, test_raw_api)
 from .sync import parse_sync_folder, sync_folders
 from .utils import (current_time_millis, set_shutting_down)
 from .version import (VERSION)
@@ -859,6 +859,10 @@ class UploadFile(Command):
                 raise BadFileInfo(info)
             file_infos[parts[0]] = parts[1]
 
+        if SRC_LAST_MODIFIED_MILLIS not in file_infos:
+            file_infos[SRC_LAST_MODIFIED_MILLIS
+                      ] = str(int(os.path.getmtime(args.localFilePath) * 1000))
+
         max_workers = args.threads or 10
         self.api.set_thread_pool_size(max_workers)
 
diff --git a/b2/raw_api.py b/b2/raw_api.py
index ef34e11..bf6230f 100644
--- a/b2/raw_api.py
+++ b/b2/raw_api.py
@@ -26,6 +26,9 @@ from .download_dest import DownloadDestBytes
 from .exception import ChecksumMismatch, TruncatedOutput, UnexpectedCloudBehaviour
 from .utils import b2_url_encode, hex_sha1_of_stream
 
 
+# Standard names for file info entries
+SRC_LAST_MODIFIED_MILLIS = 'src_last_modified_millis'
+
 @six.add_metaclass(ABCMeta)
 class AbstractRawApi(object):
@@ -236,8 +239,8 @@ class B2RawApi(AbstractRawApi):
             raise UnexpectedCloudBehaviour('Content-Range header was expected')
 
         file_info = dict((k[10:], info[k]) for k in info if k.startswith('x-bz-info-'))
-        if 'src_last_modified_millis' in file_info:
-            mod_time_millis = int(file_info['src_last_modified_millis'])
+        if SRC_LAST_MODIFIED_MILLIS in file_info:
+            mod_time_millis = int(file_info[SRC_LAST_MODIFIED_MILLIS])
         else:
             mod_time_millis = int(info['x-bz-upload-timestamp'])
 
diff --git a/b2/sync/action.py b/b2/sync/action.py
index c79023e..b9e6acc 100644
--- a/b2/sync/action.py
+++ b/b2/sync/action.py
@@ -17,6 +17,7 @@ import six
 from ..download_dest import DownloadDestLocalFile
 from ..upload_source import UploadSourceLocalFile
 from ..utils import raise_if_shutting_down
+from ..raw_api import SRC_LAST_MODIFIED_MILLIS
 from .report import SyncFileReporter
 
 logger = logging.getLogger(__name__)
@@ -79,7 +80,7 @@ class B2UploadAction(AbstractAction):
             bucket.upload(
                 UploadSourceLocalFile(self.local_full_path),
                 self.b2_file_name,
-                file_info={'src_last_modified_millis': str(self.mod_time_millis)},
+                file_info={SRC_LAST_MODIFIED_MILLIS: str(self.mod_time_millis)},
                 progress_listener=SyncFileReporter(reporter)
             )
 
diff --git a/b2/sync/folder.py b/b2/sync/folder.py
index 7309b86..137705a 100644
--- a/b2/sync/folder.py
+++ b/b2/sync/folder.py
@@ -16,6 +16,7 @@ import six
 
 from .exception import EnvironmentEncodingError
 from .file import File, FileVersion
+from ..raw_api import SRC_LAST_MODIFIED_MILLIS
 
 
 @six.add_metaclass(ABCMeta)
@@ -198,8 +199,8 @@ class B2Folder(AbstractFolder):
                     yield File(current_name, current_versions)
                     current_versions = []
                 file_info = file_version_info.file_info
-                if 'src_last_modified_millis' in file_info:
-                    mod_time_millis = int(file_info['src_last_modified_millis'])
+                if SRC_LAST_MODIFIED_MILLIS in file_info:
+                    mod_time_millis = int(file_info[SRC_LAST_MODIFIED_MILLIS])
                 else:
                     mod_time_millis = file_version_info.upload_timestamp
                 assert file_version_info.size is not None
0fe4f2d0faad6e4e86d668b54958d93bc116b85c
diff --git a/test/test_console_tool.py b/test/test_console_tool.py index 8ac7ee6..66e0d75 100644 --- a/test/test_console_tool.py +++ b/test/test_console_tool.py @@ -210,6 +210,27 @@ class TestConsoleTool(TestBase): expected_stdout, '', 0 ) + # Get file info + mod_time_str = str(int(os.path.getmtime(local_file1) * 1000)) + expected_stdout = ''' + { + "accountId": "my-account", + "action": "upload", + "bucketId": "bucket_0", + "contentLength": 11, + "contentSha1": "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed", + "contentType": "b2/x-auto", + "fileId": "9999", + "fileInfo": { + "src_last_modified_millis": "%s" + }, + "fileName": "file1.txt", + "uploadTimestamp": 5000 + } + ''' % (mod_time_str,) + + self._run_command(['get_file_info', '9999'], expected_stdout, '', 0) + # Download by name local_download1 = os.path.join(temp_dir, 'download1.txt') expected_stdout = ''' @@ -218,8 +239,9 @@ class TestConsoleTool(TestBase): File size: 11 Content type: b2/x-auto Content sha1: 2aae6c35c94fcfb415dbe95f408b9ce91ee846ed + INFO src_last_modified_millis: %s checksum matches - ''' + ''' % (mod_time_str,) self._run_command( [ @@ -269,7 +291,9 @@ class TestConsoleTool(TestBase): "contentSha1": "2aae6c35c94fcfb415dbe95f408b9ce91ee846ed", "contentType": "b2/x-auto", "fileId": "9999", - "fileInfo": {}, + "fileInfo": { + "src_last_modified_millis": "%s" + }, "fileName": "file1.txt", "size": 11, "uploadTimestamp": 5000 @@ -278,7 +302,7 @@ class TestConsoleTool(TestBase): "nextFileId": null, "nextFileName": null } - ''' + ''' % (mod_time_str,) self._run_command(['list_file_versions', 'my-bucket'], expected_stdout, '', 0) diff --git a/test_b2_command_line.py b/test_b2_command_line.py index 8de27c4..2435226 100644 --- a/test_b2_command_line.py +++ b/test_b2_command_line.py @@ -324,6 +324,7 @@ def tearDown_envvar_test(envvar_name): def basic_test(b2_tool, bucket_name): file_to_upload = 'README.md' + file_mod_time_str = str(file_mod_time_millis(file_to_upload)) hex_sha1 = hashlib.sha1(read_file(file_to_upload)).hexdigest() @@ -398,7 +399,12 @@ def basic_test(b2_tool, bucket_name): b2_tool.should_succeed(['ls', bucket_name, 'b/'], r'^b/1\nb/2\n') file_info = b2_tool.should_succeed_json(['get_file_info', second_c_version['fileId']]) - should_equal({'color': 'blue', 'foo': 'bar=baz'}, file_info['fileInfo']) + expected_info = { + 'color': 'blue', + 'foo': 'bar=baz', + 'src_last_modified_millis': file_mod_time_str + } + should_equal(expected_info, file_info['fileInfo']) b2_tool.should_succeed(['delete_file_version', 'c', first_c_version['fileId']]) b2_tool.should_succeed(['ls', bucket_name], r'^a\nb/\nc\nd\n')
Set `src_last_modified_millis` in `b2 upload_file`
When you use `sync` to upload files, it always sets `src_last_modified_millis`. But uploading single files doesn't, which can cause later syncs to get confused.

I propose that `src_last_modified_millis` be set for every uploaded file. If the user doesn't specify a value on the command line, we can take it from the file.

It would be good to check and make sure that the local clock isn't too different from the server's clock before uploading anything.
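The default value is easy to derive from the local file: B2 stores `src_last_modified_millis` as a string of milliseconds since the epoch. A minimal sketch using only the standard library (the temporary file here is just a stand-in for the local path being uploaded):

```python
import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b"hello")
    local_path = f.name

file_infos = {}
if 'src_last_modified_millis' not in file_infos:
    # Only fill the default when the user did not pass a value explicitly.
    file_infos['src_last_modified_millis'] = str(int(os.path.getmtime(local_path) * 1000))

print(file_infos)
os.remove(local_path)
```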
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_console_tool.py::TestConsoleTool::test_files" ]
[ "test/test_console_tool.py::TestConsoleTool::test_authorize_with_bad_key", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key", "test/test_console_tool.py::TestConsoleTool::test_bad_terminal", "test/test_console_tool.py::TestConsoleTool::test_bucket_info_from_json", "test/test_console_tool.py::TestConsoleTool::test_buckets", "test/test_console_tool.py::TestConsoleTool::test_cancel_all_large_file", "test/test_console_tool.py::TestConsoleTool::test_cancel_large_file", "test/test_console_tool.py::TestConsoleTool::test_clear_account", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_defaults", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_explicit", "test/test_console_tool.py::TestConsoleTool::test_help_with_bad_args", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_parts", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_some", "test/test_console_tool.py::TestConsoleTool::test_sync", "test/test_console_tool.py::TestConsoleTool::test_sync_dry_run", "test/test_console_tool.py::TestConsoleTool::test_sync_syntax_error", "test/test_console_tool.py::TestConsoleTool::test_upload_large_file", "test_b2_command_line.py::TestCommandLine::test_stderr_patterns" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-12-16T19:31:54Z"
mit
Backblaze__B2_Command_Line_Tool-304
diff --git a/README.md b/README.md index 2a38ff9..9ffca80 100644 --- a/README.md +++ b/README.md @@ -27,32 +27,32 @@ this: # Usage - b2 authorize_account [<accountId>] [<applicationKey>] - b2 cancel_all_unfinished_large_files <bucketName> - b2 cancel_large_file <fileId> - b2 clear_account - b2 create_bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] - b2 delete_bucket <bucketName> - b2 delete_file_version [<fileName>] <fileId> - b2 download_file_by_id [--noProgress] <fileId> <localFileName> - b2 download_file_by_name [--noProgress] <bucketName> <fileName> <localFileName> - b2 get_download_auth [--prefix <fileNamePrefix>] [--duration <durationInSeconds>] <bucketName> - b2 get_file_info <fileId> + b2 authorize-account [<accountId>] [<applicationKey>] + b2 cancel-all-unfinished-large-files <bucketName> + b2 cancel-large-file <fileId> + b2 clear-account + b2 create-bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] + b2 delete-bucket <bucketName> + b2 delete-file-version [<fileName>] <fileId> + b2 download-file-by-id [--noProgress] <fileId> <localFileName> + b2 download-file-by-name [--noProgress] <bucketName> <fileName> <localFileName> + b2 get-download-auth [--prefix <fileNamePrefix>] [--duration <durationInSeconds>] <bucketName> + b2 get-file-info <fileId> b2 help [commandName] - b2 hide_file <bucketName> <fileName> - b2 list_buckets - b2 list_file_names <bucketName> [<startFileName>] [<maxToShow>] - b2 list_file_versions <bucketName> [<startFileName>] [<startFileId>] [<maxToShow>] - b2 list_parts <largeFileId> - b2 list_unfinished_large_files <bucketName> + b2 hide-file <bucketName> <fileName> + b2 list-buckets + b2 list-file-names <bucketName> [<startFileName>] [<maxToShow>] + b2 list-file-versions <bucketName> [<startFileName>] [<startFileId>] [<maxToShow>] + b2 list-parts <largeFileId> + b2 list-unfinished-large-files <bucketName> b2 ls [--long] [--versions] <bucketName> [<folderName>] - b2 make_url <fileId> + b2 make-url <fileId> b2 sync [--delete] [--keepDays N] [--skipNewer] [--replaceNewer] \ [--compareVersions <option>] [--threads N] [--noProgress] \ [--excludeRegex <regex> [--includeRegex <regex>]] [--dryRun] \ <source> <destination> - b2 update_bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] - b2 upload_file [--sha1 <sha1sum>] [--contentType <contentType>] \ + b2 update-bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] + b2 upload-file [--sha1 <sha1sum>] [--contentType <contentType>] \ [--info <key>=<value>]* [--minPartSize N] \ [--noProgress] [--threads N] <bucketName> <localFilePath> <b2FileName> b2 version diff --git a/b2/console_tool.py b/b2/console_tool.py index bf15d7b..a657532 100644 --- a/b2/console_tool.py +++ b/b2/console_tool.py @@ -60,8 +60,8 @@ def keyboard_interrupt_handler(signum, frame): raise KeyboardInterrupt() -def mixed_case_to_underscores(s): - return s[0].lower() + ''.join(c if c.islower() else '_' + c.lower() for c in s[1:]) +def mixed_case_to_hyphens(s): + return s[0].lower() + ''.join(c if c.islower() else '-' + c.lower() for c in s[1:]) class Command(object): @@ -177,7 +177,7 @@ class Command(object): class AuthorizeAccount(Command): """ - b2 authorize_account [<accountId>] [<applicationKey>] + b2 authorize-account [<accountId>] [<applicationKey>] Prompts for Backblaze accountID and applicationKey (unless they are given on the command line). 
@@ -226,7 +226,7 @@ class AuthorizeAccount(Command): class CancelAllUnfinishedLargeFiles(Command): """ - b2 cancel_all_unfinished_large_files <bucketName> + b2 cancel-all-unfinished-large-files <bucketName> Lists all large files that have been started but not finsished and cancels them. Any parts that have been @@ -245,7 +245,7 @@ class CancelAllUnfinishedLargeFiles(Command): class CancelLargeFile(Command): """ - b2 cancel_large_file <fileId> + b2 cancel-large-file <fileId> """ REQUIRED = ['fileId'] @@ -258,7 +258,7 @@ class CancelLargeFile(Command): class ClearAccount(Command): """ - b2 clear_account + b2 clear-account Erases everything in ~/.b2_account_info """ @@ -270,7 +270,7 @@ class ClearAccount(Command): class CreateBucket(Command): """ - b2 create_bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] + b2 create-bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] Creates a new bucket. Prints the ID of the bucket created. @@ -297,7 +297,7 @@ class CreateBucket(Command): class DeleteBucket(Command): """ - b2 delete_bucket <bucketName> + b2 delete-bucket <bucketName> Deletes the bucket with the given name. """ @@ -313,7 +313,7 @@ class DeleteBucket(Command): class DeleteFileVersion(Command): """ - b2 delete_file_version [<fileName>] <fileId> + b2 delete-file-version [<fileName>] <fileId> Permanently and irrevocably deletes one version of a file. @@ -342,7 +342,7 @@ class DeleteFileVersion(Command): class DownloadFileById(Command): """ - b2 download_file_by_id [--noProgress] <fileId> <localFileName> + b2 download-file-by-id [--noProgress] <fileId> <localFileName> Downloads the given file, and stores it in the given local file. @@ -364,7 +364,7 @@ class DownloadFileById(Command): class DownloadFileByName(Command): """ - b2 download_file_by_name [--noProgress] <bucketName> <fileName> <localFileName> + b2 download-file-by-name [--noProgress] <bucketName> <fileName> <localFileName> Downloads the given file, and stores it in the given local file. """ @@ -383,7 +383,7 @@ class DownloadFileByName(Command): class GetFileInfo(Command): """ - b2 get_file_info <fileId> + b2 get-file-info <fileId> Prints all of the information about the file, but not its contents. """ @@ -398,7 +398,7 @@ class GetFileInfo(Command): class GetDownloadAuth(Command): """ - b2 get_download_auth [--prefix <fileNamePrefix>] [--duration <durationInSeconds>] <bucketName> + b2 get-download-auth [--prefix <fileNamePrefix>] [--duration <durationInSeconds>] <bucketName> Prints an authorization token that is valid only for downloading files from the given bucket. @@ -450,7 +450,7 @@ class Help(Command): class HideFile(Command): """ - b2 hide_file <bucketName> <fileName> + b2 hide-file <bucketName> <fileName> Uploads a new, hidden, version of the given file. """ @@ -467,7 +467,7 @@ class HideFile(Command): class ListBuckets(Command): """ - b2 list_buckets + b2 list-buckets Lists all of the buckets in the current account. @@ -485,7 +485,7 @@ class ListBuckets(Command): class ListFileVersions(Command): """ - b2 list_file_versions <bucketName> [<startFileName>] [<startFileId>] [<maxToShow>] + b2 list-file-versions <bucketName> [<startFileName>] [<startFileId>] [<maxToShow>] Lists the names of the files in a bucket, starting at the given point. 
This is a low-level operation that reports the @@ -508,7 +508,7 @@ class ListFileVersions(Command): class ListFileNames(Command): """ - b2 list_file_names <bucketName> [<startFileName>] [<maxToShow>] + b2 list-file-names <bucketName> [<startFileName>] [<maxToShow>] Lists the names of the files in a bucket, starting at the given point. @@ -529,7 +529,7 @@ class ListFileNames(Command): class ListParts(Command): """ - b2 list_parts <largeFileId> + b2 list-parts <largeFileId> Lists all of the parts that have been uploaded for the given large file, which must be a file that was started but not @@ -546,7 +546,7 @@ class ListParts(Command): class ListUnfinishedLargeFiles(Command): """ - b2 list_unfinished_large_files <bucketName> + b2 list-unfinished-large-files <bucketName> Lists all of the large files in the bucket that were started, but not finished or canceled. @@ -616,7 +616,7 @@ class Ls(Command): class MakeUrl(Command): """ - b2 make_url <fileId> + b2 make-url <fileId> Prints an URL that can be used to download the given file, if it is public. @@ -744,7 +744,7 @@ class Sync(Command): class TestHttp(Command): """ - b2 test_http + b2 test-http PRIVATE. Exercises the HTTP layer. """ @@ -758,7 +758,7 @@ class TestHttp(Command): class TestRawApi(Command): """ - b2 test_raw_api + b2 test-raw-api PRIVATE. Exercises the B2RawApi class. """ @@ -772,7 +772,7 @@ class TestRawApi(Command): class TestUploadUrlConcurrency(Command): """ - b2 test_upload_url_concurrency + b2 test-upload-url-concurrency PRIVATE. Exercises the HTTP layer. """ @@ -786,7 +786,7 @@ class TestUploadUrlConcurrency(Command): class UpdateBucket(Command): """ - b2 update_bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] + b2 update-bucket [--bucketInfo <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] Updates the bucketType of an existing bucket. Prints the ID of the bucket updated. @@ -814,7 +814,7 @@ class UpdateBucket(Command): class UploadFile(Command): """ - b2 upload_file [--sha1 <sha1sum>] [--contentType <contentType>] \\ + b2 upload-file [--sha1 <sha1sum>] [--contentType <contentType>] \\ [--info <key>=<value>]* [--minPartSize N] \\ [--noProgress] [--threads N] <bucketName> <localFilePath> <b2FileName> @@ -915,7 +915,7 @@ class ConsoleTool(object): # a *magic* registry of commands self.command_name_to_class = dict( - (mixed_case_to_underscores(cls.__name__), cls) for cls in Command.__subclasses__() + (mixed_case_to_hyphens(cls.__name__), cls) for cls in Command.__subclasses__() ) def run_command(self, argv): @@ -925,7 +925,7 @@ class ConsoleTool(object): logger.info('ConsoleTool error - insufficient arguments') return self._usage_and_fail() - action = argv[1] + action = argv[1].replace('_', '-') arg_list = argv[2:] if action not in self.command_name_to_class: @@ -951,7 +951,7 @@ class ConsoleTool(object): return command.run(args) except MissingAccountData as e: logger.exception('ConsoleTool missing account data error') - self._print_stderr('ERROR: %s Use: b2 authorize_account' % (str(e),)) + self._print_stderr('ERROR: %s Use: b2 authorize-account' % (str(e),)) return 1 except B2Error as e: logger.exception('ConsoleTool command error')
Backblaze/B2_Command_Line_Tool
967dbda851bab6aa8adf7d61b46a337595d8480a
diff --git a/test/test_console_tool.py b/test/test_console_tool.py index 66e0d75..99adff5 100644 --- a/test/test_console_tool.py +++ b/test/test_console_tool.py @@ -49,7 +49,7 @@ class TestConsoleTool(TestBase): ['authorize_account', 'my-account', 'bad-app-key'], expected_stdout, expected_stderr, 1 ) - def test_authorize_with_good_key(self): + def test_authorize_with_good_key_using_hyphen(self): # Initial condition assert self.account_info.get_account_auth_token() is None @@ -59,7 +59,23 @@ class TestConsoleTool(TestBase): """ self._run_command( - ['authorize_account', 'my-account', 'good-app-key'], expected_stdout, '', 0 + ['authorize-account', 'my-account', 'good-app-key'], expected_stdout, '', 0 + ) + + # Auth token should be in account info now + assert self.account_info.get_account_auth_token() is not None + + def test_authorize_with_good_key_using_underscore(self): + # Initial condition + assert self.account_info.get_account_auth_token() is None + + # Authorize an account with a good api key. + expected_stdout = """ + Using http://production.example.com + """ + + self._run_command( + ['authorize-account', 'my-account', 'good-app-key'], expected_stdout, '', 0 ) # Auth token should be in account info now @@ -68,7 +84,7 @@ class TestConsoleTool(TestBase): def test_help_with_bad_args(self): expected_stderr = ''' - b2 list_parts <largeFileId> + b2 list-parts <largeFileId> Lists all of the parts that have been uploaded for the given large file, which must be a file that was started but not @@ -85,7 +101,7 @@ class TestConsoleTool(TestBase): # Clearing the account should remove the auth token # from the account info. - self._run_command(['clear_account'], '', '', 0) + self._run_command(['clear-account'], '', '', 0) assert self.account_info.get_account_auth_token() is None def test_buckets(self):
Underscore in command should be avoided
AFAIK I've never seen underscores in commands: `authorize_account`. Please consider moving to dashes: `authorize-account`.
See https://github.com/pallets/click and http://click.pocoo.org/5/why/
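The patch above hinges on one small naming helper plus accepting the legacy spelling on the command line; a self-contained sketch of the same idea (the `__main__` checks are illustrative, not from the repo):

```python
def mixed_case_to_hyphens(s):
    # "AuthorizeAccount" -> "authorize-account"
    return s[0].lower() + ''.join(c if c.islower() else '-' + c.lower() for c in s[1:])


def normalize_action(action):
    # Keep backward compatibility: "authorize_account" still resolves.
    return action.replace('_', '-')


if __name__ == '__main__':
    assert mixed_case_to_hyphens('AuthorizeAccount') == 'authorize-account'
    assert mixed_case_to_hyphens('ListFileNames') == 'list-file-names'
    assert normalize_action('list_file_names') == 'list-file-names'
```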
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_hyphen", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_underscore", "test/test_console_tool.py::TestConsoleTool::test_clear_account", "test/test_console_tool.py::TestConsoleTool::test_help_with_bad_args" ]
[ "test/test_console_tool.py::TestConsoleTool::test_authorize_with_bad_key", "test/test_console_tool.py::TestConsoleTool::test_bad_terminal", "test/test_console_tool.py::TestConsoleTool::test_bucket_info_from_json", "test/test_console_tool.py::TestConsoleTool::test_buckets", "test/test_console_tool.py::TestConsoleTool::test_cancel_all_large_file", "test/test_console_tool.py::TestConsoleTool::test_cancel_large_file", "test/test_console_tool.py::TestConsoleTool::test_files", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_defaults", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_explicit", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_parts", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_some", "test/test_console_tool.py::TestConsoleTool::test_sync", "test/test_console_tool.py::TestConsoleTool::test_sync_dry_run", "test/test_console_tool.py::TestConsoleTool::test_sync_syntax_error", "test/test_console_tool.py::TestConsoleTool::test_upload_large_file" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-12-16T22:38:57Z"
mit
Backblaze__B2_Command_Line_Tool-307
diff --git a/README.md b/README.md index 9ffca80..0548c49 100644 --- a/README.md +++ b/README.md @@ -47,6 +47,7 @@ this: b2 list-unfinished-large-files <bucketName> b2 ls [--long] [--versions] <bucketName> [<folderName>] b2 make-url <fileId> + b2 show-account-info b2 sync [--delete] [--keepDays N] [--skipNewer] [--replaceNewer] \ [--compareVersions <option>] [--threads N] [--noProgress] \ [--excludeRegex <regex> [--includeRegex <regex>]] [--dryRun] \ diff --git a/b2/console_tool.py b/b2/console_tool.py index a657532..b218880 100644 --- a/b2/console_tool.py +++ b/b2/console_tool.py @@ -629,6 +629,23 @@ class MakeUrl(Command): return 0 +class ShowAccountInfo(Command): + """ + b2 show-account-info + + Shows the account ID, key, auth token, and URLs. + """ + + def run(self, args): + account_info = self.api.account_info + self._print('Account ID: %s' % (account_info.get_account_id(),)) + self._print('Application Key: %s' % (account_info.get_application_key(),)) + self._print('Account Auth Token: %s' % (account_info.get_account_auth_token(),)) + self._print('API URL: %s' % (account_info.get_api_url(),)) + self._print('Download URL: %s' % (account_info.get_download_url(),)) + return 0 + + class Sync(Command): """ b2 sync [--delete] [--keepDays N] [--skipNewer] [--replaceNewer] \\ diff --git a/b2/sync/sync.py b/b2/sync/sync.py index eaba7a6..9dcf371 100644 --- a/b2/sync/sync.py +++ b/b2/sync/sync.py @@ -13,6 +13,7 @@ from __future__ import division import logging import re import six +import threading from ..exception import CommandError from ..utils import trace_call @@ -162,6 +163,40 @@ def count_files(local_folder, reporter): reporter.end_local() +class BoundedQueueExecutor(object): + """ + Wraps a futures.Executor and limits the number of requests that + can be queued at once. Requests to submit() tasks block until + there is room in the queue. + + The number of available slots in the queue is tracked with a + semaphore that is acquired before queueing an action, and + released when an action finishes. + """ + + def __init__(self, executor, queue_limit): + self.executor = executor + self.semaphore = threading.Semaphore(queue_limit) + + def submit(self, fcn, *args, **kwargs): + # Wait until there is room in the queue. + self.semaphore.acquire() + + # Wrap the action in a function that will release + # the semaphore after it runs. + def run_it(): + try: + fcn(*args, **kwargs) + finally: + self.semaphore.release() + + # Submit the wrapped action. + return self.executor.submit(run_it) + + def shutdown(self): + self.executor.shutdown() + + @trace_call(logger) def sync_folders( source_folder, dest_folder, args, now_millis, stdout, no_progress, max_workers, dry_run=False @@ -183,7 +218,12 @@ def sync_folders( # not the same as the executor in the API object, which is used for # uploads. The tasks in this executor wait for uploads. Putting them # in the same thread pool could lead to deadlock. - sync_executor = futures.ThreadPoolExecutor(max_workers=max_workers) + # + # We use an executor with a bounded queue to avoid using up lots of memory + # when syncing lots of files. + unbounded_executor = futures.ThreadPoolExecutor(max_workers=max_workers) + queue_limit = max_workers + 1000 + sync_executor = BoundedQueueExecutor(unbounded_executor, queue_limit=queue_limit) # First, start the thread that counts the local files. That's the operation # that should be fastest, and it provides scale for the progress reporting.
Backblaze/B2_Command_Line_Tool
4f2a17eb0342ba6efed8b97442dd20c4e80c1845
diff --git a/test/test_console_tool.py b/test/test_console_tool.py index 99adff5..f046017 100644 --- a/test/test_console_tool.py +++ b/test/test_console_tool.py @@ -452,6 +452,17 @@ class TestConsoleTool(TestBase): ], expected_stdout, '', 0 ) + def test_show_account_info(self): + self._authorize_account() + expected_stdout = ''' + Account ID: my-account + Application Key: good-app-key + Account Auth Token: AUTH:my-account + API URL: http://api.example.com + Download URL: http://download.example.com + ''' + self._run_command(['show-account-info'], expected_stdout, '', 0) + def test_sync(self): self._authorize_account() self._create_my_bucket() diff --git a/test/test_sync.py b/test/test_sync.py index 1bf26aa..63dd640 100644 --- a/test/test_sync.py +++ b/test/test_sync.py @@ -12,6 +12,8 @@ from __future__ import print_function import os import platform +import threading +import time import unittest import six @@ -21,7 +23,7 @@ from b2.exception import CommandError, DestFileNewer from b2.file_version import FileVersionInfo from b2.sync.folder import AbstractFolder, B2Folder, LocalFolder from b2.sync.file import File, FileVersion -from b2.sync.sync import make_folder_sync_actions, zip_folders +from b2.sync.sync import BoundedQueueExecutor, make_folder_sync_actions, zip_folders from b2.sync.folder_parser import parse_sync_folder from b2.utils import TempDir @@ -30,6 +32,11 @@ try: except ImportError: from mock import MagicMock +try: + import concurrent.futures as futures +except ImportError: + import futures + DAY = 86400000 # milliseconds TODAY = DAY * 100 # an arbitrary reference time for testing @@ -689,5 +696,78 @@ class TestMakeSyncActions(TestSync): self.assertEqual(expected_actions, [str(a) for a in actions]) +class TestBoundedQueueExecutor(TestBase): + def test_run_more_than_queue_size(self): + """ + Makes sure that the executor will run more jobs that the + queue size, which ensures that the semaphore gets released, + even if an exception is thrown. + """ + raw_executor = futures.ThreadPoolExecutor(1) + bounded_executor = BoundedQueueExecutor(raw_executor, 5) + + class Counter(object): + """ + Counts how many times run() is called. + """ + + def __init__(self): + self.counter = 0 + + def run(self): + """ + Always increments the counter. Sometimes raises an exception. + """ + self.counter += 1 + if self.counter % 2 == 0: + raise Exception('test') + + counter = Counter() + for _ in six.moves.range(10): + bounded_executor.submit(counter.run) + bounded_executor.shutdown() + self.assertEqual(10, counter.counter) + + def test_wait_for_running_jobs(self): + """ + Makes sure that no more than queue_limit workers are + running at once, which checks that the semaphore is + acquired before submitting an action. + """ + raw_executor = futures.ThreadPoolExecutor(2) + bounded_executor = BoundedQueueExecutor(raw_executor, 1) + assert_equal = self.assertEqual + + class CountAtOnce(object): + """ + Counts how many threads are running at once. + There should never be more than 1 because that's + the limit on the bounded executor. + """ + + def __init__(self): + self.running_at_once = 0 + self.lock = threading.Lock() + + def run(self): + with self.lock: + self.running_at_once += 1 + assert_equal(1, self.running_at_once) + # While we are sleeping here, no other actions should start + # running. If they do, they will increment the counter and + # fail the above assertion. 
+ time.sleep(0.05) + with self.lock: + self.running_at_once -= 1 + self.counter += 1 + if self.counter % 2 == 0: + raise Exception('test') + + count_at_once = CountAtOnce() + for _ in six.moves.range(5): + bounded_executor.submit(count_at_once.run) + bounded_executor.shutdown() + + if __name__ == '__main__': unittest.main()
Limit sync action queue length to avoid memory explosion
Found in #279.
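The core of the patch above is a semaphore-bounded wrapper around a thread pool, so submit() blocks instead of letting pending sync actions pile up in memory. A runnable sketch of the same mechanism (the queue limit of `max_workers + 1000` mirrors the patch; the demo task is illustrative):

```python
import threading
from concurrent import futures


class BoundedQueueExecutor(object):
    """Blocks submit() until there is room in the queue."""

    def __init__(self, executor, queue_limit):
        self.executor = executor
        self.semaphore = threading.Semaphore(queue_limit)

    def submit(self, fcn, *args, **kwargs):
        self.semaphore.acquire()              # wait for a free slot

        def run_it():
            try:
                fcn(*args, **kwargs)
            finally:
                self.semaphore.release()      # slot freed even if the action raises

        return self.executor.submit(run_it)

    def shutdown(self):
        self.executor.shutdown()


if __name__ == '__main__':
    max_workers = 10
    executor = BoundedQueueExecutor(futures.ThreadPoolExecutor(max_workers), max_workers + 1000)
    for i in range(5):
        executor.submit(print, 'action', i)   # demo task only
    executor.shutdown()
```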
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_console_tool.py::TestConsoleTool::test_authorize_with_bad_key", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_hyphen", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_underscore", "test/test_console_tool.py::TestConsoleTool::test_bad_terminal", "test/test_console_tool.py::TestConsoleTool::test_bucket_info_from_json", "test/test_console_tool.py::TestConsoleTool::test_buckets", "test/test_console_tool.py::TestConsoleTool::test_cancel_all_large_file", "test/test_console_tool.py::TestConsoleTool::test_cancel_large_file", "test/test_console_tool.py::TestConsoleTool::test_clear_account", "test/test_console_tool.py::TestConsoleTool::test_files", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_defaults", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_explicit", "test/test_console_tool.py::TestConsoleTool::test_help_with_bad_args", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_parts", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_some", "test/test_console_tool.py::TestConsoleTool::test_show_account_info", "test/test_console_tool.py::TestConsoleTool::test_sync", "test/test_console_tool.py::TestConsoleTool::test_sync_dry_run", "test/test_console_tool.py::TestConsoleTool::test_sync_syntax_error", "test/test_console_tool.py::TestConsoleTool::test_upload_large_file", "test/test_sync.py::TestLocalFolder::test_broken_symlink", "test/test_sync.py::TestLocalFolder::test_slash_sorting", "test/test_sync.py::TestB2Folder::test_empty", "test/test_sync.py::TestB2Folder::test_multiple_versions", "test/test_sync.py::TestParseSyncFolder::test_b2_double_slash", "test/test_sync.py::TestParseSyncFolder::test_b2_no_double_slash", "test/test_sync.py::TestParseSyncFolder::test_b2_no_folder", "test/test_sync.py::TestParseSyncFolder::test_b2_trailing_slash", "test/test_sync.py::TestParseSyncFolder::test_local", "test/test_sync.py::TestParseSyncFolder::test_local_trailing_slash", "test/test_sync.py::TestZipFolders::test_empty", "test/test_sync.py::TestZipFolders::test_one_empty", "test/test_sync.py::TestZipFolders::test_pass_reporter_to_folder", "test/test_sync.py::TestZipFolders::test_two", "test/test_sync.py::TestExclusions::test_file_exclusions", "test/test_sync.py::TestExclusions::test_file_exclusions_inclusions", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_delete", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_delete_hide_marker", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_old_delete", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_one_old", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_two_old", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_newer", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_older", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_equal", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal", 
"test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal_delete", "test/test_sync.py::TestMakeSyncActions::test_delete_b2", "test/test_sync.py::TestMakeSyncActions::test_delete_b2_multiple_versions", "test/test_sync.py::TestMakeSyncActions::test_delete_hide_b2_multiple_versions", "test/test_sync.py::TestMakeSyncActions::test_delete_hide_b2_multiple_versions_old", "test/test_sync.py::TestMakeSyncActions::test_delete_local", "test/test_sync.py::TestMakeSyncActions::test_empty_b2", "test/test_sync.py::TestMakeSyncActions::test_empty_local", "test/test_sync.py::TestMakeSyncActions::test_illegal_b2_to_b2", "test/test_sync.py::TestMakeSyncActions::test_illegal_delete_and_keep_days", "test/test_sync.py::TestMakeSyncActions::test_illegal_local_to_local", "test/test_sync.py::TestMakeSyncActions::test_illegal_skip_and_replace", "test/test_sync.py::TestMakeSyncActions::test_keep_days_no_change_with_old_file", "test/test_sync.py::TestMakeSyncActions::test_newer_b2", "test/test_sync.py::TestMakeSyncActions::test_newer_b2_clean_old_versions", "test/test_sync.py::TestMakeSyncActions::test_newer_b2_delete_old_versions", "test/test_sync.py::TestMakeSyncActions::test_newer_local", "test/test_sync.py::TestMakeSyncActions::test_no_delete_b2", "test/test_sync.py::TestMakeSyncActions::test_no_delete_local", "test/test_sync.py::TestMakeSyncActions::test_not_there_b2", "test/test_sync.py::TestMakeSyncActions::test_not_there_local", "test/test_sync.py::TestMakeSyncActions::test_older_b2", "test/test_sync.py::TestMakeSyncActions::test_older_b2_replace", "test/test_sync.py::TestMakeSyncActions::test_older_b2_replace_delete", "test/test_sync.py::TestMakeSyncActions::test_older_b2_skip", "test/test_sync.py::TestMakeSyncActions::test_older_local", "test/test_sync.py::TestMakeSyncActions::test_older_local_replace", "test/test_sync.py::TestMakeSyncActions::test_older_local_skip", "test/test_sync.py::TestMakeSyncActions::test_same_b2", "test/test_sync.py::TestMakeSyncActions::test_same_clean_old_versions", "test/test_sync.py::TestMakeSyncActions::test_same_delete_old_versions", "test/test_sync.py::TestMakeSyncActions::test_same_leave_old_versions", "test/test_sync.py::TestMakeSyncActions::test_same_local", "test/test_sync.py::TestBoundedQueueExecutor::test_run_more_than_queue_size", "test/test_sync.py::TestBoundedQueueExecutor::test_wait_for_running_jobs" ]
[]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_issue_reference", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-12-20T01:15:09Z"
mit
Backblaze__B2_Command_Line_Tool-332
diff --git a/b2/sync/policy.py b/b2/sync/policy.py index b8e7435..5b74f97 100644 --- a/b2/sync/policy.py +++ b/b2/sync/policy.py @@ -240,7 +240,6 @@ def make_b2_keep_days_actions( only the 25-day old version can be deleted. The 15 day-old version was visible 10 days ago. """ - prev_age_days = None deleting = False if dest_file is None: # B2 does not really store folders, so there is no need to hide @@ -250,8 +249,17 @@ def make_b2_keep_days_actions( # How old is this version? age_days = (now_millis - version.mod_time) / ONE_DAY_IN_MS - # We assume that the versions are ordered by time, newest first. - assert prev_age_days is None or prev_age_days <= age_days + # Mostly, the versions are ordered by time, newest first, + # BUT NOT ALWAYS. The mod time we have is the src_last_modified_millis + # from the file info (if present), or the upload start time + # (if not present). The user-specified src_last_modified_millis + # may not be in order. Because of that, we no longer + # assert that age_days is non-decreasing. + # + # Note that if there is an out-of-order date that is old enough + # to trigger deletions, all of the versions uploaded before that + # (the ones after it in the list) will be deleted, even if they + # aren't over the age threshold. # Do we need to hide this version? if version_index == 0 and source_file is None and version.action == 'upload': @@ -275,6 +283,3 @@ def make_b2_keep_days_actions( # age of this one? if keep_days < age_days: deleting = True - - # Remember this age for next time around the loop. - prev_age_days = age_days
Backblaze/B2_Command_Line_Tool
26ba7c389b732b2202da62a28826a893a8d47749
diff --git a/test/test_policy.py b/test/test_policy.py new file mode 100644 index 0000000..bcc0ec4 --- /dev/null +++ b/test/test_policy.py @@ -0,0 +1,77 @@ +###################################################################### +# +# File: test_policy +# +# Copyright 2017, Backblaze Inc. All Rights Reserved. +# +# License https://www.backblaze.com/using_b2_code.html +# +###################################################################### + +from b2.sync.file import File, FileVersion +from b2.sync.folder import B2Folder +from b2.sync.policy import make_b2_keep_days_actions +from .test_base import TestBase + +try: + from unittest.mock import MagicMock +except ImportError: + from mock import MagicMock + + +class TestMakeB2KeepDaysActions(TestBase): + def setUp(self): + self.keep_days = 7 + self.today = 100 * 86400 + self.one_day_millis = 86400 * 1000 + + def test_no_versions(self): + self.check_one_answer(True, [], []) + + def test_new_version_no_action(self): + self.check_one_answer(True, [(1, -5, 'upload')], []) + + def test_no_source_one_old_version_hides(self): + # An upload that is old gets deleted if there is no source file. + self.check_one_answer(False, [(1, -10, 'upload')], ['b2_hide(folder/a)']) + + def test_old_hide_causes_delete(self): + # A hide marker that is old gets deleted, as do the things after it. + self.check_one_answer( + True, [(1, -5, 'upload'), (2, -10, 'hide'), (3, -20, 'upload')], + ['b2_delete(folder/a, 2, (hide marker))', 'b2_delete(folder/a, 3, (old version))'] + ) + + def test_old_upload_causes_delete(self): + # An upload that is old stays if there is a source file, but things + # behind it go away. + self.check_one_answer( + True, [(1, -5, 'upload'), (2, -10, 'upload'), (3, -20, 'upload')], + ['b2_delete(folder/a, 3, (old version))'] + ) + + def test_out_of_order_dates(self): + # The one at date -3 will get deleted because the one before it is old. + self.check_one_answer( + True, [(1, -5, 'upload'), (2, -10, 'upload'), (3, -3, 'upload')], + ['b2_delete(folder/a, 3, (old version))'] + ) + + def check_one_answer(self, has_source, id_relative_date_action_list, expected_actions): + source_file = File('a', []) if has_source else None + dest_file_versions = [ + FileVersion(id_, 'a', self.today + relative_date * self.one_day_millis, action, 100) + for (id_, relative_date, action) in id_relative_date_action_list + ] + dest_file = File('a', dest_file_versions) + bucket = MagicMock() + api = MagicMock() + api.get_bucket_by_name.return_value = bucket + dest_folder = B2Folder('bucket-1', 'folder', api) + actual_actions = list( + make_b2_keep_days_actions( + source_file, dest_file, dest_folder, dest_folder, self.keep_days, self.today + ) + ) + actual_action_strs = [str(a) for a in actual_actions] + self.assertEqual(expected_actions, actual_action_strs)
CLI Sync errors
Hi all, after i finished my first sync to Cloud after 3 weeks, i have now errors while syncing new files to the Cloud. The following lines occurs after a few seconds when i start my CLI Command

```
C:\Program Files\Python36\Scripts>b2.exe sync --excludeRegex DfsrPrivate --threads 10 --keepDays 30 --replaceNewer \\?\D:\DFS\Daten b2://Nuernberg01/Daten
ERROR:b2.console_tool:ConsoleTool unexpected exception
Traceback (most recent call last):
  File "c:\program files\python36\lib\site-packages\b2\console_tool.py", line 992, in run_command
    return command.run(args)
  File "c:\program files\python36\lib\site-packages\b2\console_tool.py", line 781, in run
    dry_run=args.dryRun,
  File "c:\program files\python36\lib\site-packages\logfury\v0_1\trace_call.py", line 84, in wrapper
    return function(*wrapee_args, **wrapee_kwargs)
  File "c:\program files\python36\lib\site-packages\b2\sync\sync.py", line 251, in sync_folders
    source_folder, dest_folder, args, now_millis, reporter
  File "c:\program files\python36\lib\site-packages\b2\sync\sync.py", line 150, in make_folder_sync_actions
    sync_type, source_file, dest_file, source_folder, dest_folder, args, now_millis
  File "c:\program files\python36\lib\site-packages\b2\sync\sync.py", line 106, in make_file_sync_actions
    for action in policy.get_all_actions():
  File "c:\program files\python36\lib\site-packages\b2\sync\policy.py", line 104, in get_all_actions
    for action in self._get_hide_delete_actions():
  File "c:\program files\python36\lib\site-packages\b2\sync\policy.py", line 177, in _get_hide_delete_actions
    self._keepDays, self._now_millis
  File "c:\program files\python36\lib\site-packages\b2\sync\policy.py", line 254, in make_b2_keep_days_actions
    assert prev_age_days is None or prev_age_days <= age_days
AssertionError
Traceback (most recent call last):
  File "C:\Program Files\Python36\Scripts\b2-script.py", line 11, in <module>
    load_entry_point('b2==0.7.0', 'console_scripts', 'b2')()
  File "c:\program files\python36\lib\site-packages\b2\console_tool.py", line 1104, in main
    exit_status = ct.run_command(decoded_argv)
  File "c:\program files\python36\lib\site-packages\b2\console_tool.py", line 992, in run_command
    return command.run(args)
  File "c:\program files\python36\lib\site-packages\b2\console_tool.py", line 781, in run
    dry_run=args.dryRun,
  File "c:\program files\python36\lib\site-packages\logfury\v0_1\trace_call.py", line 84, in wrapper
    return function(*wrapee_args, **wrapee_kwargs)
  File "c:\program files\python36\lib\site-packages\b2\sync\sync.py", line 251, in sync_folders
    source_folder, dest_folder, args, now_millis, reporter
  File "c:\program files\python36\lib\site-packages\b2\sync\sync.py", line 150, in make_folder_sync_actions
    sync_type, source_file, dest_file, source_folder, dest_folder, args, now_millis
  File "c:\program files\python36\lib\site-packages\b2\sync\sync.py", line 106, in make_file_sync_actions
    for action in policy.get_all_actions():
  File "c:\program files\python36\lib\site-packages\b2\sync\policy.py", line 104, in get_all_actions
    for action in self._get_hide_delete_actions():
  File "c:\program files\python36\lib\site-packages\b2\sync\policy.py", line 177, in _get_hide_delete_actions
    self._keepDays, self._now_millis
  File "c:\program files\python36\lib\site-packages\b2\sync\policy.py", line 254, in make_b2_keep_days_actions
    assert prev_age_days is None or prev_age_days <= age_days
AssertionError
```

I have no Idea what to do?
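The assertion fires because the mod time of each version is taken from `src_last_modified_millis` in the file info when present (upload time otherwise), so a "newest first" listing is not guaranteed to have non-decreasing ages; the patch above therefore drops the assertion. A small sketch of the failing assumption, using the same relative dates as the regression test in the test patch:

```python
ONE_DAY_IN_MS = 86400 * 1000
now_millis = 100 * 86400 * 1000

# Versions listed "newest first", but with user-supplied source mod times
# of -5, -10 and -3 days (as in test_out_of_order_dates).
mod_times = [now_millis - d * ONE_DAY_IN_MS for d in (5, 10, 3)]
ages_in_days = [(now_millis - m) / ONE_DAY_IN_MS for m in mod_times]

print(ages_in_days)                          # [5.0, 10.0, 3.0]
print(ages_in_days == sorted(ages_in_days))  # False -> the old assert would fail
```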
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_policy.py::TestMakeB2KeepDaysActions::test_out_of_order_dates" ]
[ "test/test_policy.py::TestMakeB2KeepDaysActions::test_new_version_no_action", "test/test_policy.py::TestMakeB2KeepDaysActions::test_no_source_one_old_version_hides", "test/test_policy.py::TestMakeB2KeepDaysActions::test_no_versions", "test/test_policy.py::TestMakeB2KeepDaysActions::test_old_hide_causes_delete", "test/test_policy.py::TestMakeB2KeepDaysActions::test_old_upload_causes_delete" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-03-20T21:04:08Z"
mit
Backblaze__B2_Command_Line_Tool-420
diff --git a/b2/sync/scan_policies.py b/b2/sync/scan_policies.py index 198c079..dfb9413 100644 --- a/b2/sync/scan_policies.py +++ b/b2/sync/scan_policies.py @@ -27,10 +27,45 @@ class RegexSet(object): return any(c.match(s) is not None for c in self._compiled_list) +def convert_dir_regex_to_dir_prefix_regex(dir_regex): + """ + The patterns used to match directory names (and file names) are allowed + to match a prefix of the name. This 'feature' was unintentional, but is + being retained for compatibility. + + This means that a regex that matches a directory name can't be used directly + to match against a file name and test whether the file should be excluded + because it matches the directory. + + The pattern 'photos' will match directory names 'photos' and 'photos2', + and should exclude files 'photos/kitten.jpg', and 'photos2/puppy.jpg'. + It should not exclude 'photos.txt', because there is no directory name + that matches. + + On the other hand, the pattern 'photos$' should match 'photos/kitten.jpg', + but not 'photos2/puppy.jpg', nor 'photos.txt' + + If the original regex is valid, there are only two cases to consider: + either the regex ends in '$' or does not. + """ + if dir_regex.endswith('$'): + return dir_regex[:-1] + r'/' + else: + return dir_regex + r'.*?/' + + class ScanPoliciesManager(object): """ Policy object used when scanning folders for syncing, used to decide which files to include in the list of files to be synced. + + Code that scans through files should at least use should_exclude_file() + to decide whether each file should be included; it will check include/exclude + patterns for file names, as well as patterns for excluding directeries. + + Code that scans may optionally use should_exclude_directory() to test whether + it can skip a directory completely and not bother listing the files and + sub-directories in it. """ def __init__( @@ -40,6 +75,9 @@ class ScanPoliciesManager(object): include_file_regexes=tuple(), ): self._exclude_dir_set = RegexSet(exclude_dir_regexes) + self._exclude_file_because_of_dir_set = RegexSet( + map(convert_dir_regex_to_dir_prefix_regex, exclude_dir_regexes) + ) self._exclude_file_set = RegexSet(exclude_file_regexes) self._include_file_set = RegexSet(include_file_regexes) @@ -51,8 +89,12 @@ class ScanPoliciesManager(object): being scanned. :return: True iff excluded. """ - return self._exclude_file_set.matches(file_path) and \ - not self._include_file_set.matches(file_path) + exclude_because_of_dir = self._exclude_file_because_of_dir_set.matches(file_path) + exclude_because_of_file = ( + self._exclude_file_set.matches(file_path) and + not self._include_file_set.matches(file_path) + ) + return exclude_because_of_dir or exclude_because_of_file def should_exclude_directory(self, dir_path): """
Backblaze/B2_Command_Line_Tool
15a60ad1c71b75366061e4f742ef52eb9dcc23e7
diff --git a/test/test_scan_policies.py b/test/test_scan_policies.py index f3bb797..853730d 100644 --- a/test/test_scan_policies.py +++ b/test/test_scan_policies.py @@ -30,8 +30,20 @@ class TestScanPolicies(TestBase): def test_exclude_dir(self): policy = ScanPoliciesManager( - include_file_regexes=['.*[.]txt$'], exclude_dir_regexes=['alfa$'] + include_file_regexes=['.*[.]txt$'], exclude_dir_regexes=['alfa', 'bravo$'] ) self.assertTrue(policy.should_exclude_directory('alfa')) - self.assertFalse(policy.should_exclude_directory('alfa2')) - self.assertFalse(policy.should_exclude_directory('alfa/hello')) + self.assertTrue(policy.should_exclude_directory('alfa2')) + self.assertTrue(policy.should_exclude_directory('alfa/hello')) + + self.assertTrue(policy.should_exclude_directory('bravo')) + self.assertFalse(policy.should_exclude_directory('bravo2')) + self.assertFalse(policy.should_exclude_directory('bravo/hello')) + + self.assertTrue(policy.should_exclude_file('alfa/foo')) + self.assertTrue(policy.should_exclude_file('alfa2/hello/foo')) + self.assertTrue(policy.should_exclude_file('alfa/hello/foo.txt')) + + self.assertTrue(policy.should_exclude_file('bravo/foo')) + self.assertFalse(policy.should_exclude_file('bravo2/hello/foo')) + self.assertTrue(policy.should_exclude_file('bravo/hello/foo.txt'))
--excludeDirRegex does not work when source is B2
The new filtering that lets you exclude an entire directory works in the `LocalFolder` class, but not the `B2Folder` class. I think there are two possible approaches to fixing it: (1) change B2Folder to simulate the existence of directories, and check them for exclusion, or (2) extend `ScanPoliciesManager.should_exclude_file` to also test whether any of the directories in the path are excluded. I like #2, but I think it would need optimization to avoid checking every parent directory of every file.
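The patch above follows approach (2): each directory-exclusion regex is converted once into a file-path prefix regex, so matching a file name never requires walking its parent directories. A runnable sketch of that conversion (the sample paths come from the patch's own docstring):

```python
import re


def convert_dir_regex_to_dir_prefix_regex(dir_regex):
    # 'photos$' must exclude 'photos/kitten.jpg' but not 'photos2/puppy.jpg';
    # an unanchored 'photos' is allowed to match the prefix 'photos2' as well.
    if dir_regex.endswith('$'):
        return dir_regex[:-1] + r'/'
    return dir_regex + r'.*?/'


excluded = [re.compile(convert_dir_regex_to_dir_prefix_regex(r)) for r in ('photos$',)]
for path in ('photos/kitten.jpg', 'photos2/puppy.jpg', 'photos.txt'):
    print(path, any(c.match(path) for c in excluded))   # True, False, False
```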
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_scan_policies.py::TestScanPolicies::test_exclude_dir" ]
[ "test/test_scan_policies.py::TestScanPolicies::test_default", "test/test_scan_policies.py::TestScanPolicies::test_exclude_include" ]
{ "failed_lite_validators": [ "has_issue_reference" ], "has_test_patch": true, "is_lite": false }
"2018-03-07T02:01:24Z"
mit
Backblaze__B2_Command_Line_Tool-488
diff --git a/b2/api.py b/b2/api.py index 017f5ba..a1400e1 100644 --- a/b2/api.py +++ b/b2/api.py @@ -205,20 +205,27 @@ class B2Api(object): def get_bucket_by_name(self, bucket_name): """ - Returns the bucket_id for the given bucket_name. + Returns the Bucket for the given bucket_name. - If we don't already know it from the cache, try fetching it from - the B2 service. + :param bucket_name: The name of the bucket to return. + :return: a Bucket object + :raises NonExistentBucket: if the bucket does not exist in the account """ - # If we can get it from the stored info, do that. + # Give a useful warning if the current application key does not + # allow access to the named bucket. self.check_bucket_restrictions(bucket_name) + + # First, try the cache. id_ = self.cache.get_bucket_id_or_none_from_bucket_name(bucket_name) if id_ is not None: return Bucket(self, id_, name=bucket_name) - for bucket in self.list_buckets(): - if bucket.name == bucket_name: - return bucket + # Second, ask the service + for bucket in self.list_buckets(bucket_name=bucket_name): + assert bucket.name == bucket_name + return bucket + + # There is no such bucket. raise NonExistentBucket(bucket_name) def delete_bucket(self, bucket): @@ -244,25 +251,14 @@ class B2Api(object): :param bucket_name: Optional: the name of the one bucket to return. :return: A list of Bucket objects. """ - account_id = self.account_info.get_account_id() + # Give a useful warning if the current application key does not + # allow access to the named bucket. self.check_bucket_restrictions(bucket_name) - # TEMPORARY work around until we fix the API endpoint bug that things requests - # with a bucket name are not authorized. When it's fixed, well just pass the - # bucket name (or None) to the raw API. - if bucket_name is None: - bucket_id = None - else: - allowed = self.account_info.get_allowed() - if allowed['bucketId'] is not None: - # We just checked that if there is a bucket restriction we have a bucket name - # and it matches. So if there's a restriction we know that's the bucket we're - # looking for. - bucket_id = allowed['bucketId'] - else: - bucket_id = self.get_bucket_by_name(bucket_name).id_ + account_id = self.account_info.get_account_id() + self.check_bucket_restrictions(bucket_name) - response = self.session.list_buckets(account_id, bucket_id=bucket_id) + response = self.session.list_buckets(account_id, bucket_name=bucket_name) buckets = BucketFactory.from_api_response(self, response) if bucket_name is not None: diff --git a/b2/raw_simulator.py b/b2/raw_simulator.py index 9731370..0fcb999 100644 --- a/b2/raw_simulator.py +++ b/b2/raw_simulator.py @@ -767,18 +767,40 @@ class RawSimulator(AbstractRawApi): self.file_id_to_bucket_id[response['fileId']] = bucket_id return response - def list_buckets(self, api_url, account_auth_token, account_id, bucket_id=None): - self._assert_account_auth(api_url, account_auth_token, account_id, 'listBuckets', bucket_id) + def list_buckets( + self, api_url, account_auth_token, account_id, bucket_id=None, bucket_name=None + ): + # First, map the bucket name to a bucket_id, so that we can check auth. 
+ if bucket_name is None: + bucket_id_for_auth = bucket_id + else: + bucket_id_for_auth = self._get_bucket_id_or_none_for_bucket_name(bucket_name) + self._assert_account_auth( + api_url, account_auth_token, account_id, 'listBuckets', bucket_id_for_auth + ) + + # Do the query sorted_buckets = [ - self.bucket_name_to_bucket[bucket_name] - for bucket_name in sorted(six.iterkeys(self.bucket_name_to_bucket)) + self.bucket_name_to_bucket[name] + for name in sorted(six.iterkeys(self.bucket_name_to_bucket)) ] bucket_list = [ bucket.bucket_dict() - for bucket in sorted_buckets if bucket_id is None or bucket.bucket_id == bucket_id + for bucket in sorted_buckets if self._bucket_matches(bucket, bucket_id, bucket_name) ] return dict(buckets=bucket_list) + def _get_bucket_id_or_none_for_bucket_name(self, bucket_name): + for bucket in six.itervalues(self.bucket_name_to_bucket): + if bucket.bucket_name == bucket_name: + return bucket.bucket_id + + def _bucket_matches(self, bucket, bucket_id, bucket_name): + return ( + (bucket_id is None or bucket.bucket_id == bucket_id) and + (bucket_name is None or bucket.bucket_name == bucket_name) + ) + def list_file_names( self, api_url, account_auth, bucket_id, start_file_name=None, max_file_count=None ):
Backblaze/B2_Command_Line_Tool
4154652165dd475d79de606abd70b6debc4596d4
diff --git a/test/test_api.py b/test/test_api.py index adcdb45..f72c336 100644 --- a/test/test_api.py +++ b/test/test_api.py @@ -53,6 +53,16 @@ class TestApi(TestBase): [b.name for b in self.api.list_buckets(bucket_name=bucket1.name)], ) + def test_get_bucket_by_name_with_bucket_restriction(self): + self._authorize_account() + bucket1 = self.api.create_bucket('bucket1', 'allPrivate') + key = self.api.create_key(['listBuckets'], 'key1', bucket_id=bucket1.id_) + self.api.authorize_account('production', key['applicationKeyId'], key['applicationKey']) + self.assertEqual( + bucket1.id_, + self.api.get_bucket_by_name('bucket1').id_, + ) + def test_list_buckets_with_restriction_and_wrong_name(self): self._authorize_account() bucket1 = self.api.create_bucket('bucket1', 'allPrivate') @@ -72,4 +82,4 @@ class TestApi(TestBase): self.api.list_buckets() def _authorize_account(self): - self.api.authorize_account('production', self.account_id, self.master_key) \ No newline at end of file + self.api.authorize_account('production', self.account_id, self.master_key)
b2.api.B2Api.get_bucket_by_name does not work with bucket-scoped application keys
I am using Duplicity 0.7.17 and b2 1.3.2. duplicity is executed using

```
duplicity \
  --verbosity debug \
  /backup \
  "b2://$B2_APP_KEY_ID:$B2_APP_KEY@$B2_BUCKET_NAME"
```

Where `$B2_APP_KEY_ID` and `$B2_APP_KEY` are URL-encoded strings which were output from a call to:

```
b2 create-key \
  --bucket "$B2_BUCKET_NAME" \
  "$B2_KEY_NAME" \
  listFiles,readFiles,writeFiles,deleteFiles,listBuckets
```

Duplicity fails with the following traceback:

```
Traceback (innermost last):
  File "/usr/local/bin/duplicity", line 1555, in <module>
    with_tempdir(main)
  File "/usr/local/bin/duplicity", line 1541, in with_tempdir
    fn()
  File "/usr/local/bin/duplicity", line 1380, in main
    action = commandline.ProcessCommandLine(sys.argv[1:])
  File "/usr/local/lib/python2.7/dist-packages/duplicity/commandline.py", line 1135, in ProcessCommandLine
    backup, local_pathname = set_backend(args[0], args[1])
  File "/usr/local/lib/python2.7/dist-packages/duplicity/commandline.py", line 1010, in set_backend
    globals.backend = backend.get_backend(bend)
  File "/usr/local/lib/python2.7/dist-packages/duplicity/backend.py", line 223, in get_backend
    obj = get_backend_object(url_string)
  File "/usr/local/lib/python2.7/dist-packages/duplicity/backend.py", line 209, in get_backend_object
    return factory(pu)
  File "/usr/local/lib/python2.7/dist-packages/duplicity/backends/b2backend.py", line 87, in __init__
    self.bucket = self.service.get_bucket_by_name(bucket_name)
  File "/usr/local/lib/python2.7/dist-packages/b2/api.py", line 222, in get_bucket_by_name
    for bucket in self.list_buckets():
  File "/usr/local/lib/python2.7/dist-packages/logfury/v0_1/trace_call.py", line 84, in wrapper
    return function(*wrapee_args, **wrapee_kwargs)
  File "/usr/local/lib/python2.7/dist-packages/b2/api.py", line 251, in list_buckets
    self.check_bucket_restrictions(bucket_name)
  File "/usr/local/lib/python2.7/dist-packages/logfury/v0_1/trace_call.py", line 84, in wrapper
    return function(*wrapee_args, **wrapee_kwargs)
  File "/usr/local/lib/python2.7/dist-packages/b2/api.py", line 375, in check_bucket_restrictions
    raise RestrictedBucket(allowed_bucket_name)
RestrictedBucket: Application key is restricted to bucket: pc-backup
```

Internally<sup>[[1]](https://bazaar.launchpad.net/~duplicity-team/duplicity/0.8-series/view/head:/duplicity/backends/b2backend.py#L88)</sup>, Duplicity uses `b2.api.B2Api.get_bucket_by_name`, passing in the name of the bucket. This method calls `b2.api.B2Api.list_buckets` without passing the bucket name, so we get the permission error indicated above. This looks related to changes in #474.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_api.py::TestApi::test_get_bucket_by_name_with_bucket_restriction" ]
[ "test/test_api.py::TestApi::test_list_buckets", "test/test_api.py::TestApi::test_list_buckets_with_name", "test/test_api.py::TestApi::test_list_buckets_with_restriction", "test/test_api.py::TestApi::test_list_buckets_with_restriction_and_no_name", "test/test_api.py::TestApi::test_list_buckets_with_restriction_and_wrong_name" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2018-08-09T15:22:12Z"
mit
Backblaze__B2_Command_Line_Tool-508
diff --git a/README.md b/README.md index 0391bfd..c07a186 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,7 @@ this: [--threads N] [--noProgress] [--dryRun ] [--allowEmptySource ] \ [--excludeRegex <regex> [--includeRegex <regex>]] \ [--excludeDirRegex <regex>] \ + [--excludeAllSymlinks ] \ <source> <destination> b2 update-bucket [--bucketInfo <json>] [--corsRules <json>] [--lifecycleRules <json>] <bucketName> [allPublic | allPrivate] b2 upload-file [--sha1 <sha1sum>] [--contentType <contentType>] \ diff --git a/b2/bucket.py b/b2/bucket.py index b148b96..1873cbc 100644 --- a/b2/bucket.py +++ b/b2/bucket.py @@ -444,8 +444,11 @@ class Bucket(object): part_ranges = choose_part_ranges(content_length, minimum_part_size) # Check for unfinished files with same name - unfinished_file, finished_parts = self._find_unfinished_file( - upload_source, file_name, file_info, part_ranges + unfinished_file, finished_parts = self._find_unfinished_file_if_possible( + upload_source, + file_name, + file_info, + part_ranges, ) # Tell B2 we're going to upload a file if necessary @@ -477,40 +480,43 @@ class Bucket(object): response = self.api.session.finish_large_file(file_id, part_sha1_array) return FileVersionInfoFactory.from_api_response(response) - def _find_unfinished_file(self, upload_source, file_name, file_info, part_ranges): + def _find_unfinished_file_if_possible(self, upload_source, file_name, file_info, part_ranges): """ - Find an unfinished file which may be used to resume a large file upload. The + Find an unfinished file that may be used to resume a large file upload. The file is found using the filename and comparing the uploaded parts against the local file. + + This is only possible if the application key being used allows listFiles access. """ - for file_ in self.list_unfinished_large_files(): - if file_.file_name == file_name and file_.file_info == file_info: - files_match = True - finished_parts = {} - for part in self.list_parts(file_.file_id): - # Compare part sizes - offset, part_length = part_ranges[part.part_number - 1] - if part_length != part.content_length: - files_match = False - break - - # Compare hash - with upload_source.open() as f: - f.seek(offset) - sha1_sum = hex_sha1_of_stream(f, part_length) - if sha1_sum != part.content_sha1: - files_match = False - break - - # Save part - finished_parts[part.part_number] = part - - # Skip not matching files or unfinished files with no uploaded parts - if not files_match or not finished_parts: - continue - - # Return first matched file - return file_, finished_parts + if 'listFiles' in self.api.account_info.get_allowed()['capabilities']: + for file_ in self.list_unfinished_large_files(): + if file_.file_name == file_name and file_.file_info == file_info: + files_match = True + finished_parts = {} + for part in self.list_parts(file_.file_id): + # Compare part sizes + offset, part_length = part_ranges[part.part_number - 1] + if part_length != part.content_length: + files_match = False + break + + # Compare hash + with upload_source.open() as f: + f.seek(offset) + sha1_sum = hex_sha1_of_stream(f, part_length) + if sha1_sum != part.content_sha1: + files_match = False + break + + # Save part + finished_parts[part.part_number] = part + + # Skip not matching files or unfinished files with no uploaded parts + if not files_match or not finished_parts: + continue + + # Return first matched file + return file_, finished_parts return None, {} def _upload_part( diff --git a/b2/console_tool.py b/b2/console_tool.py index 0eb38c8..063d919 100644 --- 
a/b2/console_tool.py +++ b/b2/console_tool.py @@ -137,7 +137,7 @@ class Command(object): FORBID_LOGGING_ARGUMENTS = False # Parsers for each argument. Each should be a function that - # takes a string and returns the vaule. + # takes a string and returns the value. ARG_PARSER = {} def __init__(self, console_tool): @@ -287,7 +287,7 @@ class CancelAllUnfinishedLargeFiles(Command): b2 cancel-all-unfinished-large-files <bucketName> Lists all large files that have been started but not - finsished and cancels them. Any parts that have been + finished and cancels them. Any parts that have been uploaded will be deleted. Requires capability: writeFiles @@ -445,9 +445,9 @@ class DeleteFileVersion(Command): Specifying the fileName is more efficient than leaving it out. If you omit the fileName, it requires an initial query to B2 to get the file name, before making the call to delete the - file. + file. This extra query requires the readFiles capability. - Requires capability: deleteFiles + Requires capability: deleteFiles, readFiles (if file name not provided) """ OPTIONAL_BEFORE = ['fileName'] @@ -1016,6 +1016,7 @@ class Sync(Command): [--threads N] [--noProgress] [--dryRun ] [--allowEmptySource ] \\ [--excludeRegex <regex> [--includeRegex <regex>]] \\ [--excludeDirRegex <regex>] \\ + [--excludeAllSymlinks ] \\ <source> <destination> Copies multiple files from source to destination. Optionally @@ -1056,6 +1057,9 @@ class Sync(Command): Note that --includeRegex cannot be used without --excludeRegex. + You can specify --excludeAllSymlinks to skip symlinks when + syncing from a local source. + When a directory is excluded by using --excludeDirRegex, all of the files within it are excluded, even if they match an --includeRegex pattern. This means that there is no need to look inside excluded @@ -1067,7 +1071,7 @@ class Sync(Command): a trailing '/', so don't include on in your regular expression. Multiple regex rules can be applied by supplying them as pipe - delimitered instructions. Note that the regex for this command + delimited instructions. Note that the regex for this command is Python regex. Reference: https://docs.python.org/2/library/re.html. 
Regular expressions are considered a match if they match a substring @@ -1132,7 +1136,13 @@ class Sync(Command): """ OPTION_FLAGS = [ - 'delete', 'noProgress', 'skipNewer', 'replaceNewer', 'dryRun', 'allowEmptySource' + 'delete', + 'noProgress', + 'skipNewer', + 'replaceNewer', + 'dryRun', + 'allowEmptySource', + 'excludeAllSymlinks', ] OPTION_ARGS = ['keepDays', 'threads', 'compareVersions', 'compareThreshold'] REQUIRED = ['source', 'destination'] @@ -1156,6 +1166,7 @@ class Sync(Command): exclude_dir_regexes=args.excludeDirRegex, exclude_file_regexes=args.excludeRegex, include_file_regexes=args.includeRegex, + exclude_all_symlinks=args.excludeAllSymlinks, ) sync_folders( source_folder=source, diff --git a/b2/sync/folder.py b/b2/sync/folder.py index 609e279..fbbd89a 100644 --- a/b2/sync/folder.py +++ b/b2/sync/folder.py @@ -169,6 +169,10 @@ class LocalFolder(AbstractFolder): if not is_file_readable(local_path, reporter): continue + if policies_manager.exclude_all_symlinks and os.path.islink(local_path): + reporter.symlink_skipped(local_path) + continue + if os.path.isdir(local_path): name += six.u('/') if policies_manager.should_exclude_directory(b2_path): diff --git a/b2/sync/report.py b/b2/sync/report.py index d1f0ea8..f46dd03 100644 --- a/b2/sync/report.py +++ b/b2/sync/report.py @@ -198,6 +198,9 @@ class SyncReport(object): 'WARNING: %s could not be accessed (no permissions to read?)' % (path,) ) + def symlink_skipped(self, path): + pass + class SyncFileReporter(AbstractProgressListener): """ diff --git a/b2/sync/scan_policies.py b/b2/sync/scan_policies.py index dfb9413..d3d2299 100644 --- a/b2/sync/scan_policies.py +++ b/b2/sync/scan_policies.py @@ -73,6 +73,7 @@ class ScanPoliciesManager(object): exclude_dir_regexes=tuple(), exclude_file_regexes=tuple(), include_file_regexes=tuple(), + exclude_all_symlinks=False, ): self._exclude_dir_set = RegexSet(exclude_dir_regexes) self._exclude_file_because_of_dir_set = RegexSet( @@ -80,6 +81,7 @@ class ScanPoliciesManager(object): ) self._exclude_file_set = RegexSet(exclude_file_regexes) self._include_file_set = RegexSet(include_file_regexes) + self.exclude_all_symlinks = exclude_all_symlinks def should_exclude_file(self, file_path): """ diff --git a/requirements-setup.txt b/requirements-setup.txt index abf8187..57d273d 100644 --- a/requirements-setup.txt +++ b/requirements-setup.txt @@ -1,3 +1,3 @@ nose setuptools -twine +twine; python_version >= '2.7'
Backblaze/B2_Command_Line_Tool
fe0638e578aac6164e76a43a19d900cca2278ace
diff --git a/test/test_bucket.py b/test/test_bucket.py index 5d4633f..2601d28 100644 --- a/test/test_bucket.py +++ b/test/test_bucket.py @@ -242,8 +242,8 @@ class TestLs(TestCaseWithBucket): self.bucket.upload_bytes(data, 'ccc') expected = [ ('9998', 'bb/1', 11, 'upload', None), ('9995', 'bb/2', 11, 'upload', None), - ('9996', 'bb/2', 11, 'upload', None), ('9997', 'bb/2', 11, 'upload', - None), ('9994', 'bb/3', 11, 'upload', None) + ('9996', 'bb/2', 11, 'upload', None), ('9997', 'bb/2', 11, 'upload', None), + ('9994', 'bb/3', 11, 'upload', None) ] actual = [ (info.id_, info.file_name, info.size, info.action, folder) diff --git a/test/test_console_tool.py b/test/test_console_tool.py index 3dbcc66..6ef2af7 100644 --- a/test/test_console_tool.py +++ b/test/test_console_tool.py @@ -1059,6 +1059,38 @@ class TestConsoleTool(TestBase): ''' % (mtime) self._run_command(['list_file_names', 'my-bucket'], expected_stdout, '', 0) + def test_sync_exclude_all_symlinks(self): + self._authorize_account() + self._create_my_bucket() + + with TempDir() as temp_dir: + self._make_local_file(temp_dir, 'test.txt') + os.symlink('test.txt', os.path.join(temp_dir, 'alink')) + expected_stdout = ''' + upload test.txt + ''' + + command = [ + 'sync', '--threads', '1', '--noProgress', '--excludeAllSymlinks', temp_dir, + 'b2://my-bucket' + ] + self._run_command(command, expected_stdout, '', 0) + + def test_sync_dont_exclude_all_symlinks(self): + self._authorize_account() + self._create_my_bucket() + + with TempDir() as temp_dir: + self._make_local_file(temp_dir, 'test.txt') + os.symlink('test.txt', os.path.join(temp_dir, 'alink')) + expected_stdout = ''' + upload alink + upload test.txt + ''' + + command = ['sync', '--threads', '1', '--noProgress', temp_dir, 'b2://my-bucket'] + self._run_command(command, expected_stdout, '', 0) + def test_ls(self): self._authorize_account() self._create_my_bucket() diff --git a/test/test_raw_api.py b/test/test_raw_api.py index cf44d20..148ba17 100644 --- a/test/test_raw_api.py +++ b/test/test_raw_api.py @@ -44,8 +44,9 @@ class TestRawAPIFilenames(TestBase): :param exception_message: regexp that matches the exception's detailed message """ print( - u"Filename \"{0}\" should raise UnusableFileName(\".*{1}.*\")." 
- .format(filename, exception_message) + u"Filename \"{0}\" should raise UnusableFileName(\".*{1}.*\").".format( + filename, exception_message + ) ) with self.assertRaisesRegexp(UnusableFileName, exception_message): self.raw_api.check_b2_filename(filename) diff --git a/test/test_sync.py b/test/test_sync.py index 4c8f856..369dddc 100644 --- a/test/test_sync.py +++ b/test/test_sync.py @@ -412,6 +412,7 @@ class FakeArgs(object): debugLogs=True, dryRun=False, allowEmptySource=False, + excludeAllSymlinks=False, ): self.delete = delete self.keepDays = keepDays @@ -431,6 +432,7 @@ class FakeArgs(object): self.debugLogs = debugLogs self.dryRun = dryRun self.allowEmptySource = allowEmptySource + self.excludeAllSymlinks = excludeAllSymlinks def b2_file(name, mod_times, size=10): @@ -456,8 +458,8 @@ def b2_file(name, mod_times, size=10): """ versions = [ FileVersion( - 'id_%s_%d' % (name[0], abs(mod_time)), 'folder/' + name, abs(mod_time), 'upload' - if 0 < mod_time else 'hide', size + 'id_%s_%d' % (name[0], abs(mod_time)), 'folder/' + name, abs(mod_time), + 'upload' if 0 < mod_time else 'hide', size ) for mod_time in mod_times ] # yapf disable return File(name, versions) @@ -496,6 +498,7 @@ class TestExclusions(TestSync): exclude_dir_regexes=fakeargs.excludeDirRegex, exclude_file_regexes=fakeargs.excludeRegex, include_file_regexes=fakeargs.includeRegex, + exclude_all_symlinks=fakeargs.excludeAllSymlinks ) actions = list( make_folder_sync_actions( diff --git a/test_b2_command_line.py b/test_b2_command_line.py index e04c21a..d6a5175 100644 --- a/test_b2_command_line.py +++ b/test_b2_command_line.py @@ -680,6 +680,38 @@ def _sync_test_using_dir(b2_tool, bucket_name, dir_): ], file_version_summary(file_versions) ) + # confirm symlink is skipped + write_file(p('linktarget'), b'hello') + os.symlink('linktarget', p('alink')) + + b2_tool.should_succeed( + ['sync', '--noProgress', '--excludeAllSymlinks', dir_path, b2_sync_point], + ) + file_versions = b2_tool.list_file_versions(bucket_name) + should_equal( + [ + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'linktarget', + ], + file_version_summary(file_versions), + ) + + # confirm symlink target is uploaded (with symlink's name) + b2_tool.should_succeed(['sync', '--noProgress', dir_path, b2_sync_point]) + file_versions = b2_tool.list_file_versions(bucket_name) + should_equal( + [ + '+ ' + prefix + 'alink', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'c', + '+ ' + prefix + 'linktarget', + ], + file_version_summary(file_versions), + ) + def sync_down_test(b2_tool, bucket_name): sync_down_helper(b2_tool, bucket_name, 'sync')
Uploading a large file fails with capabilities listBuckets,writeFiles

The tool is trying to look for incomplete uploads by listing unfinished large files, but that requires listFiles capability.

```
$ b2 upload-file --noProgress bwb-ca001 250MB.txt large-250.txt
ERROR: unauthorized for application key with capabilities 'listBuckets,writeFiles', restricted to bucket 'bwb-ca001' (unauthorized)
```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_console_tool.py::TestConsoleTool::test_sync_exclude_all_symlinks", "test/test_sync.py::TestExclusions::test_file_exclusions_with_delete", "test/test_sync.py::TestExclusions::test_file_exclusions_inclusions_with_delete" ]
[ "test/test_console_tool.py::TestConsoleTool::test_sync_empty_folder_when_not_enabled", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_explicit", "test/test_console_tool.py::TestConsoleTool::test_bucket_missing_for_bucket_key", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_underscore", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_with_versions", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_defaults", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_one_item_show_size", "test/test_console_tool.py::TestConsoleTool::test_buckets", "test/test_console_tool.py::TestConsoleTool::test_help_with_bad_args", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_url_with_encoding", "test/test_console_tool.py::TestConsoleTool::test_sync_empty_folder_when_enabled", "test/test_console_tool.py::TestConsoleTool::test_create_bucket_key_and_authorize_with_it", "test/test_console_tool.py::TestConsoleTool::test_cancel_all_large_file", "test/test_console_tool.py::TestConsoleTool::test_get_bucket", "test/test_console_tool.py::TestConsoleTool::test_upload_large_file", "test/test_console_tool.py::TestConsoleTool::test_restrictions", "test/test_console_tool.py::TestConsoleTool::test_list_buckets_not_allowed_for_app_key", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_url", "test/test_console_tool.py::TestConsoleTool::test_sync_dont_exclude_all_symlinks", "test/test_console_tool.py::TestConsoleTool::test_authorize_key_without_list_buckets", "test/test_console_tool.py::TestConsoleTool::test_ls_for_restricted_bucket", "test/test_console_tool.py::TestConsoleTool::test_cancel_large_file", "test/test_console_tool.py::TestConsoleTool::test_sync", "test/test_console_tool.py::TestConsoleTool::test_create_key_and_authorize_with_it", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_parts", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_with_folders", "test/test_console_tool.py::TestConsoleTool::test_bucket_info_from_json", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_with_hidden", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_some", "test/test_console_tool.py::TestConsoleTool::test_clear_account", "test/test_console_tool.py::TestConsoleTool::test_sync_syntax_error", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_complex", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_hyphen", "test/test_console_tool.py::TestConsoleTool::test_sync_dry_run", "test/test_console_tool.py::TestConsoleTool::test_bad_terminal", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_empty_show_size", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_bad_key", "test/test_console_tool.py::TestConsoleTool::test_get_account_info", "test/test_console_tool.py::TestConsoleTool::test_ls", "test/test_console_tool.py::TestConsoleTool::test_keys", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_none", "test/test_console_tool.py::TestConsoleTool::test_files", "test_b2_command_line.py::TestCommandLine::test_stderr_patterns", "test/test_sync.py::TestMakeSyncActions::test_illegal_b2_to_b2", "test/test_sync.py::TestMakeSyncActions::test_older_b2_replace", "test/test_sync.py::TestMakeSyncActions::test_older_b2_replace_delete", 
"test/test_sync.py::TestMakeSyncActions::test_newer_b2_clean_old_versions", "test/test_sync.py::TestMakeSyncActions::test_older_local_replace", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_delete", "test/test_sync.py::TestMakeSyncActions::test_delete_large_b2", "test/test_sync.py::TestMakeSyncActions::test_illegal_delete_and_keep_days", "test/test_sync.py::TestMakeSyncActions::test_no_delete_local", "test/test_sync.py::TestMakeSyncActions::test_older_b2_skip", "test/test_sync.py::TestMakeSyncActions::test_newer_b2", "test/test_sync.py::TestMakeSyncActions::test_older_b2", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_equal", "test/test_sync.py::TestMakeSyncActions::test_same_delete_old_versions", "test/test_sync.py::TestMakeSyncActions::test_newer_local", "test/test_sync.py::TestMakeSyncActions::test_illegal_skip_and_replace", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_one_old", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days", "test/test_sync.py::TestMakeSyncActions::test_not_there_local", "test/test_sync.py::TestMakeSyncActions::test_same_local", "test/test_sync.py::TestMakeSyncActions::test_no_delete_b2", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal", "test/test_sync.py::TestMakeSyncActions::test_delete_local", "test/test_sync.py::TestMakeSyncActions::test_same_b2", "test/test_sync.py::TestMakeSyncActions::test_dir_not_there_b2_delete", "test/test_sync.py::TestMakeSyncActions::test_delete_hide_b2_multiple_versions", "test/test_sync.py::TestMakeSyncActions::test_keep_days_no_change_with_old_file", "test/test_sync.py::TestMakeSyncActions::test_empty_local", "test/test_sync.py::TestMakeSyncActions::test_delete_hide_b2_multiple_versions_old", "test/test_sync.py::TestMakeSyncActions::test_illegal_local_to_local", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_older", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_size_not_equal_delete", "test/test_sync.py::TestMakeSyncActions::test_empty_b2", "test/test_sync.py::TestMakeSyncActions::test_older_local", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_delete_hide_marker", "test/test_sync.py::TestMakeSyncActions::test_delete_b2_multiple_versions", "test/test_sync.py::TestMakeSyncActions::test_delete_b2", "test/test_sync.py::TestMakeSyncActions::test_newer_b2_delete_old_versions", "test/test_sync.py::TestMakeSyncActions::test_older_local_skip", "test/test_sync.py::TestMakeSyncActions::test_same_leave_old_versions", "test/test_sync.py::TestMakeSyncActions::test_not_there_b2", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_old_delete", "test/test_sync.py::TestMakeSyncActions::test_dir_not_there_b2_keepdays", "test/test_sync.py::TestMakeSyncActions::test_same_clean_old_versions", "test/test_sync.py::TestMakeSyncActions::test_compare_b2_none_newer", "test/test_sync.py::TestMakeSyncActions::test_already_hidden_multiple_versions_keep_days_two_old", "test/test_sync.py::TestLocalFolder::test_exclude_all", "test/test_sync.py::TestLocalFolder::test_broken_symlink", "test/test_sync.py::TestLocalFolder::test_exclude_directory_trailing_slash_does_not_match", "test/test_sync.py::TestLocalFolder::test_exclusion_with_exact_match", "test/test_sync.py::TestLocalFolder::test_exclusions_inclusions", 
"test/test_sync.py::TestLocalFolder::test_exclude_directory", "test/test_sync.py::TestLocalFolder::test_exclude_directory2", "test/test_sync.py::TestLocalFolder::test_exclude_matches_prefix", "test/test_sync.py::TestLocalFolder::test_slash_sorting", "test/test_sync.py::TestLocalFolder::test_invalid_permissions", "test/test_sync.py::TestLocalFolder::test_exclusions", "test/test_sync.py::TestParseSyncFolder::test_b2_double_slash", "test/test_sync.py::TestParseSyncFolder::test_local", "test/test_sync.py::TestParseSyncFolder::test_b2_no_double_slash", "test/test_sync.py::TestParseSyncFolder::test_b2_trailing_slash", "test/test_sync.py::TestParseSyncFolder::test_local_trailing_slash", "test/test_sync.py::TestParseSyncFolder::test_b2_no_folder", "test/test_sync.py::TestB2Folder::test_empty", "test/test_sync.py::TestB2Folder::test_multiple_versions", "test/test_sync.py::TestBoundedQueueExecutor::test_wait_for_running_jobs", "test/test_sync.py::TestBoundedQueueExecutor::test_run_more_than_queue_size", "test/test_sync.py::TestZipFolders::test_empty", "test/test_sync.py::TestZipFolders::test_one_empty", "test/test_sync.py::TestZipFolders::test_two", "test/test_sync.py::TestZipFolders::test_pass_reporter_to_folder", "test/test_raw_api.py::TestRawAPIFilenames::test_b2_filename_checker", "test/test_bucket.py::TestLs::test_three_files_multiple_versions", "test/test_bucket.py::TestLs::test_three_files_at_root", "test/test_bucket.py::TestLs::test_hidden_file", "test/test_bucket.py::TestLs::test_three_files_in_dir", "test/test_bucket.py::TestLs::test_started_large_file", "test/test_bucket.py::TestLs::test_empty", "test/test_bucket.py::TestLs::test_delete_file_version", "test/test_bucket.py::TestLs::test_one_file_at_root", "test/test_bucket.py::TestUpload::test_upload_file_one_fatal_error", "test/test_bucket.py::TestUpload::test_upload_large_resume_no_parts", "test/test_bucket.py::TestUpload::test_upload_local_file", "test/test_bucket.py::TestUpload::test_upload_large_resume_file_info", "test/test_bucket.py::TestUpload::test_upload_large", "test/test_bucket.py::TestUpload::test_upload_large_resume_part_does_not_match", "test/test_bucket.py::TestUpload::test_upload_large_resume_all_parts_there", "test/test_bucket.py::TestUpload::test_upload_file_too_many_retryable_errors", "test/test_bucket.py::TestUpload::test_upload_large_resume_file_info_does_not_match", "test/test_bucket.py::TestUpload::test_upload_large_resume", "test/test_bucket.py::TestUpload::test_upload_fifo", "test/test_bucket.py::TestUpload::test_upload_one_retryable_error", "test/test_bucket.py::TestUpload::test_upload_bytes", "test/test_bucket.py::TestUpload::test_upload_dead_symlink", "test/test_bucket.py::TestUpload::test_upload_large_resume_wrong_part_size", "test/test_bucket.py::TestUpload::test_upload_bytes_progress", "test/test_bucket.py::TestDownloadSimple::test_download_by_name_no_progress", "test/test_bucket.py::TestDownloadSimple::test_download_by_id_progress_partial_inplace_overwrite", "test/test_bucket.py::TestDownloadSimple::test_download_by_id_progress_range_one_off", "test/test_bucket.py::TestDownloadSimple::test_download_by_id_progress_partial", "test/test_bucket.py::TestDownloadSimple::test_download_by_name_progress", "test/test_bucket.py::TestDownloadSimple::test_download_by_id_no_progress", "test/test_bucket.py::TestDownloadSimple::test_download_by_id_progress_partial_shifted_overwrite", "test/test_bucket.py::TestDownloadSimple::test_download_by_id_progress_exact_range", 
"test/test_bucket.py::TestDownloadSimple::test_download_by_id_progress", "test/test_bucket.py::TestListUnfinished::test_three", "test/test_bucket.py::TestListUnfinished::test_empty", "test/test_bucket.py::TestListUnfinished::test_one", "test/test_bucket.py::TestReauthorization::testCreateBucket", "test/test_bucket.py::TestUploadPart::test_error_in_state", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_no_progress", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_progress_partial_inplace_overwrite", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_progress_range_one_off", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_progress_partial", "test/test_bucket.py::TestDownloadParallel::test_download_by_name_no_progress", "test/test_bucket.py::TestDownloadParallel::test_download_by_name_progress", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_progress_partial_shifted_overwrite", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_progress", "test/test_bucket.py::TestDownloadParallel::test_download_by_id_progress_exact_range", "test/test_bucket.py::TestDownloadDefault::test_download_by_name_progress", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_progress_partial_shifted_overwrite", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_progress", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_progress_exact_range", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_progress_partial", "test/test_bucket.py::TestDownloadDefault::test_download_by_name_no_progress", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_no_progress", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_progress_range_one_off", "test/test_bucket.py::TestDownloadDefault::test_download_by_id_progress_partial_inplace_overwrite", "test/test_bucket.py::TestListParts::testThree", "test/test_bucket.py::TestListParts::testEmpty" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2018-09-06T15:13:56Z"
mit
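The patch in the record above wires an `--excludeAllSymlinks` flag through to `ScanPoliciesManager(exclude_all_symlinks=...)` and guards the local file walk with `os.path.islink()`. Below is a minimal, self-contained sketch of that skip-symlinks pattern; it is a hypothetical standalone function mirroring the logic, not the b2 package's own `LocalFolder` code.

```python
# Hypothetical standalone sketch of the --excludeAllSymlinks behaviour from the
# patch above; it mirrors the os.path.islink() guard rather than reusing b2's
# LocalFolder/ScanPoliciesManager classes.
import os
from typing import Iterator


def iter_local_files(root: str, exclude_all_symlinks: bool = False) -> Iterator[str]:
    """Yield paths of files under `root`, optionally skipping symbolic links."""
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            local_path = os.path.join(dirpath, name)
            if exclude_all_symlinks and os.path.islink(local_path):
                # The real code reports this via reporter.symlink_skipped(local_path).
                continue
            yield local_path


if __name__ == "__main__":
    for path in sorted(iter_local_files(".", exclude_all_symlinks=True)):
        print(path)
```

The record's tests exercise both directions: with the flag the symlink is skipped, and without it the link target is uploaded under the link's name.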
Backblaze__B2_Command_Line_Tool-643
diff --git a/README.md b/README.md index beaabdc..727ae51 100644 --- a/README.md +++ b/README.md @@ -108,6 +108,10 @@ For advanced users, a hidden option `--logConfig <filename.ini>` can be used to # Release History +## Not released yet + +* Add `--environment` internal parameter for `authorize-account` + ## 2.0.0 (2020-06-25) Changes: diff --git a/b2/console_tool.py b/b2/console_tool.py index 2914a16..867a205 100644 --- a/b2/console_tool.py +++ b/b2/console_tool.py @@ -283,21 +283,20 @@ class AuthorizeAccount(Command): @classmethod def _setup_parser(cls, parser): - parser.add_argument('--dev', action='store_true', help=argparse.SUPPRESS) - parser.add_argument('--staging', action='store_true', help=argparse.SUPPRESS) + realm_group = parser.add_mutually_exclusive_group() + realm_group.add_argument('--dev', action='store_true', help=argparse.SUPPRESS) + realm_group.add_argument('--staging', action='store_true', help=argparse.SUPPRESS) + realm_group.add_argument('--environment', help=argparse.SUPPRESS) + parser.add_argument('applicationKeyId', nargs='?') parser.add_argument('applicationKey', nargs='?') def run(self, args): - # Handle internal options for testing inside Backblaze. These - # are not documented in the usage string. - realm = 'production' - if args.staging: - realm = 'staging' - if args.dev: - realm = 'dev' + # Handle internal options for testing inside Backblaze. + # These are not documented in the usage string. + realm = self._get_realm(args) - url = self.api.account_info.REALM_URLS[realm] + url = self.api.account_info.REALM_URLS.get(realm, realm) self._print('Using %s' % url) if args.applicationKeyId is None: @@ -339,6 +338,17 @@ class AuthorizeAccount(Command): self._print_stderr('ERROR: unable to authorize account: ' + str(e)) return 1 + @classmethod + def _get_realm(cls, args): + if args.dev: + return 'dev' + if args.staging: + return 'staging' + if args.environment: + return args.environment + + return 'production' + @B2.register_subcommand class CancelAllUnfinishedLargeFiles(Command): diff --git a/requirements.txt b/requirements.txt index b1235a4..1006adb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ arrow>=0.8.0,<0.13.1; python_version <= '3.4' arrow>=0.8.0; python_version > '3.4' -b2sdk>=1.1.0,<1.2.0 +b2sdk>=1.1.3,<1.2.0 class-registry==2.1.2 six>=1.13
Backblaze/B2_Command_Line_Tool
cd5618431964b317d1bfa03c03f2798afd0c5296
diff --git a/test/test_console_tool.py b/test/test_console_tool.py index 8956eec..4e7f652 100644 --- a/test/test_console_tool.py +++ b/test/test_console_tool.py @@ -110,6 +110,25 @@ class TestConsoleTool(TestBase): # Auth token should be in account info now assert self.account_info.get_account_auth_token() is not None + def test_authorize_towards_custom_realm(self): + # Initial condition + assert self.account_info.get_account_auth_token() is None + + # Authorize an account with a good api key. + expected_stdout = """ + Using http://custom.example.com + """ + + self._run_command( + [ + 'authorize-account', '--environment', 'http://custom.example.com', self.account_id, + self.master_key + ], expected_stdout, '', 0 + ) + + # Auth token should be in account info now + assert self.account_info.get_account_auth_token() is not None + def test_create_key_and_authorize_with_it(self): # Start with authorizing with the master key self._authorize_account()
Please add an option to authorize_account to let me specify a base url.

The goal is to let us run the CLI against other B2 instances without having to change the code to add additional arguments like the "--staging" and "--dev" arguments that currently exist.

Here's a potential example:

b2 authorize-account --baseUrl https://api.backblazeb2.xyz

thanks, ab
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/test_console_tool.py::TestConsoleTool::test_authorize_towards_custom_realm" ]
[ "test/test_console_tool.py::TestConsoleTool::test_authorize_key_without_list_buckets", "test/test_console_tool.py::TestConsoleTool::test_authorize_using_env_variables", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_bad_key", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_hyphen", "test/test_console_tool.py::TestConsoleTool::test_authorize_with_good_key_using_underscore", "test/test_console_tool.py::TestConsoleTool::test_bad_terminal", "test/test_console_tool.py::TestConsoleTool::test_bucket_info_from_json", "test/test_console_tool.py::TestConsoleTool::test_bucket_missing_for_bucket_key", "test/test_console_tool.py::TestConsoleTool::test_buckets", "test/test_console_tool.py::TestConsoleTool::test_cancel_all_large_file", "test/test_console_tool.py::TestConsoleTool::test_cancel_large_file", "test/test_console_tool.py::TestConsoleTool::test_clear_account", "test/test_console_tool.py::TestConsoleTool::test_copy_file_by_id", "test/test_console_tool.py::TestConsoleTool::test_create_bucket_key_and_authorize_with_it", "test/test_console_tool.py::TestConsoleTool::test_create_key_and_authorize_with_it", "test/test_console_tool.py::TestConsoleTool::test_files", "test/test_console_tool.py::TestConsoleTool::test_get_account_info", "test/test_console_tool.py::TestConsoleTool::test_get_bucket", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_complex", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_empty_show_size", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_one_item_show_size", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_with_folders", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_with_hidden", "test/test_console_tool.py::TestConsoleTool::test_get_bucket_with_versions", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_defaults", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_explicit", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_url", "test/test_console_tool.py::TestConsoleTool::test_get_download_auth_url_with_encoding", "test/test_console_tool.py::TestConsoleTool::test_keys", "test/test_console_tool.py::TestConsoleTool::test_list_buckets_not_allowed_for_app_key", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_parts_with_parts", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_none", "test/test_console_tool.py::TestConsoleTool::test_list_unfinished_large_files_with_some", "test/test_console_tool.py::TestConsoleTool::test_ls", "test/test_console_tool.py::TestConsoleTool::test_ls_for_restricted_bucket", "test/test_console_tool.py::TestConsoleTool::test_restrictions", "test/test_console_tool.py::TestConsoleTool::test_sync", "test/test_console_tool.py::TestConsoleTool::test_sync_dont_exclude_all_symlinks", "test/test_console_tool.py::TestConsoleTool::test_sync_dry_run", "test/test_console_tool.py::TestConsoleTool::test_sync_empty_folder_when_enabled", "test/test_console_tool.py::TestConsoleTool::test_sync_empty_folder_when_not_enabled", "test/test_console_tool.py::TestConsoleTool::test_sync_exclude_all_symlinks", "test/test_console_tool.py::TestConsoleTool::test_upload_large_file" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-14T05:17:02Z"
mit
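The patch in the record above resolves the realm with `REALM_URLS.get(realm, realm)`, so a `--environment` value that is not a known realm name is used as the base URL itself, which is what the `http://custom.example.com` test relies on. The following is a minimal sketch of that precedence and fallback; the realm table is a placeholder stand-in for b2sdk's, apart from the documented production endpoint.

```python
# Minimal sketch of the realm resolution added by the patch above. REALM_URLS is a
# stand-in: only the production entry is the real, documented endpoint; the other
# values are placeholders for Backblaze-internal environments.
from typing import Optional

REALM_URLS = {
    "production": "https://api.backblazeb2.com",
    "dev": "http://dev.example.com",          # placeholder
    "staging": "http://staging.example.com",  # placeholder
}


def get_realm(dev: bool = False, staging: bool = False,
              environment: Optional[str] = None) -> str:
    """Same precedence as AuthorizeAccount._get_realm: dev > staging > --environment > production."""
    if dev:
        return "dev"
    if staging:
        return "staging"
    if environment:
        return environment
    return "production"


def realm_url(realm: str) -> str:
    # Unknown realm names (e.g. a full URL given via --environment) fall through
    # unchanged -- the REALM_URLS.get(realm, realm) trick from the patch.
    return REALM_URLS.get(realm, realm)


if __name__ == "__main__":
    print(realm_url(get_realm(environment="http://custom.example.com")))  # the URL itself
    print(realm_url(get_realm()))  # https://api.backblazeb2.com
```

Invoked as `b2 authorize-account --environment http://custom.example.com <keyId> <key>`, per the new test in this record.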