instance_id
stringlengths
10
57
patch
stringlengths
261
37.7k
repo
stringlengths
7
53
base_commit
stringlengths
40
40
hints_text
stringclasses
301 values
test_patch
stringlengths
212
2.22M
problem_statement
stringlengths
23
37.7k
version
int64
0
0
environment_setup_commit
stringclasses
89 values
FAIL_TO_PASS
sequencelengths
1
4.94k
PASS_TO_PASS
sequencelengths
0
7.82k
meta
dict
created_at
unknown
license
stringclasses
8 values
0b01001001__spectree-64
diff --git a/setup.py b/setup.py index 1b3cb64..4ef21e6 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f: setup( name='spectree', - version='0.3.7', + version='0.3.8', author='Keming Yang', author_email='[email protected]', description=('generate OpenAPI document and validate request&response ' diff --git a/spectree/utils.py b/spectree/utils.py index bb5698d..73d6c71 100644 --- a/spectree/utils.py +++ b/spectree/utils.py @@ -54,6 +54,7 @@ def parse_params(func, params, models): 'in': 'query', 'schema': schema, 'required': name in query.get('required', []), + 'description': schema.get('description', ''), }) if hasattr(func, 'headers'): @@ -64,6 +65,7 @@ def parse_params(func, params, models): 'in': 'header', 'schema': schema, 'required': name in headers.get('required', []), + 'description': schema.get('description', ''), }) if hasattr(func, 'cookies'): @@ -74,6 +76,7 @@ def parse_params(func, params, models): 'in': 'cookie', 'schema': schema, 'required': name in cookies.get('required', []), + 'description': schema.get('description', ''), }) return params
0b01001001/spectree
a091fab020ac26548250c907bae0855273a98778
diff --git a/tests/common.py b/tests/common.py index 0f2d696..83b4140 100644 --- a/tests/common.py +++ b/tests/common.py @@ -1,7 +1,7 @@ from enum import IntEnum, Enum from typing import List -from pydantic import BaseModel, root_validator +from pydantic import BaseModel, root_validator, Field class Order(IntEnum): @@ -43,7 +43,7 @@ class Cookies(BaseModel): class DemoModel(BaseModel): uid: int limit: int - name: str + name: str = Field(..., description='user name') def get_paths(spec): diff --git a/tests/test_utils.py b/tests/test_utils.py index bf3426d..53dd3e1 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -98,8 +98,10 @@ def test_parse_params(): 'name': 'uid', 'in': 'query', 'required': True, + 'description': '', 'schema': { 'title': 'Uid', 'type': 'integer', } } + assert params[2]['description'] == 'user name'
[BUG]description for query paramters can not show in swagger ui Hi, when I add a description for a schema used in query, it can not show in swagger ui but can show in Redoc ```py @HELLO.route('/', methods=['GET']) @api.validate(query=HelloForm) def hello(): """ hello 注释 :return: """ return 'ok' class HelloForm(BaseModel): """ hello表单 """ user: str # 用户名称 msg: str = Field(description='msg test', example='aa') index: int data: HelloGetListForm list: List[HelloListForm] ``` ![截屏2020-10-12 下午7 54 52](https://user-images.githubusercontent.com/60063723/95743785-de70f480-0cc4-11eb-857b-fffd3d7e9cdd.png) ![截屏2020-10-12 下午7 53 59](https://user-images.githubusercontent.com/60063723/95743805-e5980280-0cc4-11eb-99ae-11e6439bae02.png)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_utils.py::test_parse_params" ]
[ "tests/test_utils.py::test_comments", "tests/test_utils.py::test_parse_code", "tests/test_utils.py::test_parse_name", "tests/test_utils.py::test_has_model", "tests/test_utils.py::test_parse_resp", "tests/test_utils.py::test_parse_request" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_media", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-10-12T13:21:50"
apache-2.0
12rambau__sepal_ui-644
diff --git a/.github/workflows/unit.yml b/.github/workflows/unit.yml index 9fc498b3..fc69f702 100644 --- a/.github/workflows/unit.yml +++ b/.github/workflows/unit.yml @@ -72,17 +72,19 @@ jobs: assert len(unexpected) == 0 - name: test with pytest - run: coverage run -m pytest --color=yes --instafail tests + run: pytest --color=yes --cov --cov-report=xml --instafail tests + + - name: assess dead fixtures + if: matrix.python-version == '3.8' + run: pytest --dead-fixtures - name: build the template panel application if: matrix.python-version == '3.8' - run: | - pytest --nbmake sepal_ui/templates/panel_app/ui.ipynb + run: pytest --nbmake sepal_ui/templates/panel_app/ui.ipynb - name: build the template map application if: matrix.python-version == '3.8' - run: | - pytest --nbmake sepal_ui/templates/map_app/ui.ipynb + run: pytest --nbmake sepal_ui/templates/map_app/ui.ipynb - name: coverage run: coverage xml diff --git a/sepal_ui/sepalwidgets/btn.py b/sepal_ui/sepalwidgets/btn.py index 137622fa..105f6160 100644 --- a/sepal_ui/sepalwidgets/btn.py +++ b/sepal_ui/sepalwidgets/btn.py @@ -25,6 +25,9 @@ class Btn(v.Btn, SepalWidget): .. deprecated:: 2.13 ``text`` and ``icon`` will be replaced by ``msg`` and ``gliph`` to avoid duplicating ipyvuetify trait. + + .. deprecated:: 2.14 + Btn is not using a default ``msg`` anymor`. 
""" v_icon = None @@ -36,7 +39,7 @@ class Btn(v.Btn, SepalWidget): msg = Unicode("").tag(sync=True) "traitlet.Unicode: the text of the btn" - def __init__(self, msg="Click", gliph="", **kwargs): + def __init__(self, msg="", gliph="", **kwargs): # deprecation in 2.13 of text and icon # as they already exist in the ipyvuetify Btn traits (as booleans) @@ -55,7 +58,7 @@ class Btn(v.Btn, SepalWidget): ) # create the default v_icon - self.v_icon = v.Icon(left=True, children=[""]) + self.v_icon = v.Icon(children=[""]) # set the default parameters kwargs["color"] = kwargs.pop("color", "primary") @@ -89,6 +92,7 @@ class Btn(v.Btn, SepalWidget): Set the text of the btn """ + self.v_icon.left = bool(change["new"]) self.children = [self.v_icon, change["new"]] return self diff --git a/setup.py b/setup.py index 84775ae4..e7ca3ccf 100644 --- a/setup.py +++ b/setup.py @@ -65,7 +65,7 @@ setup_params = { "cryptography", "python-box", "xyzservices", - "planet==2.0a2", # this is a prerelease + "planet>=2.0", "pyyaml", "dask", "tqdm", @@ -83,6 +83,8 @@ setup_params = { "pytest-sugar", "pytest-icdiff", "pytest-instafail", + "pytest-deadfixtures", + "pytest-cov", "nbmake ", ], "doc": [
12rambau/sepal_ui
8a8196e3c7893b7a0aebdb4910e83054f59e0374
diff --git a/tests/test_Btn.py b/tests/test_Btn.py index fcaed760..4e3cb9b5 100644 --- a/tests/test_Btn.py +++ b/tests/test_Btn.py @@ -11,7 +11,7 @@ class TestBtn: btn = sw.Btn() assert btn.color == "primary" assert btn.v_icon.children[0] == "" - assert btn.children[1] == "Click" + assert btn.children[1] == "" # extensive btn btn = sw.Btn("toto", "fas fa-folder") @@ -42,12 +42,18 @@ class TestBtn: assert isinstance(btn.v_icon, v.Icon) assert btn.v_icon.children[0] == gliph + assert btn.v_icon.left is True # change existing icon gliph = "fas fa-file" btn.gliph = gliph assert btn.v_icon.children[0] == gliph + # display only the gliph + btn.msg = "" + assert btn.children[1] == "" + assert btn.v_icon.left is False + # remove all gliph gliph = "" btn.gliph = gliph @@ -79,4 +85,4 @@ class TestBtn: def btn(self): """Create a simple btn""" - return sw.Btn() + return sw.Btn("Click") diff --git a/tests/test_PlanetModel.py b/tests/test_PlanetModel.py index f84d2e1f..d6d63c5a 100644 --- a/tests/test_PlanetModel.py +++ b/tests/test_PlanetModel.py @@ -9,11 +9,17 @@ from sepal_ui.planetapi import PlanetModel @pytest.mark.skipif("PLANET_API_KEY" not in os.environ, reason="requires Planet") class TestPlanetModel: - @pytest.mark.parametrize("credentials", ["planet_key", "cred"]) - def test_init(self, credentials, request): + def test_init(self, planet_key, cred, request): + + # Test with a valid api key + planet_model = PlanetModel(planet_key) + + assert isinstance(planet_model, PlanetModel) + assert isinstance(planet_model.session, planet.http.Session) + assert planet_model.active is True - # Test with a valid api key and login credentials - planet_model = PlanetModel(request.getfixturevalue(credentials)) + # Test with a valid login credentials + planet_model = PlanetModel(cred) assert isinstance(planet_model, PlanetModel) assert isinstance(planet_model.session, planet.http.Session) @@ -56,10 +62,7 @@ class TestPlanetModel: return - def test_is_active(self, planet_key): - - # We 
only need to test with a key. - planet_model = PlanetModel(planet_key) + def test_is_active(self, planet_model): planet_model._is_active() assert planet_model.active is True @@ -69,9 +72,8 @@ class TestPlanetModel: return - def test_get_subscriptions(self, planet_key): + def test_get_subscriptions(self, planet_model): - planet_model = PlanetModel(planet_key) subs = planet_model.get_subscriptions() # Check object has length, because there is no way to check a value @@ -80,10 +82,7 @@ class TestPlanetModel: return - def test_get_planet_items(self, planet_key): - - # Arrange - planet_model = PlanetModel(planet_key) + def test_get_planet_items(self, planet_model): aoi = { # Yasuni national park in Ecuador "type": "Polygon", @@ -119,3 +118,11 @@ class TestPlanetModel: credentials = json.loads(os.getenv("PLANET_API_CREDENTIALS")) return list(credentials.values()) + + @pytest.fixture + def planet_model(self): + """Start a planet model using the API key""" + + key = os.getenv("PLANET_API_KEY") + + return PlanetModel(key)
sepal_ui.Btn does't work as expected I want to create a simple Icon button, to do so: ```python sw.Btn(icon=True, gliph ="mdi-plus") ``` Doing this, without "msg" parameter will add the default text to the button which is "click", I think is worthless having that value. So if I want to remove the default text, I would expect doing this: ```python sw.Btn(children = [""], icon=True, gliph ="mdi-plus") # or sw.Btn(msg= ""] icon=True, gliph ="mdi-plus") ``` Which leads the icon aligned to the left and not centered (as it is using a empyt string as message).
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_Btn.py::TestBtn::test_init", "tests/test_Btn.py::TestBtn::test_set_gliph" ]
[ "tests/test_Btn.py::TestBtn::test_toggle_loading", "tests/test_Btn.py::TestBtn::test_set_msg" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-11-29T14:42:21"
mit
15five__scim2-filter-parser-13
diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 12a5d4f..178f172 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,6 +1,10 @@ CHANGE LOG ========== +0.3.5 +----- +- Update the sql.Transpiler to collect namedtuples rather than tuples for attr paths + 0.3.4 ----- - Update tox.ini and clean up linting errors diff --git a/setup.py b/setup.py index bbf57bf..bd16f70 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ def long_description(): setup( name='scim2-filter-parser', - version='0.3.4', + version='0.3.5', description='A customizable parser/transpiler for SCIM2.0 filters', url='https://github.com/15five/scim2-filter-parser', maintainer='Paul Logston', diff --git a/src/scim2_filter_parser/transpilers/sql.py b/src/scim2_filter_parser/transpilers/sql.py index 6254f1e..2107758 100644 --- a/src/scim2_filter_parser/transpilers/sql.py +++ b/src/scim2_filter_parser/transpilers/sql.py @@ -4,9 +4,12 @@ clause based on a SCIM filter. """ import ast import string +import collections from .. import ast as scim2ast +AttrPath = collections.namedtuple('AttrPath', ['attr_name', 'sub_attr', 'uri']) + class Transpiler(ast.NodeTransformer): """ @@ -145,7 +148,7 @@ class Transpiler(ast.NodeTransformer): # Convert attr_name to another value based on map. # Otherwise, return None. - attr_path_tuple = (attr_name_value, sub_attr_value, uri_value) + attr_path_tuple = AttrPath(attr_name_value, sub_attr_value, uri_value) self.attr_paths.append(attr_path_tuple) return self.attr_map.get(attr_path_tuple)
15five/scim2-filter-parser
3ed1858b492542d0bc9b9e9ab9547641595e28c1
diff --git a/tests/test_transpiler.py b/tests/test_transpiler.py index b8e1bb4..280c2d3 100644 --- a/tests/test_transpiler.py +++ b/tests/test_transpiler.py @@ -36,6 +36,16 @@ class RFCExamples(TestCase): self.assertEqual(expected_sql, sql, query) self.assertEqual(expected_params, params, query) + def test_attr_paths_are_created(self): + query = 'userName eq "bjensen"' + tokens = self.lexer.tokenize(query) + ast = self.parser.parse(tokens) + self.transpiler.transpile(ast) + + self.assertEqual(len(self.transpiler.attr_paths), 1) + for path in self.transpiler.attr_paths: + self.assertTrue(isinstance(path, transpile_sql.AttrPath)) + def test_username_eq(self): query = 'userName eq "bjensen"' sql = "username = {a}"
Return NamedTuple rather than tuple. It would be nice to return a NamedTuple instead of a tuple here: https://github.com/15five/scim2-filter-parser/blob/7ddc216f8c3dd1cdb2152944187e8f7f5ee07be2/src/scim2_filter_parser/transpilers/sql.py#L148 This way parts of each path could be accessed by name rather than by index in the tuple.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_transpiler.py::RFCExamples::test_attr_paths_are_created" ]
[ "tests/test_transpiler.py::CommandLine::test_command_line", "tests/test_transpiler.py::AzureQueries::test_email_type_eq_primary_value_eq_uuid", "tests/test_transpiler.py::AzureQueries::test_parse_simple_email_filter_with_uuid", "tests/test_transpiler.py::AzureQueries::test_external_id_from_azure", "tests/test_transpiler.py::UndefinedAttributes::test_schemas_eq", "tests/test_transpiler.py::UndefinedAttributes::test_title_has_value_and_user_type_eq_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_2", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_work_and_value_contains_2", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_work_and_value_contains_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_email_contains_or_email_contains", "tests/test_transpiler.py::UndefinedAttributes::test_email_type_eq_primary_value_eq_uuid_1", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_eq_and_not_email_type_eq_work_and_value_contains_3", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_3", "tests/test_transpiler.py::UndefinedAttributes::test_email_type_eq_primary_value_eq_uuid_2", "tests/test_transpiler.py::UndefinedAttributes::test_title_has_value_and_user_type_eq_2", "tests/test_transpiler.py::UndefinedAttributes::test_user_type_ne_and_not_email_contains_or_email_contains", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_4", "tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_1", "tests/test_transpiler.py::UndefinedAttributes::test_username_eq", 
"tests/test_transpiler.py::UndefinedAttributes::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians_2", "tests/test_transpiler.py::RFCExamples::test_username_eq", "tests/test_transpiler.py::RFCExamples::test_schema_username_startswith", "tests/test_transpiler.py::RFCExamples::test_title_has_value", "tests/test_transpiler.py::RFCExamples::test_family_name_contains", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_lt", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_ge", "tests/test_transpiler.py::RFCExamples::test_user_type_eq_and_not_email_type_eq_work_and_value_contains", "tests/test_transpiler.py::RFCExamples::test_emails_type_eq_work_value_contians_or_ims_type_eq_and_value_contians", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_le", "tests/test_transpiler.py::RFCExamples::test_user_type_eq_and_not_email_type_eq", "tests/test_transpiler.py::RFCExamples::test_title_has_value_and_user_type_eq", "tests/test_transpiler.py::RFCExamples::test_schemas_eq", "tests/test_transpiler.py::RFCExamples::test_user_type_eq_and_email_contains_or_email_contains", "tests/test_transpiler.py::RFCExamples::test_title_has_value_or_user_type_eq", "tests/test_transpiler.py::RFCExamples::test_meta_last_modified_gt", "tests/test_transpiler.py::RFCExamples::test_user_type_ne_and_not_email_contains_or_email_contains", "tests/test_transpiler.py::RFCExamples::test_username_startswith" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2020-07-30T14:25:04"
mit
15five__scim2-filter-parser-20
diff --git a/src/scim2_filter_parser/parser.py b/src/scim2_filter_parser/parser.py index 516f65d..12c693e 100644 --- a/src/scim2_filter_parser/parser.py +++ b/src/scim2_filter_parser/parser.py @@ -110,9 +110,8 @@ class SCIMParser(Parser): # which takes precedence over "or" # 3. Attribute operators precedence = ( - ('nonassoc', OR), # noqa F821 - ('nonassoc', AND), # noqa F821 - ('nonassoc', NOT), # noqa F821 + ('left', OR, AND), # noqa F821 + ('right', NOT), # noqa F821 ) # FILTER = attrExp / logExp / valuePath / *1"not" "(" FILTER ")"
15five/scim2-filter-parser
08de23c5626556a37beced764a22a2fa7021989b
diff --git a/tests/test_parser.py b/tests/test_parser.py index 4ff562c..19aa198 100644 --- a/tests/test_parser.py +++ b/tests/test_parser.py @@ -47,6 +47,24 @@ class BuggyQueries(TestCase): with self.assertRaises(parser.SCIMParserError): self.parser.parse(token_stream) + def test_g17_1_log_exp_order(self): + query = 'displayName co "username" or nickName co "username" or userName co "username"' + + tokens = self.lexer.tokenize(query) + self.parser.parse(tokens) # Should not raise error + + def test_g17_2_log_exp_order(self): + query = 'displayName co "username" and nickName co "username" and userName co "username"' + + tokens = self.lexer.tokenize(query) + self.parser.parse(tokens) # Should not raise error + + def test_g17_3_log_exp_order(self): + query = 'displayName co "username" and nickName co "username" or userName co "username"' + + tokens = self.lexer.tokenize(query) + self.parser.parse(tokens) # Should not raise error + class CommandLine(TestCase): def setUp(self):
Issue when using multiple "or" or "and" Hi, I am facing an issue, where the query having two or more "and" or more than two "or" is failing. Have a look at examples below: - 1)```"displayName co \"username\" or nickName co \"username\" or userName co \"username\""``` ```"displayName co \"username\" and nickName co \"username\" and userName co \"username\""``` the two queries fails giving , ```scim2_filter_parser.parser.SCIMParserError: Parsing error at: Token(type='OR', value='or', lineno=1, index=52)``` notice above queries are having either only "or" or "and". 2)```"displayName co \"username\" and nickName co \"username\" or userName co \"username\""``` but this query works.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_parser.py::BuggyQueries::test_g17_2_log_exp_order", "tests/test_parser.py::BuggyQueries::test_g17_1_log_exp_order" ]
[ "tests/test_parser.py::BuggyQueries::test_g17_3_log_exp_order", "tests/test_parser.py::BuggyQueries::test_no_quotes_around_comp_value", "tests/test_parser.py::RegressionTestQueries::test_command_line", "tests/test_parser.py::CommandLine::test_command_line" ]
{ "failed_lite_validators": [ "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-10-18T03:21:13"
mit
20c__ctl-3
diff --git a/src/ctl/plugins/pypi.py b/src/ctl/plugins/pypi.py index 5d979af..a6117af 100644 --- a/src/ctl/plugins/pypi.py +++ b/src/ctl/plugins/pypi.py @@ -32,7 +32,7 @@ class PyPIPluginConfig(release.ReleasePluginConfig): config_file = confu.schema.Str(help="path to pypi config file (e.g. ~/.pypirc)") # PyPI repository name, needs to exist in your pypi config file - repository = confu.schema.Str( + pypi_repository = confu.schema.Str( help="PyPI repository name - needs to exist " "in your pypi config file", default="pypi", ) @@ -55,16 +55,16 @@ class PyPIPlugin(release.ReleasePlugin): @property def dist_path(self): - return os.path.join(self.target.checkout_path, "dist", "*") + return os.path.join(self.repository.checkout_path, "dist", "*") def prepare(self): super(PyPIPlugin, self).prepare() self.shell = True - self.repository = self.get_config("repository") + self.pypi_repository = self.get_config("pypi_repository") self.pypirc_path = os.path.expanduser(self.config.get("config_file")) self.twine_settings = Settings( config_file=self.pypirc_path, - repository_name=self.repository, + repository_name=self.pypi_repository, sign=self.get_config("sign"), identity=self.get_config("identity"), sign_with=self.get_config("sign_with"), diff --git a/src/ctl/plugins/release.py b/src/ctl/plugins/release.py index bcfa1ce..dcae2f4 100644 --- a/src/ctl/plugins/release.py +++ b/src/ctl/plugins/release.py @@ -18,8 +18,8 @@ import ctl.plugins.git class ReleasePluginConfig(confu.schema.Schema): - target = confu.schema.Str( - help="target for release - should be a path " + repository = confu.schema.Str( + help="repository target for release - should be a path " "to a python package or the name of a " "repository type plugin", cli=False, @@ -46,16 +46,16 @@ class ReleasePlugin(command.CommandPlugin): "version", nargs=1, type=str, - help="release version - if target is managed by git, " + help="release version - if repository is managed by git, " "checkout this branch/tag", ) 
group.add_argument( - "target", + "repository", nargs="?", type=str, - default=plugin_config.get("target"), - help=ReleasePluginConfig().target.help, + default=plugin_config.get("repository"), + help=ReleasePluginConfig().repository.help, ) sub = parser.add_subparsers(title="Operation", dest="op") @@ -74,7 +74,7 @@ class ReleasePlugin(command.CommandPlugin): return { "group": group, - "confu_target": op_release_parser, + "confu_repository": op_release_parser, "op_release_parser": op_release_parser, "op_validate_parser": op_validate_parser, } @@ -84,48 +84,48 @@ class ReleasePlugin(command.CommandPlugin): self.prepare() self.shell = True - self.set_target(self.get_config("target")) + self.set_repository(self.get_config("repository")) self.dry_run = kwargs.get("dry") self.version = kwargs.get("version")[0] - self.orig_branch = self.target.branch + self.orig_branch = self.repository.branch if self.dry_run: self.log.info("Doing dry run...") - self.log.info("Release target: {}".format(self.target)) + self.log.info("Release repository: {}".format(self.repository)) try: - self.target.checkout(self.version) + self.repository.checkout(self.version) op = self.get_op(kwargs.get("op")) op(**kwargs) finally: - self.target.checkout(self.orig_branch) + self.repository.checkout(self.orig_branch) - def set_target(self, target): - if not target: - raise ValueError("No target specified") + def set_repository(self, repository): + if not repository: + raise ValueError("No repository specified") try: - self.target = self.other_plugin(target) - if not isinstance(self.target, ctl.plugins.repository.RepositoryPlugin): + self.repository = self.other_plugin(repository) + if not isinstance(self.repository, ctl.plugins.repository.RepositoryPlugin): raise TypeError( "The plugin with the name `{}` is not a " "repository type plugin and cannot be used " - "as a target".format(target) + "as a repository".format(repository) ) except KeyError: - self.target = os.path.abspath(target) - if not 
os.path.exists(self.target): + self.repository = os.path.abspath(repository) + if not os.path.exists(self.repository): raise IOError( "Target is neither a configured repository " "plugin nor a valid file path: " - "{}".format(self.target) + "{}".format(self.repository) ) - self.target = ctl.plugins.git.temporary_plugin( - self.ctl, "{}__tmp_repo".format(self.plugin_name), self.target + self.repository = ctl.plugins.git.temporary_plugin( + self.ctl, "{}__tmp_repo".format(self.plugin_name), self.repository ) - self.cwd = self.target.checkout_path + self.cwd = self.repository.checkout_path @expose("ctl.{plugin_name}.release") def release(self, **kwargs):
20c/ctl
879af37647e61767a1ede59ffd353e4cfd27cd6f
diff --git a/tests/test_plugin_pypi.py b/tests/test_plugin_pypi.py index 20315ad..19813e2 100644 --- a/tests/test_plugin_pypi.py +++ b/tests/test_plugin_pypi.py @@ -53,35 +53,35 @@ def test_init(): -def test_set_target_git_path(tmpdir, ctlr): +def test_set_repository_git_path(tmpdir, ctlr): """ - Test setting build target: existing git repo via filepath + Test setting build repository: existing git repo via filepath """ plugin, git_plugin = instantiate(tmpdir, ctlr) - plugin.set_target(git_plugin.checkout_path) + plugin.set_repository(git_plugin.checkout_path) assert plugin.dist_path == os.path.join(git_plugin.checkout_path, "dist", "*") -def test_set_target_git_plugin(tmpdir, ctlr): +def test_set_repository_git_plugin(tmpdir, ctlr): """ - Test setting build target: existing git plugin + Test setting build repository: existing git plugin """ plugin, git_plugin = instantiate(tmpdir, ctlr) - plugin.set_target(git_plugin.plugin_name) + plugin.set_repository(git_plugin.plugin_name) assert plugin.dist_path == os.path.join(git_plugin.checkout_path, "dist", "*") -def test_set_target_error(tmpdir, ctlr): +def test_set_repository_error(tmpdir, ctlr): """ - Test setting invalid build target + Test setting invalid build repository """ plugin, git_plugin = instantiate(tmpdir, ctlr) @@ -89,17 +89,17 @@ def test_set_target_error(tmpdir, ctlr): # non existing path / plugin name with pytest.raises(IOError): - plugin.set_target("invalid target") + plugin.set_repository("invalid repository") # invalid plugin type with pytest.raises(TypeError): - plugin.set_target("test_pypi") + plugin.set_repository("test_pypi") - # no target + # no repository with pytest.raises(ValueError): - plugin.set_target(None) + plugin.set_repository(None) def test_build_dist(tmpdir, ctlr): @@ -110,7 +110,7 @@ def test_build_dist(tmpdir, ctlr): plugin, git_plugin = instantiate(tmpdir, ctlr) plugin.prepare() - plugin.set_target(git_plugin.plugin_name) + plugin.set_repository(git_plugin.plugin_name) 
plugin._build_dist() assert os.path.exists(os.path.join(git_plugin.checkout_path, @@ -126,7 +126,7 @@ def test_validate_dist(tmpdir, ctlr): plugin, git_plugin = instantiate(tmpdir, ctlr) plugin.prepare() - plugin.set_target(git_plugin.plugin_name) + plugin.set_repository(git_plugin.plugin_name) plugin._build_dist() plugin._validate_dist()
PyPI plugin: `target` config attribute should be `repository` This is so it's in line with the version plugin, which currently uses `repository` to specify the target repository The pypi plugin currently uses `repository` to specify which PyPI repository to use, this should change to `pypi_repository` as well. Should do this before tagging 1.0.0 since it's a config schema change
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_plugin_pypi.py::test_set_repository_git_path[standard]", "tests/test_plugin_pypi.py::test_set_repository_error[standard]", "tests/test_plugin_pypi.py::test_set_repository_git_plugin[standard]" ]
[ "tests/test_plugin_pypi.py::test_init" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-10-08T09:23:56"
apache-2.0
20c__ctl-7
diff --git a/Ctl/Pipfile b/Ctl/Pipfile index 0c7a304..1bd6308 100644 --- a/Ctl/Pipfile +++ b/Ctl/Pipfile @@ -14,7 +14,7 @@ tmpl = "==0.3.0" [packages] munge = "<1,>=0.4" -cfu = ">=1.2.0,<2" +cfu = ">=1.3.0,<2" grainy = ">=1.4.0,<2" git-url-parse = ">=1.1.0,<2" pluginmgr = ">=0.6" diff --git a/Ctl/requirements.txt b/Ctl/requirements.txt index b3582c5..0037aaa 100644 --- a/Ctl/requirements.txt +++ b/Ctl/requirements.txt @@ -1,5 +1,5 @@ munge >=0.4, <1 -cfu >= 1.2.0, < 2 +cfu >= 1.3.0, < 2 grainy >= 1.4.0, <2 git-url-parse >= 1.1.0, <2 pluginmgr >= 0.6 diff --git a/src/ctl/__init__.py b/src/ctl/__init__.py index eb4a635..b9616df 100644 --- a/src/ctl/__init__.py +++ b/src/ctl/__init__.py @@ -4,6 +4,7 @@ import os from pkg_resources import get_distribution import confu.config +import confu.exceptions import grainy.core import copy import logging @@ -279,11 +280,14 @@ class Ctl(object): # def set_config_dir(self): def __init__(self, ctx=None, config_dir=None, full_init=True): - self.init_context(ctx=ctx, config_dir=config_dir) + self.init_context(ctx=ctx, config_dir=config_dir) self.init_logging() - self.init_permissions() + if self.config.errors: + return self.log_config_issues() + + self.init_permissions() self.expose_plugin_vars() if full_init: @@ -330,8 +334,10 @@ class Ctl(object): Apply python logging config and create `log` and `usage_log` properties """ + # allow setting up python logging from ctl config set_pylogger_config(self.ctx.config.get_nested("ctl", "log")) + # instantiate logger self.log = Log("ctl") self.usage_log = Log("usage") diff --git a/src/ctl/util/versioning.py b/src/ctl/util/versioning.py index 22bdb09..23e1390 100644 --- a/src/ctl/util/versioning.py +++ b/src/ctl/util/versioning.py @@ -1,5 +1,4 @@ def version_tuple(version): - print("VERSION", version) """ Returns a tuple from version string """ return tuple(version.split(".")) @@ -9,27 +8,35 @@ def version_string(version): return ".".join(["{}".format(v) for v in version]) -def 
validate_semantic(version): +def validate_semantic(version, pad=0): if not isinstance(version, (list, tuple)): version = version_tuple(version) - try: - major, minor, patch, dev = version - except ValueError: - major, minor, patch = version + parts = len(version) + + if parts < 1: + raise ValueError("Semantic version needs to contain at least a major version") + if parts > 4: + raise ValueError("Semantic version can not contain more than 4 parts") + + if parts < pad: + version = tuple(list(version) + [0 for i in range(0, pad - parts)]) return tuple([int(n) for n in version]) def bump_semantic(version, segment): - version = list(validate_semantic(version)) if segment == "major": + version = list(validate_semantic(version)) return (version[0] + 1, 0, 0) elif segment == "minor": + version = list(validate_semantic(version, pad=2)) return (version[0], version[1] + 1, 0) elif segment == "patch": + version = list(validate_semantic(version, pad=3)) return (version[0], version[1], version[2] + 1) elif segment == "dev": + version = list(validate_semantic(version, pad=4)) try: return (version[0], version[1], version[2], version[3] + 1) except IndexError:
20c/ctl
be7f350f8f2d92918922d82fce0266fcd72decd2
diff --git a/tests/test_plugin_version.py b/tests/test_plugin_version.py index 6745c78..4b9617a 100644 --- a/tests/test_plugin_version.py +++ b/tests/test_plugin_version.py @@ -138,6 +138,30 @@ def test_bump(tmpdir, ctlr): plugin.bump(version="invalid", repo="dummy_repo") +def test_bump_truncated(tmpdir, ctlr): + plugin, dummy_repo = instantiate(tmpdir, ctlr) + plugin.tag(version="1.0", repo="dummy_repo") + + plugin.bump(version="minor", repo="dummy_repo") + assert dummy_repo.version == ("1", "1", "0") + assert dummy_repo._tag == "1.1.0" + + plugin.tag(version="1.0", repo="dummy_repo") + plugin.bump(version="patch", repo="dummy_repo") + assert dummy_repo.version == ("1", "0", "1") + assert dummy_repo._tag == "1.0.1" + + plugin.tag(version="2", repo="dummy_repo") + plugin.bump(version="patch", repo="dummy_repo") + assert dummy_repo.version == ("2", "0", "1") + assert dummy_repo._tag == "2.0.1" + + plugin.tag(version="3", repo="dummy_repo") + plugin.bump(version="major", repo="dummy_repo") + assert dummy_repo.version == ("4", "0", "0") + assert dummy_repo._tag == "4.0.0" + + def test_execute(tmpdir, ctlr): plugin, dummy_repo = instantiate(tmpdir, ctlr) plugin.execute(op="tag", version="1.0.0", repository="dummy_repo", init=True) diff --git a/tests/test_util_versioning.py b/tests/test_util_versioning.py index b89df79..6624816 100644 --- a/tests/test_util_versioning.py +++ b/tests/test_util_versioning.py @@ -19,7 +19,7 @@ def test_version_tuple(version, string): ((1, 0, 0), (1, 0, 0), None), ("1.0.0.0", (1, 0, 0, 0), None), ((1, 0, 0, 0), (1, 0, 0, 0), None), - ("1.0", None, ValueError), + ("1.0", (1, 0), None), ("a.b.c", None, ValueError), ], )
Better error handling for config errors outside of `plugins` Example: having a schema error in `permissions` exits ctl with traceback that's not very telling as to what is failing reproduce: ``` permissions: namespace: ctl permission: crud ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_util_versioning.py::test_validate_semantic[1.0-expected4-None]", "tests/test_plugin_version.py::test_bump_truncated[standard]" ]
[ "tests/test_util_versioning.py::test_bump_semantic[1.2.3-minor-expected1]", "tests/test_util_versioning.py::test_validate_semantic[1.0.0-expected0-None]", "tests/test_util_versioning.py::test_validate_semantic[version3-expected3-None]", "tests/test_util_versioning.py::test_validate_semantic[version1-expected1-None]", "tests/test_util_versioning.py::test_bump_semantic[1.2.3.4-dev-expected3]", "tests/test_util_versioning.py::test_version_tuple[version0-1.0.0]", "tests/test_util_versioning.py::test_validate_semantic[1.0.0.0-expected2-None]", "tests/test_util_versioning.py::test_bump_semantic[1.2.3.4-patch-expected2]", "tests/test_util_versioning.py::test_bump_semantic[1.2.3.4-major-expected0]", "tests/test_util_versioning.py::test_validate_semantic[a.b.c-None-ValueError]", "tests/test_plugin_version.py::test_execute_permissions[permission_denied]", "tests/test_plugin_version.py::test_tag[standard]", "tests/test_plugin_version.py::test_repository[standard]", "tests/test_plugin_version.py::test_bump[standard]", "tests/test_plugin_version.py::test_execute[standard]", "tests/test_plugin_version.py::test_init" ]
{ "failed_lite_validators": [ "has_short_problem_statement", "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2019-10-21T11:05:40"
apache-2.0
3YOURMIND__django-migration-linter-186
diff --git a/.github/workflows/ci-build.yml b/.github/workflows/ci-build.yml index 085b32c..fef9e0e 100644 --- a/.github/workflows/ci-build.yml +++ b/.github/workflows/ci-build.yml @@ -28,7 +28,7 @@ jobs: strategy: matrix: - python-version: ['2.7', '3.5', '3.6', '3.7', '3.8', '3.9'] + python-version: ['3.6', '3.7', '3.8', '3.9'] name: Build with Python ${{ matrix.python-version }} steps: diff --git a/CHANGELOG.md b/CHANGELOG.md index d1ec8e5..15fefc0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,7 +1,8 @@ -## 4.0.0 +## 4.0.0 (unreleased) - Drop support for Python 2.7 and 3.5 - Drop support for Django 1.11, 2.0, 2.1, 3.0 +- Fix index creation detection when table is being created in the transaction (issue #178) ## 3.0.1 diff --git a/django_migration_linter/sql_analyser/postgresql.py b/django_migration_linter/sql_analyser/postgresql.py index 140aba3..3eb18a5 100644 --- a/django_migration_linter/sql_analyser/postgresql.py +++ b/django_migration_linter/sql_analyser/postgresql.py @@ -3,14 +3,32 @@ import re from .base import BaseAnalyser +def has_create_index(sql_statements, **kwargs): + regex_result = None + for sql in sql_statements: + regex_result = re.search(r"CREATE (UNIQUE )?INDEX.*ON (.*) \(", sql) + if re.search("INDEX CONCURRENTLY", sql): + regex_result = None + elif regex_result: + break + if not regex_result: + return False + + concerned_table = regex_result.group(2) + table_is_added_in_transaction = any( + sql.startswith("CREATE TABLE {}".format(concerned_table)) + for sql in sql_statements + ) + return not table_is_added_in_transaction + + class PostgresqlAnalyser(BaseAnalyser): migration_tests = [ { "code": "CREATE_INDEX", - "fn": lambda sql, **kw: re.search("CREATE (UNIQUE )?INDEX", sql) - and not re.search("INDEX CONCURRENTLY", sql), + "fn": has_create_index, "msg": "CREATE INDEX locks table", - "mode": "one_liner", + "mode": "transaction", "type": "warning", }, {
3YOURMIND/django-migration-linter
aef3db3e4198d06c38bc4b0874e72ed657891eea
diff --git a/tests/unit/test_sql_analyser.py b/tests/unit/test_sql_analyser.py index 00dd50e..65ab7f0 100644 --- a/tests/unit/test_sql_analyser.py +++ b/tests/unit/test_sql_analyser.py @@ -233,6 +233,23 @@ class PostgresqlAnalyserTestCase(SqlAnalyserTestCase): sql = "CREATE UNIQUE INDEX title_idx ON films (title);" self.assertWarningSql(sql) + def test_create_index_non_concurrently_with_table_creation(self): + sql = [ + 'CREATE TABLE "films" ("title" text);', + 'CREATE INDEX ON "films" ((lower("title")));', + ] + self.assertValidSql(sql) + sql = [ + 'CREATE TABLE "some_table" ("title" text);', + 'CREATE INDEX ON "films" ((lower("title")));', + ] + self.assertWarningSql(sql) + sql = [ + 'CREATE TABLE "films" ("title" text);', + 'CREATE INDEX ON "some_table" ((lower("title")));', + ] + self.assertWarningSql(sql) + def test_create_index_concurrently(self): sql = "CREATE INDEX CONCURRENTLY ON films (lower(title));" self.assertValidSql(sql)
Linter fails on CREATE INDEX when creating a new table Here is an example `CreateModel` from Django: ```python migrations.CreateModel( name='ShipmentMetadataAlert', fields=[ ('deleted_at', models.DateTimeField(blank=True, db_index=True, null=True)), ('created_at', common.fields.CreatedField(default=django.utils.timezone.now, editable=False)), ('updated_at', common.fields.LastModifiedField(default=django.utils.timezone.now, editable=False)), ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, verbose_name='ID')), ('message', models.TextField(blank=True, null=True)), ('level', models.CharField(blank=True, choices=[('HIGH', 'high'), ('MEDIUM', 'medium'), ('LOW', 'low')], max_length=16, null=True)), ('type', models.CharField(blank=True, choices=[('MOBILE_DEVICE_ALERT', 'MOBILE_DEVICE_ALERT'), ('NON_ACTIVE_CARRIER', 'NON_ACTIVE_CARRIER'), ('OTHER', 'OTHER')], max_length=32, null=True)), ('subtype', models.CharField(blank=True, choices=[('DRIVER_PERMISSIONS', 'DRIVER_PERMISSIONS'), ('DRIVER_LOCATION', 'DRIVER_LOCATION'), ('OTHER', 'OTHER')], max_length=32, null=True)), ('occurred_at', models.DateTimeField(null=True)), ('clear_alert_job_id', models.UUIDField(default=None, null=True)), ('metadata', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='alerts', to='shipments.ShipmentMetadata')), ], options={ 'abstract': False, } ) ``` Here are the SQL statements that this spits out in `sqlmigrate`: ```sql BEGIN; -- -- Create model ShipmentMetadataAlert -- CREATE TABLE "shipments_shipmentmetadataalert" ("deleted_at" timestamp with time zone NULL, "created_at" timestamp with time zone NOT NULL, "updated_at" timestamp with time zone NOT NULL, "id" uuid NOT NULL PRIMARY KEY, "message" text NULL, "level" varchar(16) NULL, "type" varchar(32) NULL, "subtype" varchar(32) NULL, "occurred_at" timestamp with time zone NULL, "clear_alert_job_id" uuid NULL, "metadata_id" uuid NOT NULL); ALTER TABLE 
"shipments_shipmentmetadataalert" ADD CONSTRAINT "shipments_shipmentme_metadata_id_f20850e8_fk_shipments" FOREIGN KEY ("metadata_id") REFERENCES "shipments_shipmentmetadata" ("id") DEFERRABLE INITIALLY DEFERRED; CREATE INDEX "shipments_shipmentmetadataalert_deleted_at_c9a93342" ON "shipments_shipmentmetadataalert" ("deleted_at"); CREATE INDEX "shipments_shipmentmetadataalert_metadata_id_f20850e8" ON "shipments_shipmentmetadataalert" ("metadata_id"); COMMIT; ``` This is an error from the linter as it outputs the error `CREATE INDEX locks table`. But the table is being created within the migration, it just needs to recognize that. It seems like the `CREATE INDEX` detection should work the same way that the `ADD_UNIQUE` detection works where it detects that the create table is happening in the same migration: https://github.com/3YOURMIND/django-migration-linter/blob/db71a9db23746f64d41d681f3fecb9b066c87338/django_migration_linter/sql_analyser/base.py#L26-L40
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently_with_table_creation" ]
[ "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column_after_django22", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_create_table_with_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_rename_table", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_non_concurrently", 
"tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_field_to_not_null_with_dropped_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_onetoonefield_to_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_reindex", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_unique_together" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-12-20T21:27:38"
apache-2.0
3YOURMIND__django-migration-linter-258
diff --git a/CHANGELOG.md b/CHANGELOG.md index 3069d91..beafd65 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,10 +4,21 @@ Instead, the linter crashes and lets the `sqlmigrate` error raise, in order to avoid letting a problematic migration pass. One common reason for such an error is the SQL generation which requires the database to be actually migrated in order to fetch actual constraint names from it. The crash is a sign to double-check the migration. But if you are certain the migration is safe, you can ignore it (issue #209) + +Features: + - Fixed `RunPython` model import check when using a `through` object like `MyModel.many_to_many.through.objects.filter(...)` (issue #218) - Mark the `IgnoreMigration` operation as `elidable=True` + +Bug: + +- Don't detect not nullable field on partial index creation (issue #250) + +Miscellaneous: + - Add support for Python 3.11 - Add support for Django 4.1 +- Add support for Django 4.2 - Drop support for Django 2.2 - Internally rename "migration tests" to "migration checks" - Add dataclasses internally instead of custom dicts diff --git a/django_migration_linter/sql_analyser/base.py b/django_migration_linter/sql_analyser/base.py index 2fa0646..131652e 100644 --- a/django_migration_linter/sql_analyser/base.py +++ b/django_migration_linter/sql_analyser/base.py @@ -40,7 +40,8 @@ def has_not_null_column(sql_statements: list[str], **kwargs) -> bool: ends_with_default = False return ( any( - re.search("(?<!DROP )NOT NULL", sql) and not sql.startswith("CREATE TABLE") + re.search("(?<!DROP )NOT NULL", sql) + and not (sql.startswith("CREATE TABLE") or sql.startswith("CREATE INDEX")) for sql in sql_statements ) and ends_with_default is False
3YOURMIND/django-migration-linter
366d16b01a72d0baa54fef55761d846b0f05b8dd
diff --git a/tests/unit/test_sql_analyser.py b/tests/unit/test_sql_analyser.py index d7349fc..012d53c 100644 --- a/tests/unit/test_sql_analyser.py +++ b/tests/unit/test_sql_analyser.py @@ -297,6 +297,10 @@ class PostgresqlAnalyserTestCase(SqlAnalyserTestCase): sql = "CREATE UNIQUE INDEX CONCURRENTLY title_idx ON films (title);" self.assertValidSql(sql) + def test_create_index_concurrently_where(self): + sql = 'CREATE INDEX CONCURRENTLY "index_name" ON "table_name" ("a_column") WHERE ("some_column" IS NOT NULL);' + self.assertValidSql(sql) + def test_drop_index_non_concurrently(self): sql = "DROP INDEX ON films" self.assertWarningSql(sql)
Adding an index with a NOT NULL condition incorrectly triggers NOT_NULL rule Adding an index with a `WHERE` clause including `NOT NULL` gets flagged as a `NOT NULL constraint on columns` error. ## Steps to reproduce The follow migration operation: ```python AddIndexConcurrently( model_name="prediction", index=models.Index( condition=models.Q( ("data_deleted_at__isnull", True), ("delete_data_after__isnull", False), ), fields=["delete_data_after"], name="delete_data_after_idx", ), ), ``` Generates the following SQL: ```sql CREATE INDEX CONCURRENTLY "delete_data_after_idx" ON "models_prediction" ("delete_data_after") WHERE ("data_deleted_at" IS NULL AND "delete_data_after" IS NOT NULL); ``` When linted this is flagged as an error because of the `NOT NULL`, when it ought to be a safe operation. ## Investigation Looking at the condition used for this rule, I think it might just need to permit `CREATE INDEX` requests: ```python re.search("(?<!DROP )NOT NULL", sql) and not sql.startswith("CREATE TABLE") and not sql.startswith("CREATE INDEX") ``` https://github.com/3YOURMIND/django-migration-linter/blob/202a6d9d5dea83528cb52fd7481a5a0565cc6f83/django_migration_linter/sql_analyser/base.py#L43
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_concurrently_where" ]
[ "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_add_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_unique_index", "tests/unit/test_sql_analyser.py::MySqlAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_add_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_alter_column_after_django22", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_create_table_with_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_rename_table", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_unique_index", "tests/unit/test_sql_analyser.py::SqliteAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_add_many_to_many_field", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_alter_column", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently", 
"tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_create_index_non_concurrently_with_table_creation", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_index_non_concurrently", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_drop_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_field_to_not_null_with_dropped_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_django_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_make_column_not_null_with_lib_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_not_null_followed_by_default", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_onetoonefield_to_not_null", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_reindex", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_unique_index", "tests/unit/test_sql_analyser.py::PostgresqlAnalyserTestCase::test_unique_together", "tests/unit/test_sql_analyser.py::SqlUtilsTestCase::test_unknown_analyser_string", "tests/unit/test_sql_analyser.py::SqlUtilsTestCase::test_unsupported_db_vendor" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2023-07-03T18:35:18"
apache-2.0
3YOURMIND__django-migration-linter-47
diff --git a/django_migration_linter/migration_linter.py b/django_migration_linter/migration_linter.py index f9c0ab1..03c2054 100644 --- a/django_migration_linter/migration_linter.py +++ b/django_migration_linter/migration_linter.py @@ -20,7 +20,7 @@ from subprocess import Popen, PIPE import sys from .cache import Cache -from .constants import DEFAULT_CACHE_PATH, MIGRATION_FOLDER_NAME +from .constants import DEFAULT_CACHE_PATH, MIGRATION_FOLDER_NAME, __version__ from .migration import Migration from .utils import is_directory, is_django_project, clean_bytes_to_str from .sql_analyser import analyse_sql_statements @@ -287,6 +287,9 @@ def _main(): action="store_true", help="print more information during execution", ) + parser.add_argument( + "--version", "-V", action="version", version="%(prog)s {}".format(__version__) + ) parser.add_argument( "--database", type=str,
3YOURMIND/django-migration-linter
fbf0f4419336fcb1235fa57f5575ad2593354e44
diff --git a/tests/functional/test_cmd_line_call.py b/tests/functional/test_cmd_line_call.py index a2861fa..47d7944 100644 --- a/tests/functional/test_cmd_line_call.py +++ b/tests/functional/test_cmd_line_call.py @@ -16,7 +16,7 @@ import os import shutil import unittest from subprocess import Popen, PIPE -from django_migration_linter import utils, DEFAULT_CACHE_PATH +from django_migration_linter import utils, DEFAULT_CACHE_PATH, constants from tests import fixtures import sys @@ -274,3 +274,25 @@ class CallLinterFromCommandLineTest(unittest.TestCase): self.assertTrue(lines[0].endswith('ERR')) self.assertTrue(lines[2].endswith('OK')) self.assertTrue(lines[3].startswith('*** Summary')) + + +class VersionOptionLinterFromCommandLineTest(CallLinterFromCommandLineTest): + def test_call_with_version_option(self): + cmd = "{} --version".format(self.linter_exec) + process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) + process.wait() + self.assertEqual(process.returncode, 0) + process_read_stream = process.stderr if sys.version_info.major == 2 else process.stdout + lines = list(map(utils.clean_bytes_to_str, process_read_stream.readlines())) + self.assertEqual(len(lines), 1) + self.assertEqual(lines[0], "django-migration-linter {}".format(constants.__version__)) + + def test_call_with_short_version_option(self): + cmd = "{} -V".format(self.linter_exec) + process = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) + process.wait() + self.assertEqual(process.returncode, 0) + process_read_stream = process.stderr if sys.version_info.major == 2 else process.stdout + lines = list(map(utils.clean_bytes_to_str, process_read_stream.readlines())) + self.assertEqual(len(lines), 1) + self.assertEqual(lines[0], "django-migration-linter {}".format(constants.__version__))
Add --version option Pretty straightforward. Have a `--version` that prints the current version of the linter.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/functional/test_cmd_line_call.py::VersionOptionLinterFromCommandLineTest::test_call_with_version_option", "tests/functional/test_cmd_line_call.py::VersionOptionLinterFromCommandLineTest::test_call_with_short_version_option" ]
[ "tests/functional/test_cmd_line_call.py::VersionOptionLinterFromCommandLineTest::test_call_linter_cmd_line_cache", "tests/functional/test_cmd_line_call.py::CallLinterFromCommandLineTest::test_call_linter_cmd_line_cache" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2019-01-21T21:29:34"
apache-2.0
4degrees__clique-26
diff --git a/source/clique/collection.py b/source/clique/collection.py index 0c3b296..db9276c 100644 --- a/source/clique/collection.py +++ b/source/clique/collection.py @@ -251,15 +251,25 @@ class Collection(object): else: data['padding'] = '%d' - if self.indexes: + if '{holes}' in pattern: data['holes'] = self.holes().format('{ranges}') + if '{range}' in pattern or '{ranges}' in pattern: indexes = list(self.indexes) - if len(indexes) == 1: + indexes_count = len(indexes) + + if indexes_count == 0: + data['range'] = '' + + elif indexes_count == 1: data['range'] = '{0}'.format(indexes[0]) + else: - data['range'] = '{0}-{1}'.format(indexes[0], indexes[-1]) + data['range'] = '{0}-{1}'.format( + indexes[0], indexes[-1] + ) + if '{ranges}' in pattern: separated = self.separate() if len(separated) > 1: ranges = [collection.format('{range}') @@ -270,11 +280,6 @@ class Collection(object): data['ranges'] = ', '.join(ranges) - else: - data['holes'] = '' - data['range'] = '' - data['ranges'] = '' - return pattern.format(**data) def is_contiguous(self):
4degrees/clique
a89507304acce5931f940c34025a6547fa8227b5
diff --git a/test/unit/test_collection.py b/test/unit/test_collection.py index ce4daa7..11cb01e 100644 --- a/test/unit/test_collection.py +++ b/test/unit/test_collection.py @@ -2,6 +2,7 @@ # :copyright: Copyright (c) 2013 Martin Pengelly-Phillips # :license: See LICENSE.txt. +import sys import inspect import pytest @@ -242,7 +243,6 @@ def test_remove_non_member(): (PaddedCollection, '{range}', '1-12'), (PaddedCollection, '{ranges}', '1-3, 7, 9-12'), (PaddedCollection, '{holes}', '4-6, 8'), - ]) def test_format(CollectionCls, pattern, expected): '''Format collection according to pattern.''' @@ -250,6 +250,25 @@ def test_format(CollectionCls, pattern, expected): assert collection.format(pattern) == expected +def test_format_sparse_collection(): + '''Format sparse collection without recursion error.''' + recursion_limit = sys.getrecursionlimit() + recursion_error_occurred = False + + try: + collection = PaddedCollection( + indexes=set(range(0, recursion_limit * 2, 2)) + ) + collection.format() + except RuntimeError as error: + if 'maximum recursion depth exceeded' in str(error): + recursion_error_occurred = True + else: + raise + + assert not recursion_error_occurred + + @pytest.mark.parametrize(('collection', 'expected'), [ (PaddedCollection(indexes=set([])), True), (PaddedCollection(indexes=set([1])), True),
collection.format hits maximum recursion depth for collections with lots of holes. The following code gives an example. ```python paths = ["name.{0:04d}.jpg".format(x) for x in range(2000)[::2]] collection = clique.assemble(paths)[0][0] collection.format("{head}####{tail}") ```
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/unit/test_collection.py::test_format_sparse_collection" ]
[ "test/unit/test_collection.py::test_remove_non_member", "test/unit/test_collection.py::test_separate[non-contiguous", "test/unit/test_collection.py::test_is_compatible[incompatible", "test/unit/test_collection.py::test_compatible_merge[complimentary]", "test/unit/test_collection.py::test_holes[range", "test/unit/test_collection.py::test_comparisons[different", "test/unit/test_collection.py::test_format[PaddedCollection-{ranges}-1-3,", "test/unit/test_collection.py::test_is_compatible[compatible]", "test/unit/test_collection.py::test_holes[empty]", "test/unit/test_collection.py::test_contains[non-member", "test/unit/test_collection.py::test_compatible_merge[duplicates]", "test/unit/test_collection.py::test_is_contiguous[empty]", "test/unit/test_collection.py::test_match[padded-collection:unpadded", "test/unit/test_collection.py::test_add[unpadded-collection:padded", "test/unit/test_collection.py::test_add[padded-collection:unpadded", "test/unit/test_collection.py::test_format[PaddedCollection-{head}-/head.]", "test/unit/test_collection.py::test_holes[single", "test/unit/test_collection.py::test_add[padded-collection:padded", "test/unit/test_collection.py::test_is_contiguous[contiguous", "test/unit/test_collection.py::test_not_implemented_comparison", "test/unit/test_collection.py::test_format[PaddedCollection-{range}-1-12]", "test/unit/test_collection.py::test_format[PaddedCollection-{padding}-%04d]", "test/unit/test_collection.py::test_format[PaddedCollection-{holes}-4-6,", "test/unit/test_collection.py::test_is_contiguous[single]", "test/unit/test_collection.py::test_compatible_merge[both", "test/unit/test_collection.py::test_match[different", "test/unit/test_collection.py::test_str", "test/unit/test_collection.py::test_unsettable_indexes", "test/unit/test_collection.py::test_format[UnpaddedCollection-{padding}-%d]", "test/unit/test_collection.py::test_contains[different", "test/unit/test_collection.py::test_incompatible_merge[incompatible", 
"test/unit/test_collection.py::test_format[PaddedCollection-{tail}-.ext]", "test/unit/test_collection.py::test_separate[empty]", "test/unit/test_collection.py::test_add[unpadded-collection:unpadded", "test/unit/test_collection.py::test_repr", "test/unit/test_collection.py::test_match[padded-collection:padded", "test/unit/test_collection.py::test_change_property[padding-4-^head\\\\.(?P<index>(?P<padding>0*)\\\\d+?)\\\\.tail$-head.0001.tail]", "test/unit/test_collection.py::test_iterator[padded-collection]", "test/unit/test_collection.py::test_comparisons[equal]", "test/unit/test_collection.py::test_iterator[unpadded-collection]", "test/unit/test_collection.py::test_escaping_expression", "test/unit/test_collection.py::test_match[unpadded-collection:unpadded", "test/unit/test_collection.py::test_separate[single", "test/unit/test_collection.py::test_holes[multiple", "test/unit/test_collection.py::test_remove", "test/unit/test_collection.py::test_holes[contiguous", "test/unit/test_collection.py::test_holes[missing", "test/unit/test_collection.py::test_match[unpadded-collection:padded", "test/unit/test_collection.py::test_add_duplicate", "test/unit/test_collection.py::test_is_contiguous[non-contiguous]", "test/unit/test_collection.py::test_contains[valid", "test/unit/test_collection.py::test_separate[contiguous" ]
{ "failed_lite_validators": [ "has_short_problem_statement" ], "has_test_patch": true, "is_lite": false }
"2016-04-30T17:21:04"
apache-2.0
6si__shipwright-79
diff --git a/CHANGES.rst b/CHANGES.rst index f034d37..89cf5f1 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,8 @@ 0.5.1 (unreleased) ------------------ -- Nothing changed yet. +- Add --pull-cache to pull images from repository before building. + (`Issue #49 <https://github.com/6si/shipwright/issues/49>`_). 0.5.0 (2016-08-19) diff --git a/shipwright/base.py b/shipwright/base.py index 213d597..421f1af 100644 --- a/shipwright/base.py +++ b/shipwright/base.py @@ -4,10 +4,11 @@ from . import build, dependencies, docker, push class Shipwright(object): - def __init__(self, source_control, docker_client, tags): + def __init__(self, source_control, docker_client, tags, pull_cache=False): self.source_control = source_control self.docker_client = docker_client self.tags = tags + self._pull_cache = pull_cache def targets(self): return self.source_control.targets() @@ -18,7 +19,10 @@ class Shipwright(object): return self._build(this_ref_str, targets) def _build(self, this_ref_str, targets): - for evt in build.do_build(self.docker_client, this_ref_str, targets): + client = self.docker_client + pull_cache = self._pull_cache + ref = this_ref_str + for evt in build.do_build(client, ref, targets, pull_cache): yield evt # now that we're built and tagged all the images. diff --git a/shipwright/build.py b/shipwright/build.py index 707d4f9..4ee1558 100644 --- a/shipwright/build.py +++ b/shipwright/build.py @@ -13,7 +13,7 @@ def _merge(d1, d2): return d -def do_build(client, build_ref, targets): +def do_build(client, build_ref, targets, pull_cache): """ Generic function for building multiple images while notifying a callback function with output produced. 
@@ -39,11 +39,11 @@ def do_build(client, build_ref, targets): parent_ref = None if target.parent: parent_ref = build_index.get(target.parent) - for evt in build(client, parent_ref, target): + for evt in build(client, parent_ref, target, pull_cache): yield evt -def build(client, parent_ref, image): +def build(client, parent_ref, image, pull_cache): """ builds the given image tagged with <build_ref> and ensures that it depends on it's parent if it's part of this build group (shares @@ -62,7 +62,25 @@ def build(client, parent_ref, image): built_tags = docker.last_built_from_docker(client, image.name) if image.ref in built_tags: - return [] + return + + if pull_cache: + pull_evts = client.pull( + repository=image.name, + tag=image.ref, + stream=True, + ) + + failed = False + for evt in pull_evts: + event = process_event_(evt) + if 'error' in event: + failed = True + else: + yield event + + if not failed: + return build_evts = client.build( fileobj=mkcontext(parent_ref, image.path), @@ -73,4 +91,5 @@ def build(client, parent_ref, image): dockerfile=os.path.basename(image.path), ) - return (process_event_(evt) for evt in build_evts) + for evt in build_evts: + yield process_event_(evt) diff --git a/shipwright/cli.py b/shipwright/cli.py index 24f6f78..82eaf50 100644 --- a/shipwright/cli.py +++ b/shipwright/cli.py @@ -109,6 +109,11 @@ def argparser(): help='Build working tree, including uncommited and untracked changes', action='store_true', ) + common.add_argument( + '--pull-cache', + help='When building try to pull previously built images', + action='store_true', + ) a_arg( common, '-d', '--dependants', help='Build DEPENDANTS and all its dependants', @@ -157,7 +162,6 @@ def old_style_arg_dict(namespace): '--exclude': _flatten(ns.exclude), '--help': False, '--no-build': getattr(ns, 'no_build', False), - '--dirty': getattr(ns, 'dirty', False), '--upto': _flatten(ns.upto), '--x-assert-hostname': ns.x_assert_hostname, '-H': ns.docker_host, @@ -237,8 +241,10 @@ def run(path, 
arguments, client_cfg, environ, new_style_args=None): if new_style_args is None: dirty = False + pull_cache = False else: dirty = new_style_args.dirty + pull_cache = new_style_args.pull_cache namespace = config['namespace'] name_map = config.get('names', {}) @@ -249,7 +255,7 @@ def run(path, arguments, client_cfg, environ, new_style_args=None): 'to commit these changes, re-run with the --dirty flag.' ) - sw = Shipwright(scm, client, arguments['tags']) + sw = Shipwright(scm, client, arguments['tags'], pull_cache) command = getattr(sw, command_name) show_progress = sys.stdout.isatty()
6si/shipwright
7d3ccf39acc79bb6d33a787e773227358764dd2c
diff --git a/tests/integration/test_docker_builds.py b/tests/integration/test_docker_builds.py index 00aa6be..3a22616 100644 --- a/tests/integration/test_docker_builds.py +++ b/tests/integration/test_docker_builds.py @@ -12,7 +12,7 @@ from .utils import commit_untracked, create_repo, get_defaults def default_args(): - return argparse.Namespace(dirty=False) + return argparse.Namespace(dirty=False, pull_cache=False) def test_sample(tmpdir, docker_client): @@ -734,3 +734,85 @@ def test_build_with_repo_digest(tmpdir, docker_client, registry): ) for image in old_images: cli.remove_image(image, force=True) + + +def test_docker_buld_pull_cache(tmpdir, docker_client, registry): + path = str(tmpdir.join('shipwright-localhost-sample')) + source = pkg_resources.resource_filename( + __name__, + 'examples/shipwright-localhost-sample', + ) + repo = create_repo(path, source) + tag = repo.head.ref.commit.hexsha[:12] + + client_cfg = docker_utils.kwargs_from_env() + cli = docker_client + + defaults = get_defaults() + defaults['push'] = True + try: + shipw_cli.run( + path=path, + client_cfg=client_cfg, + arguments=defaults, + environ={}, + ) + + # Remove the build images: + old_images = ( + cli.images(name='localhost:5000/service1', quiet=True) + + cli.images(name='localhost:5000/shared', quiet=True) + + cli.images(name='localhost:5000/base', quiet=True) + ) + for image in old_images: + cli.remove_image(image, force=True) + + images_after_delete = ( + cli.images(name='localhost:5000/service1') + + cli.images(name='localhost:5000/shared') + + cli.images(name='localhost:5000/base') + ) + assert images_after_delete == [] + + args = default_args() + args.pull_cache = True + + shipw_cli.run( + path=path, + client_cfg=client_cfg, + arguments=defaults, + environ={}, + new_style_args=args, + ) + + service1, shared, base = ( + cli.images(name='localhost:5000/service1') + + cli.images(name='localhost:5000/shared') + + cli.images(name='localhost:5000/base') + ) + + assert 
set(service1['RepoTags']) == { + 'localhost:5000/service1:master', + 'localhost:5000/service1:latest', + 'localhost:5000/service1:' + tag, + } + + assert set(shared['RepoTags']) == { + 'localhost:5000/shared:master', + 'localhost:5000/shared:latest', + 'localhost:5000/shared:' + tag, + } + + assert set(base['RepoTags']) == { + 'localhost:5000/base:master', + 'localhost:5000/base:latest', + 'localhost:5000/base:' + tag, + } + finally: + old_images = ( + cli.images(name='localhost:5000/service1', quiet=True) + + cli.images(name='localhost:5000/shared', quiet=True) + + cli.images(name='localhost:5000/base', quiet=True) + ) + for image in old_images: + cli.remove_image(image, force=True) diff --git a/tests/test_cli.py b/tests/test_cli.py index 260eb92..064f931 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -16,7 +16,6 @@ def get_defaults(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': False, '--upto': [], '--x-assert-hostname': False, '-H': None, @@ -90,7 +89,6 @@ def test_args(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': False, '--upto': [], '--x-assert-hostname': True, '-H': None, @@ -105,7 +103,7 @@ def test_args_2(): args = [ '--account=x', '--x-assert-hostname', 'build', '-d', 'foo', 'bar', - '-t', 'foo', '--dirty', + '-t', 'foo', '--dirty', '--pull-cache', ] parser = cli.argparser() arguments = cli.old_style_arg_dict(parser.parse_args(args)) @@ -118,7 +116,6 @@ def test_args_2(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': True, '--upto': [], '--x-assert-hostname': True, '-H': None, @@ -142,7 +139,6 @@ def test_args_base(): '--exclude': [], '--help': False, '--no-build': False, - '--dirty': False, '--upto': [], '--x-assert-hostname': False, '-H': None,
docker pull all images for current branch and master before building Because our buildserver forgets the docker cache between builds we pull the previous build for all the images. it would be great if we could get shipwright to do it. Otherwise a command like "shipright images" which lists all the images that shipwright *would* build would let us write our own command to do this.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_cli.py::test_args", "tests/test_cli.py::test_args_2", "tests/test_cli.py::test_args_base" ]
[ "tests/integration/test_docker_builds.py::test_dirty_fails_without_flag", "tests/test_cli.py::test_without_json_manifest", "tests/test_cli.py::test_push_also_builds", "tests/test_cli.py::test_assert_hostname" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2016-08-22T09:51:49"
apache-2.0
ARM-software__mango-11
diff --git a/mango/domain/distribution.py b/mango/domain/distribution.py index 4f5b69d..bb9e14d 100644 --- a/mango/domain/distribution.py +++ b/mango/domain/distribution.py @@ -1,49 +1,5 @@ -# Defining loguniform distribution -""" -Credits: Extended from the original definition of rvs function in scipy/scipy/stats/_distn_infrastructure.py -for the class rv_generic and the _rvs function for the uniform distribution from -scipy/scipy/stats/_continuous_distns.py -""" +from scipy.stats import loguniform as _loguniform -from scipy.stats import rv_continuous -import numpy as np - -class log_uniform_gen(rv_continuous): - """A log uniform distribution with base 10 - """ - - def __init__(self, *args, **kwargs): - self.base = 10 - super(log_uniform_gen, self).__init__(*args, **kwargs) - - def _log(self, x): - return np.log(x) / np.log(self.base) - - def _argcheck(self, a, b): - return (a > 0) & (b > a) - - def _get_support(self, a, b): - return a, b - - def _pdf(self, x, a, b): - # reciprocal.pdf(x, a, b) = 1 / (x*log(b/a)) - return 1.0 / (x * self._log(b * 1.0 / a)) - - def _logpdf(self, x, a, b): - return np.log(x) - np.log(self._log(b * 1.0 / a)) - - def _cdf(self, x, a, b): - return (self._log(x) - self._log(a)) / self._log(b * 1.0 / a) - - def _ppf(self, q, a, b): - return a*pow(b*1.0/a, q) - - def _munp(self, n, a, b): - return 1.0/self._log(b*1.0/a) / n * (pow(b*1.0, n) - pow(a*1.0, n)) - - def _entropy(self, a, b): - return 0.5*np.log(a*b)+np.log(self._log(b*1.0/a)) - - -loguniform = log_uniform_gen(name='loguniform') \ No newline at end of file +def loguniform(a, b): + return _loguniform(10 ** a, 10 ** (a + b))
ARM-software/mango
e2d4fd8ae61d2ab8921c94fa2f4dafc1119dbab2
diff --git a/mango/tests/test_domain_space.py b/mango/tests/test_domain_space.py index f393f2b..58fcbc6 100644 --- a/mango/tests/test_domain_space.py +++ b/mango/tests/test_domain_space.py @@ -2,6 +2,7 @@ import numpy as np from scipy.stats import uniform, loguniform from mango.domain.domain_space import domain_space +from mango.domain.distribution import loguniform as mango_loguniform def test_domain(): @@ -34,6 +35,15 @@ def test_domain(): assert (sample[param] in params[param]) +def test_mango_loguniform(): + space = { + 'a': mango_loguniform(-3, 6) + } + ds = domain_space(space, domain_size=1000) + samples = ds.get_domain() + assert all(1e-3 < sample['a'] < 1e3 for sample in samples) + + def test_gp_samples_to_params(): space = { 'a': range(10), @@ -91,7 +101,7 @@ def test_gp_space(): assert (X >= 0.0).all() assert (X[:, 0] == 1.).all() # a assert (X[:, 1] == 0.).all() # b - assert np.isin(X[:, 2], [0.0, 0.5, 1.0]).all() # c + assert np.isin(X[:, 2], [0.0, 0.5, 1.0]).all() # c assert np.isin(X[:, 4:7], np.eye(3)).all() # e assert X.shape == (ds.domain_size, 12) @@ -110,5 +120,3 @@ def test_gp_space(): X2 = ds.convert_to_gp(params) assert np.isclose(X2, X).all() - -
Domain error in loguniform Hi, seems that there is a problem with `loguniform` when one of its argument is negative. For example, my code is runnable when the first argument of `loguniform` is positive and it generates domain error when the first argument is a negative number. Any thought on this?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "mango/tests/test_domain_space.py::test_mango_loguniform" ]
[ "mango/tests/test_domain_space.py::test_domain", "mango/tests/test_domain_space.py::test_gp_samples_to_params", "mango/tests/test_domain_space.py::test_gp_space" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2020-06-11T05:56:07"
apache-2.0
ARM-software__mango-47
diff --git a/README.md b/README.md index 16b11cb..2f14c63 100644 --- a/README.md +++ b/README.md @@ -352,8 +352,9 @@ The configuration parameters are: ... return True/False ``` -Early stopping is one of Mango's important features that allow to early terminate the current parallel search based on the custom user-designed criteria, such as the total optimization time spent, current validation accuracy achieved, or improvements in the past few iterations. For usage see early stopping examples [notebook](https://github.com/ARM-software/mango/blob/master/examples/EarlyStopping.ipynb). + Early stopping is one of Mango's important features that allow to early terminate the current parallel search based on the custom user-designed criteria, such as the total optimization time spent, current validation accuracy achieved, or improvements in the past few iterations. For usage see early stopping examples [notebook](https://github.com/ARM-software/mango/blob/master/examples/EarlyStopping.ipynb). +- initial_custom: A list of initial evaluation points to warm up the optimizer instead of random sampling. For example, for a search space with two parameters `x1` and `x2` the input could be: `[{'x1': 10, 'x2': -5}, {'x1': 0, 'x2': 10}]`. This allows the user to customize the initial evaluation points and therefore guide the optimization process. If this option is given then `initial_random` is ignored. The default configuration parameters can be modified, as shown below. Only the parameters whose values need to adjusted can be passed as the dictionary. 
diff --git a/mango/tuner.py b/mango/tuner.py index 360a859..97f02a1 100644 --- a/mango/tuner.py +++ b/mango/tuner.py @@ -29,6 +29,7 @@ class Tuner: class Config: domain_size: int = None initial_random: int = 2 + initial_custom: dict = None num_iteration: int = 20 batch_size: int = 1 optimizer: str = 'Bayesian' @@ -151,25 +152,35 @@ class Tuner: self.maximize_objective = False return self.run() + + def run_initial(self): + if self.config.initial_custom is not None: + X_tried = copy.deepcopy(self.config.initial_custom) + X_list, Y_list = self.runUserObjective(X_tried) + else: + # getting first few random values + X_tried = self.ds.get_random_sample(self.config.initial_random) + X_list, Y_list = self.runUserObjective(X_tried) + + # in case initial random results are invalid try different samples + n_tries = 1 + while len(Y_list) < self.config.initial_random and n_tries < 3: + X_tried2 = self.ds.get_random_sample(self.config.initial_random - len(Y_list)) + X_list2, Y_list2 = self.runUserObjective(X_tried2) + X_tried2.extend(X_tried2) + X_list = np.append(X_list, X_list2) + Y_list = np.append(Y_list, Y_list2) + n_tries += 1 + + if len(Y_list) == 0: + raise ValueError("No valid configuration found to initiate the Bayesian Optimizer") + return X_list, Y_list, X_tried + def runBayesianOptimizer(self): results = dict() - # getting first few random values - random_hyper_parameters = self.ds.get_random_sample(self.config.initial_random) - X_list, Y_list = self.runUserObjective(random_hyper_parameters) - - # in case initial random results are invalid try different samples - n_tries = 1 - while len(Y_list) < self.config.initial_random and n_tries < 3: - random_hps = self.ds.get_random_sample(self.config.initial_random - len(Y_list)) - X_list2, Y_list2 = self.runUserObjective(random_hps) - random_hyper_parameters.extend(random_hps) - X_list = np.append(X_list, X_list2) - Y_list = np.append(Y_list, Y_list2) - n_tries += 1 + X_list, Y_list, X_tried = self.run_initial() - if 
len(Y_list) == 0: - raise ValueError("No valid configuration found to initiate the Bayesian Optimizer") # evaluated hyper parameters are used X_init = self.ds.convert_GP_space(X_list) @@ -186,7 +197,7 @@ class Tuner: X_sample = X_init Y_sample = Y_init - hyper_parameters_tried = random_hyper_parameters + hyper_parameters_tried = X_tried objective_function_values = Y_list surrogate_values = Y_list
ARM-software/mango
a71bc007a0c4e39462fd1810cdbcf99c4e854679
diff --git a/tests/test_tuner.py b/tests/test_tuner.py index 24e7c99..98e5fbd 100644 --- a/tests/test_tuner.py +++ b/tests/test_tuner.py @@ -14,7 +14,6 @@ import numpy as np from mango.domain.domain_space import domain_space from mango import Tuner, scheduler from scipy.stats import uniform -from mango.domain.distribution import loguniform # Simple param_dict param_dict = {"a": uniform(0, 1), # uniform distribution @@ -125,7 +124,7 @@ def test_rosenbrock(): results.append(result) return results - tuner = Tuner(param_dict, objfunc, conf_dict=dict(domain_size=100000)) + tuner = Tuner(param_dict, objfunc, conf_dict=dict(domain_size=100000, num_iteration=40)) results = tuner.run() print('best hyper parameters:', results['best_params']) @@ -190,6 +189,40 @@ def test_convex(): assert abs(results['best_params']['y'] - y_opt) <= 3 +def test_initial_custom(): + param_dict = { + 'x': range(-100, 10), + 'y': range(-10, 20), + } + + x_opt = 0 + y_opt = 0 + + def objfunc(args_list): + results = [] + for hyper_par in args_list: + x = hyper_par['x'] + y = hyper_par['y'] + result = (x ** 2 + y ** 2) / 1e4 + results.append(result) + return results + + config = dict(initial_custom=[dict(x=-100, y=20), + dict(x=10, y=20)] + ) + + tuner = Tuner(param_dict, objfunc, conf_dict=config) + results = tuner.minimize() + + print('best hyper parameters:', results['best_params']) + print('best Accuracy:', results['best_objective']) + + assert abs(results['best_params']['x'] - x_opt) <= 3 + assert abs(results['best_params']['y'] - y_opt) <= 3 + assert results['random_params'][0] == config['initial_custom'][0] + assert results['random_params'][1] == config['initial_custom'][1] + + def test_local_scheduler(): param_space = dict(x=range(-10, 10), y=range(-10, 10))
Is it possible to add an initial data point? The way Im currently using mango, I will always have a first run with good defaults. Is it possible to use this information somehow? I have quite wide ranges for my hyper parameters, and I think this would help a lot.
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/test_tuner.py::test_initial_custom" ]
[ "tests/test_tuner.py::test_domain", "tests/test_tuner.py::test_tuner", "tests/test_tuner.py::test_rosenbrock", "tests/test_tuner.py::test_config", "tests/test_tuner.py::test_convex", "tests/test_tuner.py::test_six_hump", "tests/test_tuner.py::test_celery_scheduler", "tests/test_tuner.py::test_custom_scheduler", "tests/test_tuner.py::test_early_stopping_simple", "tests/test_tuner.py::test_early_stopping_complex" ]
{ "failed_lite_validators": [ "has_many_modified_files", "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2022-03-02T18:10:44"
apache-2.0
ARMmbed__greentea-237
diff --git a/mbed_greentea/mbed_report_api.py b/mbed_greentea/mbed_report_api.py index da3f0d9..82acb5c 100644 --- a/mbed_greentea/mbed_report_api.py +++ b/mbed_greentea/mbed_report_api.py @@ -38,6 +38,13 @@ def exporter_json(test_result_ext, test_suite_properties=None): @details This is a machine friendly format """ import json + for target in test_result_ext.values(): + for suite in target.values(): + try: + suite["single_test_output"] = suite["single_test_output"]\ + .decode("unicode_escape") + except KeyError: + pass return json.dumps(test_result_ext, indent=4) @@ -211,7 +218,10 @@ def exporter_testcase_junit(test_result_ext, test_suite_properties=None): test_cases.append(tc) ts_name = target_name - test_build_properties = test_suite_properties[target_name] if target_name in test_suite_properties else None + if test_suite_properties and target_name in test_suite_properties: + test_build_properties = test_suite_properties[target_name] + else: + test_build_properties = None ts = TestSuite(ts_name, test_cases, properties=test_build_properties) test_suites.append(ts) @@ -584,7 +594,9 @@ def get_result_overlay_dropdowns(result_div_id, test_results): result_output_div_id = "%s_output" % result_div_id result_output_dropdown = get_dropdown_html(result_output_div_id, "Test Output", - test_results['single_test_output'].rstrip("\n"), + test_results['single_test_output'] + .decode("unicode-escape") + .rstrip("\n"), output_text=True) # Add a dropdown for the testcases if they are present @@ -740,10 +752,14 @@ def exporter_html(test_result_ext, test_suite_properties=None): test_results['single_test_count'] += 1 result_class = get_result_colour_class(test_results['single_test_result']) + try: + percent_pass = int((test_results['single_test_passes']*100.0)/test_results['single_test_count']) + except ZeroDivisionError: + percent_pass = 100 this_row += result_cell_template % (result_class, result_div_id, test_results['single_test_result'], - 
int((test_results['single_test_passes']*100.0)/test_results['single_test_count']), + percent_pass, test_results['single_test_passes'], test_results['single_test_count'], result_overlay)
ARMmbed/greentea
86f5ec3211a8f7f324bcdd3201012945ee0534ac
diff --git a/test/report_api.py b/test/report_api.py new file mode 100644 index 0000000..122e26e --- /dev/null +++ b/test/report_api.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +""" +mbed SDK +Copyright (c) 2017 ARM Limited + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import unittest +from mock import patch + +from mbed_greentea.mbed_report_api import exporter_html, \ + exporter_memory_metrics_csv, exporter_testcase_junit, \ + exporter_testcase_text, exporter_text, exporter_json + + +class ReportEmitting(unittest.TestCase): + + + report_fns = [exporter_html, exporter_memory_metrics_csv, + exporter_testcase_junit, exporter_testcase_text, + exporter_text, exporter_json] + def test_report_zero_tests(self): + test_data = {} + for report_fn in self.report_fns: + report_fn(test_data) + + def test_report_zero_testcases(self): + test_data = { + 'k64f-gcc_arm': { + 'garbage_test_suite' :{ + u'single_test_result': u'NOT_RAN', + u'elapsed_time': 0.0, + u'build_path': u'N/A', + u'build_path_abs': u'N/A', + u'copy_method': u'N/A', + u'image_path': u'N/A', + u'single_test_output': b'N/A', + u'platform_name': u'k64f', + u'test_bin_name': u'N/A', + u'testcase_result': {}, + } + } + } + for report_fn in self.report_fns: + report_fn(test_data)
mbedgt crash with float division by zero Hi Here is my command: mbedgt -V -v -t NUCLEO_F401RE-ARM,NUCLEO_F401RE-GCC_ARM,NUCLEO_F401RE-IAR,NUCLEO_F410RB-ARM,NUCLEO_F410RB-GCC_ARM,NUCLEO_F410RB-IAR,NUCLEO_F411RE-ARM,NUCLEO_F411RE-GCC_ARM,NUCLEO_F411RE-IAR --report-html=/c/xxx.html It has crashed: ... mbedgt: all tests finished! mbedgt: shuffle seed: 0.3680156551 mbedgt: exporting to HTML file mbedgt: unexpected error: float division by zero Traceback (most recent call last): File "C:\Python27\Scripts\mbedgt-script.py", line 11, in <module> load_entry_point('mbed-greentea==1.2.6', 'console_scripts', 'mbedgt')() File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 401, in main cli_ret = main_cli(opts, args) File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 1050, in main_cli html_report = exporter_html(test_report) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 747, in exporter_html int((test_results['single_test_passes']*100.0)/test_results['single_test_count']), ZeroDivisionError: float division by zero
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/report_api.py::ReportEmitting::test_report_zero_testcases" ]
[ "test/report_api.py::ReportEmitting::test_report_zero_tests" ]
{ "failed_lite_validators": [ "has_many_hunks" ], "has_test_patch": true, "is_lite": false }
"2017-09-25T13:51:40"
apache-2.0
ARMmbed__greentea-243
diff --git a/mbed_greentea/mbed_report_api.py b/mbed_greentea/mbed_report_api.py index 166bc29..22a3778 100644 --- a/mbed_greentea/mbed_report_api.py +++ b/mbed_greentea/mbed_report_api.py @@ -42,7 +42,7 @@ def exporter_json(test_result_ext, test_suite_properties=None): for suite in target.values(): try: suite["single_test_output"] = suite["single_test_output"]\ - .decode("unicode_escape") + .decode("utf-8", "replace") except KeyError: pass return json.dumps(test_result_ext, indent=4) @@ -603,7 +603,7 @@ def get_result_overlay_dropdowns(result_div_id, test_results): result_output_dropdown = get_dropdown_html(result_output_div_id, "Test Output", test_results['single_test_output'] - .decode("unicode-escape") + .decode("utf-8", "replace") .rstrip("\n"), output_text=True)
ARMmbed/greentea
8f7b28f8ec739156d238304fa4f5f2e5156536f5
diff --git a/test/report_api.py b/test/report_api.py index 122e26e..2a4275f 100644 --- a/test/report_api.py +++ b/test/report_api.py @@ -45,7 +45,7 @@ class ReportEmitting(unittest.TestCase): u'build_path_abs': u'N/A', u'copy_method': u'N/A', u'image_path': u'N/A', - u'single_test_output': b'N/A', + u'single_test_output': b'\x80abc\uXXXX' , u'platform_name': u'k64f', u'test_bin_name': u'N/A', u'testcase_result': {},
mbedgt crash with UnicodeDecodeError Hi I am sorry, but I still get some crash with the new green tea version ... mbedgt: exporting to HTML file 'C:/mcu/reports/report__mbed_os5_release_non_regression_F756ZG_mbed-os-5.5.7__2017_09_28_00_06.html'... mbedgt: unexpected error: 'unicodeescape' codec can't decode bytes in position 6308-6310: truncated \uXXXX escape Traceback (most recent call last): File "C:\Python27\Scripts\mbedgt-script.py", line 11, in <module> load_entry_point('mbed-greentea==1.3.0', 'console_scripts', 'mbedgt')() File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 416, in main cli_ret = main_cli(opts, args) File "c:\python27\lib\site-packages\mbed_greentea\mbed_greentea_cli.py", line 1067, in main_cli html_report = exporter_html(test_report) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 747, in exporter_html test_results) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 636, in get_result_overlay overlay_dropdowns = get_result_overlay_dropdowns(result_div_id, test_results) File "c:\python27\lib\site-packages\mbed_greentea\mbed_report_api.py", line 598, in get_result_overlay_dropdowns .decode("unicode-escape") UnicodeDecodeError: 'unicodeescape' codec can't decode bytes in position 6308-6310: truncated \uXXXX escape @theotherjimmy
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/report_api.py::ReportEmitting::test_report_zero_testcases" ]
[ "test/report_api.py::ReportEmitting::test_report_zero_tests" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2017-09-29T17:09:53"
apache-2.0
ARMmbed__greentea-250
diff --git a/mbed_greentea/mbed_target_info.py b/mbed_greentea/mbed_target_info.py index 356676b..c825bcf 100644 --- a/mbed_greentea/mbed_target_info.py +++ b/mbed_greentea/mbed_target_info.py @@ -20,6 +20,17 @@ Author: Przemyslaw Wirkus <[email protected]> import os import re import json +from os import walk +try: + from contextlib import suppress +except ImportError: + from contextlib import contextmanager + @contextmanager + def suppress(*excs): + try: + yield + except excs: + pass from mbed_greentea.mbed_common_api import run_cli_process from mbed_greentea.mbed_greentea_log import gt_logger @@ -381,82 +392,65 @@ def get_platform_property(platform, property): :return: property value, None if property not found """ - # First load from targets.json if available - value_from_targets_json = get_platform_property_from_targets(platform, property) - if value_from_targets_json: - return value_from_targets_json - - # Check if info is available for a specific platform - if platform in TARGET_INFO_MAPPING: - if property in TARGET_INFO_MAPPING[platform]['properties']: - return TARGET_INFO_MAPPING[platform]['properties'][property] + default = _get_platform_property_from_default(property) + from_targets_json = _get_platform_property_from_targets( + platform, property, default) + if from_targets_json: + return from_targets_json + from_info_mapping = _get_platform_property_from_info_mapping(platform, property) + if from_info_mapping: + return from_info_mapping + return default + +def _get_platform_property_from_default(property): + with suppress(KeyError): + return TARGET_INFO_MAPPING['default'][property] + +def _get_platform_property_from_info_mapping(platform, property): + with suppress(KeyError): + return TARGET_INFO_MAPPING[platform]['properties'][property] + +def _platform_property_from_targets_json(targets, platform, property, default): + """! Get a platforms's property from the target data structure in + targets.json. Takes into account target inheritance. 
+ @param targets Data structure parsed from targets.json + @param platform Name of the platform + @param property Name of the property + @param default the fallback value if none is found, but the target exists + @return property value, None if property not found - # Check if default data is available - if 'default' in TARGET_INFO_MAPPING: - if property in TARGET_INFO_MAPPING['default']: - return TARGET_INFO_MAPPING['default'][property] - - return None + """ + with suppress(KeyError): + return targets[platform][property] + with suppress(KeyError): + for inherited_target in targets[platform]['inherits']: + result = _platform_property_from_targets_json(targets, inherited_target, property, None) + if result: + return result + if platform in targets: + return default + +IGNORED_DIRS = ['.build', 'BUILD', 'tools'] + +def _find_targets_json(path): + for root, dirs, files in walk(path, followlinks=True): + for ignored_dir in IGNORED_DIRS: + if ignored_dir in dirs: + dirs.remove(ignored_dir) + if 'targets.json' in files: + yield os.path.join(root, 'targets.json') -def get_platform_property_from_targets(platform, property): +def _get_platform_property_from_targets(platform, property, default): """ Load properties from targets.json file somewhere in the project structure :param platform: :return: property value, None if property not found """ - - def get_platform_property_from_targets(targets, platform, property): - """! Get a platforms's property from the target data structure in - targets.json. Takes into account target inheritance. 
- @param targets Data structure parsed from targets.json - @param platform Name of the platform - @param property Name of the property - @return property value, None if property not found - - """ - - result = None - if platform in targets: - if property in targets[platform]: - result = targets[platform][property] - elif 'inherits' in targets[platform]: - result = None - for inherited_target in targets[platform]['inherits']: - result = get_platform_property_from_targets(targets, inherited_target, property) - - # Stop searching after finding the first value for the property - if result: - break - - return result - - result = None - targets_json_path = [] - for root, dirs, files in os.walk(os.getcwd(), followlinks=True): - ignored_dirs = ['.build', 'BUILD', 'tools'] - - for ignored_dir in ignored_dirs: - if ignored_dir in dirs: - dirs.remove(ignored_dir) - - if 'targets.json' in files: - targets_json_path.append(os.path.join(root, 'targets.json')) - - if not targets_json_path: - gt_logger.gt_log_warn("No targets.json files found, using default target properties") - - for targets_path in targets_json_path: - try: + for targets_path in _find_targets_json(os.getcwd()): + with suppress(IOError, ValueError): with open(targets_path, 'r') as f: targets = json.load(f) - - # Load property from targets.json - result = get_platform_property_from_targets(targets, platform, property) - - # If a valid property was found, stop looking + result = _platform_property_from_targets_json(targets, platform, property, default) if result: - break - except Exception: - continue - return result + return result diff --git a/setup.py b/setup.py index e98e109..0734dfe 100644 --- a/setup.py +++ b/setup.py @@ -50,13 +50,15 @@ setup(name='mbed-greentea', license=LICENSE, test_suite = 'test', entry_points={ - "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",], + "console_scripts": ["mbedgt=mbed_greentea.mbed_greentea_cli:main",], }, install_requires=["PrettyTable>=0.7.2", - 
"PySerial>=3.0", - "mbed-host-tests>=1.2.0", - "mbed-ls>=1.2.15", - "junit-xml", - "lockfile", - "mock", - "colorama>=0.3,<0.4"]) + "PySerial>=3.0", + "mbed-host-tests>=1.2.0", + "mbed-ls>=1.2.15", + "junit-xml", + "lockfile", + "mock", + "six", + "colorama>=0.3,<0.4"]) +
ARMmbed/greentea
b8bcffbb7aaced094f252a4ddfe930e8237fb484
diff --git a/test/mbed_gt_target_info.py b/test/mbed_gt_target_info.py index e3f0a6a..96cd1db 100644 --- a/test/mbed_gt_target_info.py +++ b/test/mbed_gt_target_info.py @@ -21,6 +21,8 @@ import shutil import tempfile import unittest +from six import StringIO + from mock import patch from mbed_greentea import mbed_target_info @@ -338,8 +340,168 @@ mbed-gcc 1.1.0 result = mbed_target_info.add_target_info_mapping("null") - def test_get_platform_property_from_targets(self): - result = mbed_target_info.get_platform_property_from_targets({}, {}) + def test_get_platform_property_from_targets_no_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find: + _find.return_value = iter([]) + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_no_file(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.side_effect = IOError + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_invalid_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{") + result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_empty_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{}") + 
result = mbed_target_info._get_platform_property_from_targets("not_a_platform", "not_a_property", "default") + self.assertIsNone(result) + + def test_get_platform_property_from_targets_no_value(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{\"K64F\": {}}") + result = mbed_target_info._get_platform_property_from_targets("K64F", "not_a_property", "default") + self.assertEqual(result, "default") + + def test_get_platform_property_from_targets_in_json(self): + with patch("mbed_greentea.mbed_target_info._find_targets_json") as _find,\ + patch("mbed_greentea.mbed_target_info.open") as _open: + _find.return_value = iter(["foo"]) + _open.return_value.__enter__.return_value = StringIO("{\"K64F\": {\"copy_method\": \"cp\"}}") + result = mbed_target_info._get_platform_property_from_targets("K64F", "copy_method", "default") + self.assertEqual("cp", result) + + def test_find_targets_json(self): + with patch("mbed_greentea.mbed_target_info.walk") as _walk: + _walk.return_value = iter([("", ["foo"], []), ("foo", [], ["targets.json"])]) + result = list(mbed_target_info._find_targets_json("bogus_path")) + self.assertEqual(result, ["foo/targets.json"]) + + def test_find_targets_json_ignored(self): + with patch("mbed_greentea.mbed_target_info.walk") as _walk: + walk_result =[("", [".build"], [])] + _walk.return_value = iter(walk_result) + result = list(mbed_target_info._find_targets_json("bogus_path")) + self.assertEqual(result, []) + self.assertEqual(walk_result, [("", [], [])]) + + def test_platform_property_from_targets_json_empty(self): + result = mbed_target_info._platform_property_from_targets_json( + {}, "not_a_target", "not_a_property", "default" + ) + self.assertIsNone(result) + + def test_platform_property_from_targets_json_base_target(self): + result = 
mbed_target_info._platform_property_from_targets_json( + {"K64F": {"copy_method": "cp"}}, "K64F", "copy_method", "default" + ) + self.assertEqual(result, "cp") + + def test_platform_property_from_targets_json_inherits(self): + result = mbed_target_info._platform_property_from_targets_json( + {"K64F": {"inherits": ["Target"]}, "Target": {"copy_method": "cp"}}, + "K64F", "copy_method", "default" + ) + self.assertEqual(result, "cp") + + def test_platform_property_from_default_missing(self): + result = mbed_target_info._get_platform_property_from_default("not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_default(self): + result = mbed_target_info._get_platform_property_from_default("copy_method") + self.assertEqual(result, "default") + + def test_platform_property_from_info_mapping_bad_platform(self): + result = mbed_target_info._get_platform_property_from_info_mapping("not_a_platform", "not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_info_mapping_missing(self): + result = mbed_target_info._get_platform_property_from_info_mapping("K64F", "not_a_property") + self.assertIsNone(result) + + def test_platform_property_from_info_mapping(self): + result = mbed_target_info._get_platform_property_from_info_mapping("K64F", "copy_method") + self.assertEqual(result, "default") + + + # The following test cases are taken from this table: + # + # Num | In targets.json | In yotta blob | In Default | property used + # --- | --------------- | ------------- | ---------- | -------------- + # 1 | Yes | No | Yes |`targets.json` + # 2 | Yes | Yes | Yes |`targets.json` + # 3 | No | Yes | Yes | yotta blob + # 4 | No | No | Yes | default + # 5 | No | No | No | None + # 6 | Yes | No | No |`targets.json` + # 7 | Yes | Yes | No |`targets.json` + # 8 | No | Yes | No | yotta blob + def test_platform_property(self): + """Test that platform_property picks the property value preserving + the following priority relationship: + targets.json 
> yotta blob > default + """ + with patch("mbed_greentea.mbed_target_info._get_platform_property_from_targets") as _targets,\ + patch("mbed_greentea.mbed_target_info._get_platform_property_from_info_mapping") as _info_mapping,\ + patch("mbed_greentea.mbed_target_info._get_platform_property_from_default") as _default: + # 1 + _targets.return_value = "targets" + _info_mapping.return_value = None + _default.return_value = "default" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 2 + _info_mapping.return_value = "yotta" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 3 + _targets.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "yotta") + # 4 + _info_mapping.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "default") + # 5 + _default.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + None) + # 6 + _targets.return_value = "targets" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 7 + _info_mapping.return_value = "yotta" + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "targets") + # 8 + _targets.return_value = None + self.assertEqual( + mbed_target_info.get_platform_property("K64F", "copy_method"), + "yotta") def test_parse_yotta_json_for_build_name(self):
Target property priority incorrect Currently we have priority as follows: ``` internal yotta blob > targets.json > tool default ``` This is a bug. Instead the priority should be: ``` targets.json /w default > internal yotta blob > tool delaut ``` This implies a few test cases: In targets.json | In yotta blob | property used | Currently Works ---------------------- | ------------- | ---------------- | --------------- Yes, with property | No | `targets.json` | Yes Yes, without property| No | default | Yes Yes, with property | Yes | `targets.json` | No Yes, without property | Yes | default | No No | No | default | Yes No | Yes | yotta blob | Yes @bridadan Is this the issue masked by #248?
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_inherits", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_invalid_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_in_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_base_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_bad_platform", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_file", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json_ignored", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_empty_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_value", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_empty", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default" ]
[ "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_failed_open", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_valid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_add_target_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_json_for_build_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_multiple", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_target_from_current_dir_ok", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_json_data", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text_2", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_chars", 
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_valid", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_with_ssl_errors", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_fail", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_text" ]
{ "failed_lite_validators": [ "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2017-10-20T19:13:58"
apache-2.0
ARMmbed__greentea-263
diff --git a/mbed_greentea/mbed_greentea_cli.py b/mbed_greentea/mbed_greentea_cli.py index f6a13c4..446b965 100644 --- a/mbed_greentea/mbed_greentea_cli.py +++ b/mbed_greentea/mbed_greentea_cli.py @@ -23,6 +23,7 @@ import os import sys import random import optparse +import fnmatch from time import time try: from Queue import Queue @@ -119,18 +120,6 @@ def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_sp @return """ - def filter_names_by_prefix(test_case_name_list, prefix_name): - """! - @param test_case_name_list List of all test cases - @param prefix_name Prefix of test name we are looking for - @result Set with names of test names starting with 'prefix_name' - """ - result = list() - for test_name in test_case_name_list: - if test_name.startswith(prefix_name): - result.append(test_name) - return sorted(result) - filtered_ctest_test_list = ctest_test_list test_list = None invalid_test_names = [] @@ -143,17 +132,15 @@ def create_filtered_test_list(ctest_test_list, test_by_names, skip_test, test_sp gt_logger.gt_log("test case filter (specified with -n option)") for test_name in set(test_list): - if test_name.endswith('*'): - # This 'star-sufix' filter allows users to filter tests with fixed prefixes - # Example: -n 'TESTS-mbed_drivers* will filter all test cases with name starting with 'TESTS-mbed_drivers' - for test_name_filtered in filter_names_by_prefix(ctest_test_list.keys(), test_name[:-1]): - gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name_filtered)) - filtered_ctest_test_list[test_name_filtered] = ctest_test_list[test_name_filtered] - elif test_name not in ctest_test_list: - invalid_test_names.append(test_name) + gt_logger.gt_log_tab(test_name) + matches = [test for test in ctest_test_list.keys() if fnmatch.fnmatch(test, test_name)] + gt_logger.gt_log_tab(str(ctest_test_list)) + if matches: + for match in matches: + gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(match)) + 
filtered_ctest_test_list[match] = ctest_test_list[match] else: - gt_logger.gt_log_tab("test filtered in '%s'"% gt_logger.gt_bright(test_name)) - filtered_ctest_test_list[test_name] = ctest_test_list[test_name] + invalid_test_names.append(test_name) if skip_test: test_list = skip_test.split(',')
ARMmbed/greentea
68508c5f4d7cf0635c75399d0ff7cfa896fdf2cc
diff --git a/test/mbed_gt_cli.py b/test/mbed_gt_cli.py index 0646c20..8f4a1eb 100644 --- a/test/mbed_gt_cli.py +++ b/test/mbed_gt_cli.py @@ -21,6 +21,36 @@ import sys import unittest from mbed_greentea import mbed_greentea_cli +from mbed_greentea.tests_spec import TestSpec + +test_spec_def = { + "builds": { + "K64F-ARM": { + "platform": "K64F", + "toolchain": "ARM", + "base_path": "./.build/K64F/ARM", + "baud_rate": 115200, + "tests": { + "mbed-drivers-test-generic_tests":{ + "binaries":[ + { + "binary_type": "bootable", + "path": "./.build/K64F/ARM/mbed-drivers-test-generic_tests.bin" + } + ] + }, + "mbed-drivers-test-c_strings":{ + "binaries":[ + { + "binary_type": "bootable", + "path": "./.build/K64F/ARM/mbed-drivers-test-c_strings.bin" + } + ] + } + } + } + } +} class GreenteaCliFunctionality(unittest.TestCase): @@ -86,5 +116,36 @@ class GreenteaCliFunctionality(unittest.TestCase): os.chdir(curr_dir) shutil.rmtree(test1_dir) + def test_create_filtered_test_list(self): + test_spec = TestSpec() + test_spec.parse(test_spec_def) + test_build = test_spec.get_test_builds()[0] + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + 'mbed-drivers-test-generic_*', + None, + test_spec=test_spec) + self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-generic_tests'])) + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + '*_strings', + None, + test_spec=test_spec) + self.assertEqual(set(test_list.keys()), set(['mbed-drivers-test-c_strings'])) + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + 'mbed*s', + None, + test_spec=test_spec) + expected = set(['mbed-drivers-test-c_strings', 'mbed-drivers-test-generic_tests']) + self.assertEqual(set(test_list.keys()), expected) + + test_list = mbed_greentea_cli.create_filtered_test_list(test_build.get_tests(), + '*-drivers-*', + None, + test_spec=test_spec) + expected = set(['mbed-drivers-test-c_strings', 
'mbed-drivers-test-generic_tests']) + self.assertEqual(set(test_list.keys()), expected) + if __name__ == '__main__': unittest.main() diff --git a/test/mbed_gt_target_info.py b/test/mbed_gt_target_info.py index e630e7b..a12ba09 100644 --- a/test/mbed_gt_target_info.py +++ b/test/mbed_gt_target_info.py @@ -416,7 +416,7 @@ mbed-gcc 1.1.0 with patch("mbed_greentea.mbed_target_info.walk") as _walk: _walk.return_value = iter([("", ["foo"], []), ("foo", [], ["targets.json"])]) result = list(mbed_target_info._find_targets_json("bogus_path")) - self.assertEqual(result, ["foo/targets.json"]) + self.assertEqual(result, [os.path.join("foo", "targets.json")]) def test_find_targets_json_ignored(self): with patch("mbed_greentea.mbed_target_info.walk") as _walk:
Test names are not correctly globbed Test names only respect a wildcard that is placed at the end of the string. Ex. "mbed-os-*". However, it does not respect the wildcard anywhere else. Ex. "*-timer" The build tools accept these wildcards, so greentea should as well. This is the line responsible: https://github.com/ARMmbed/greentea/blob/32b95b44be653c3db527c02e1c5e1ffdc7d37f6f/mbed_greentea/mbed_greentea_cli.py#L146 Should be switched to `fnmatch`. (This is mostly a note to myself to fix it)
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_create_filtered_test_list" ]
[ "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_invalid_path", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_hello_string", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_default_path", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_local_host_tests_dir_valid_path", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_get_greentea_version", "test/mbed_gt_cli.py::GreenteaCliFunctionality::test_print_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json_ignored", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_value", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_add_target_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_chars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_json_for_build_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_fail", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_invalid_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_multiple", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_text", 
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_version", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_targets_from_yotta_local_module_valid", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_with_ssl_errors", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_no_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_invalid_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_inherits", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_invalid_path", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_failed_open", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_default_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_nl_whitechars", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_name", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_target", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_target_cmd_output_mixed_rcnl", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_yotta_target_from_local_config_valid_path", 
"test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_missing_json_data", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_empty", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_bad_platform", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_info_mapping_missing", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output_new_style_text_2", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_mbed_target_from_target_json_no_keywords", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_no_file", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_empty_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_platform_property_from_targets_in_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_get_mbed_target_from_current_dir_ok", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_find_targets_json", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_parse_yotta_search_cmd_output", "test/mbed_gt_target_info.py::GreenteaTargetInfo::test_platform_property_from_targets_json_base_target" ]
{ "failed_lite_validators": [], "has_test_patch": true, "is_lite": true }
"2018-02-15T17:29:56"
apache-2.0
ARMmbed__mbed-tools-138
diff --git a/news/20201201142709.bugfix b/news/20201201142709.bugfix new file mode 100644 index 0000000..0468f3e --- /dev/null +++ b/news/20201201142709.bugfix @@ -0,0 +1,1 @@ +Fix bug where we failed to handle config options that contain quotes (#125) diff --git a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl index e4820af..08ccced 100644 --- a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl +++ b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl @@ -65,7 +65,7 @@ set(MBED_CONFIG_DEFINITIONS # options {% for option in options -%} {% if option.value is not none -%} - {%if '{' in option.value|string or '(' in option.value|string %}"{% endif %}-D{{option.macro_name}}={{option.value}}{% if '}' in option.value|string or ')' in option.value|string %}"{% endif %} + "-D{{option.macro_name}}={{option.value|replace("\"", "\\\"")}}" {% endif %} {%- endfor %} # macros
ARMmbed/mbed-tools
94a3bd761d6ab3305c81da93517767aafff58d7e
diff --git a/tests/build/_internal/test_cmake_file.py b/tests/build/_internal/test_cmake_file.py index 1f59cb3..b0247a8 100644 --- a/tests/build/_internal/test_cmake_file.py +++ b/tests/build/_internal/test_cmake_file.py @@ -2,67 +2,69 @@ # Copyright (C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -from unittest import TestCase +import pytest -from tests.build._internal.config.factories import ConfigFactory +from tests.build._internal.config.factories import ConfigFactory, SourceFactory from mbed_tools.build._internal.cmake_file import generate_mbed_config_cmake_file, _render_mbed_config_cmake_template +from mbed_tools.build._internal.config.config import _create_config_option -class TestGenerateCMakeListsFile(TestCase): - def test_correct_arguments_passed(self): - target = dict() - target["labels"] = ["foo"] - target["extra_labels"] = ["morefoo"] - target["features"] = ["bar"] - target["components"] = ["baz"] - target["macros"] = ["macbaz"] - target["device_has"] = ["stuff"] - target["c_lib"] = ["c_lib"] - target["core"] = ["core"] - target["printf_lib"] = ["printf_lib"] - target["supported_form_factors"] = ["arduino"] +TOOLCHAIN_NAME = "gcc" + + [email protected]() +def fake_target(): + return { + "labels": ["foo"], + "extra_labels": ["morefoo"], + "features": ["bar"], + "components": ["baz"], + "macros": ["macbaz"], + "device_has": ["stuff"], + "c_lib": ["c_lib"], + "core": ["core"], + "printf_lib": ["printf_lib"], + "supported_form_factors": ["arduino"], + "supported_c_libs": {TOOLCHAIN_NAME: ["ginormous"]}, + "supported_application_profiles": ["full", "bare-metal"], + } + + +class TestGenerateCMakeListsFile: + def test_correct_arguments_passed(self, fake_target): config = ConfigFactory() mbed_target = "K64F" - toolchain_name = "GCC" - target["supported_c_libs"] = {toolchain_name.lower(): ["small", "std"]} - target["supported_application_profiles"] = ["full", "bare-metal"] - - result = generate_mbed_config_cmake_file(mbed_target, 
target, config, toolchain_name) - - self.assertEqual( - result, _render_mbed_config_cmake_template(target, config, toolchain_name, mbed_target,), - ) - - -class TestRendersCMakeListsFile(TestCase): - def test_returns_rendered_content(self): - target = dict() - target["labels"] = ["foo"] - target["extra_labels"] = ["morefoo"] - target["features"] = ["bar"] - target["components"] = ["baz"] - target["macros"] = ["macbaz"] - target["device_has"] = ["stuff"] - target["core"] = ["core"] - target["c_lib"] = ["c_lib"] - target["printf_lib"] = ["printf_lib"] - target["supported_form_factors"] = ["arduino"] + + result = generate_mbed_config_cmake_file(mbed_target, fake_target, config, TOOLCHAIN_NAME) + + assert result == _render_mbed_config_cmake_template(fake_target, config, TOOLCHAIN_NAME, mbed_target,) + + +class TestRendersCMakeListsFile: + def test_returns_rendered_content(self, fake_target): config = ConfigFactory() - toolchain_name = "baz" - target["supported_c_libs"] = {toolchain_name.lower(): ["small", "std"]} - target["supported_application_profiles"] = ["full", "bare-metal"] - result = _render_mbed_config_cmake_template(target, config, toolchain_name, "target_name") + result = _render_mbed_config_cmake_template(fake_target, config, TOOLCHAIN_NAME, "target_name") - for label in target["labels"] + target["extra_labels"]: - self.assertIn(label, result) + for label in fake_target["labels"] + fake_target["extra_labels"]: + assert label in result - for macro in target["features"] + target["components"] + [toolchain_name]: - self.assertIn(macro, result) + for macro in fake_target["features"] + fake_target["components"] + [TOOLCHAIN_NAME]: + assert macro in result - for toolchain in target["supported_c_libs"]: - self.assertIn(toolchain, result) + for toolchain in fake_target["supported_c_libs"]: + assert toolchain in result for supported_c_libs in toolchain: - self.assertIn(supported_c_libs, result) + assert supported_c_libs in result + + for 
supported_application_profiles in fake_target["supported_application_profiles"]: + assert supported_application_profiles in result + + def test_returns_quoted_content(self, fake_target): + config = ConfigFactory() + source = SourceFactory() + + # Add an option whose value contains quotes to the config. + _create_config_option(config, "iotc-mqtt-host", '{"mqtt.2030.ltsapis.goog", IOTC_MQTT_PORT}', source) - for supported_application_profiles in target["supported_application_profiles"]: - self.assertIn(supported_application_profiles, result) + result = _render_mbed_config_cmake_template(fake_target, config, TOOLCHAIN_NAME, "target_name") + assert '"-DMBED_CONF_IOTC_MQTT_HOST={\\"mqtt.2030.ltsapis.goog\\", IOTC_MQTT_PORT}"' in result
mbed-tools fails to handle config options that contain quotes ### Description From @rajkan01: For the below mbed_lib.json config ``` "iotc-mqtt-host": { "help": "IOTC MQTT host configuration. Defaults to mqtt.2030.ltsapis.goog host and port number 8883 if undefined", "value": "{\"mqtt.2030.ltsapis.goog\", IOTC_MQTT_PORT}", "macro_name": "IOTC_MQTT_HOST" } ``` Mbedtools is generating `"-DIOTC_MQTT_HOST={"mqtt.2030.ltsapis.goog", IOTC_MQTT_PORT}"` config starts double-quotes from -D itself, and CMake prepossessing time this macro gets divided into multiple #define like below because of this define begin with double-quotes and also the value ("mqtt.2030.ltsapis.goog") with double-quote consider to be a string ``` #define IOTC_MQTT_HOST { #define mqtt .2030.ltsapis.goog, IOTC_MQTT_PORT} 1 ``` Could someone check this, why is the mbedtools generating macros starts with double-quotes which include `-D` and fix. I've attached `main.ii` and `mbed_config.cmake` [mbed_config.zip](https://github.com/ARMmbed/mbed-tools/files/5602300/mbed_config.zip) ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [ ] Enhancement - [X] Bug
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/_internal/test_cmake_file.py::TestRendersCMakeListsFile::test_returns_quoted_content" ]
[ "tests/build/_internal/test_cmake_file.py::TestGenerateCMakeListsFile::test_correct_arguments_passed", "tests/build/_internal/test_cmake_file.py::TestRendersCMakeListsFile::test_returns_rendered_content" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2020-12-01T14:33:04"
apache-2.0
ARMmbed__mbed-tools-154
diff --git a/README.md b/README.md index fdd2e05..eff3449 100644 --- a/README.md +++ b/README.md @@ -48,6 +48,10 @@ follows: ## Installation +`mbed-tools` relies on the Ninja build system and CMake. +- CMake. [Install version 3.19.0 or newer for all operating systems](https://cmake.org/install/). +- Ninja. [Install version 1.0 or newer for all operating systems](https://github.com/ninja-build/ninja/wiki/Pre-built-Ninja-packages). + We recommend installing `mbed-tools` in a Python virtual environment to avoid dependency conflicts. To install the most recent production quality release use: diff --git a/news/20201210131204.bugfix b/news/20201210131204.bugfix new file mode 100644 index 0000000..65ae014 --- /dev/null +++ b/news/20201210131204.bugfix @@ -0,0 +1,1 @@ +Emit more useful error messages if CMake or Ninja aren't found in PATH. diff --git a/src/mbed_tools/build/build.py b/src/mbed_tools/build/build.py index 66822bc..2334bc4 100644 --- a/src/mbed_tools/build/build.py +++ b/src/mbed_tools/build/build.py @@ -22,6 +22,7 @@ def build_project(build_dir: pathlib.Path, target: Optional[str] = None) -> None build_dir: Path to the CMake build tree. target: The CMake target to build (e.g 'install') """ + _check_ninja_found() target_flag = ["--target", target] if target is not None else [] _cmake_wrapper("--build", str(build_dir), *target_flag) @@ -34,6 +35,7 @@ def generate_build_system(source_dir: pathlib.Path, build_dir: pathlib.Path, pro build_dir: Path to the CMake build tree. profile: The Mbed build profile (develop, debug or release). """ + _check_ninja_found() _cmake_wrapper("-S", str(source_dir), "-B", str(build_dir), "-GNinja", f"-DCMAKE_BUILD_TYPE={profile}") @@ -41,5 +43,16 @@ def _cmake_wrapper(*cmake_args: str) -> None: try: logger.debug("Running CMake with args: %s", cmake_args) subprocess.run(["cmake", *cmake_args], check=True) + except FileNotFoundError: + raise MbedBuildError("Could not find CMake. 
Please ensure CMake is installed and added to PATH.") except subprocess.CalledProcessError: raise MbedBuildError("CMake invocation failed!") + + +def _check_ninja_found() -> None: + try: + subprocess.run(["ninja", "--version"], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + except FileNotFoundError: + raise MbedBuildError( + "Could not find the 'Ninja' build program. Please ensure 'Ninja' is installed and added to PATH." + )
ARMmbed/mbed-tools
9d6b2c71a7ddc93bd71279482a7572cac30ed745
diff --git a/tests/build/test_build.py b/tests/build/test_build.py index b9d32af..5293966 100644 --- a/tests/build/test_build.py +++ b/tests/build/test_build.py @@ -2,45 +2,60 @@ # Copyright (C) 2020 Arm Mbed. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -import pathlib +import subprocess -from tempfile import TemporaryDirectory -from unittest import TestCase, mock +from unittest import mock + +import pytest from mbed_tools.build.build import build_project, generate_build_system from mbed_tools.build.exceptions import MbedBuildError -class TestBuildProject(TestCase): - @mock.patch("mbed_tools.build.build._cmake_wrapper") - def test_invokes_cmake_with_correct_args(self, cmake_wrapper): [email protected] +def subprocess_run(): + with mock.patch("mbed_tools.build.build.subprocess.run", autospec=True) as subproc: + yield subproc + + +class TestBuildProject: + def test_invokes_cmake_with_correct_args(self, subprocess_run): build_project(build_dir="cmake_build", target="install") - cmake_wrapper.assert_called_once_with("--build", "cmake_build", "--target", "install") + subprocess_run.assert_called_with(["cmake", "--build", "cmake_build", "--target", "install"], check=True) - @mock.patch("mbed_tools.build.build._cmake_wrapper") - def test_invokes_cmake_with_correct_args_if_no_target_passed(self, cmake_wrapper): + def test_invokes_cmake_with_correct_args_if_no_target_passed(self, subprocess_run): build_project(build_dir="cmake_build") - cmake_wrapper.assert_called_once_with("--build", "cmake_build") + subprocess_run.assert_called_with(["cmake", "--build", "cmake_build"], check=True) - def test_raises_build_error_if_build_dir_doesnt_exist(self): - with TemporaryDirectory() as tmp_dir: - nonexistent_build_dir = pathlib.Path(tmp_dir, "cmake_build") + def test_raises_build_error_if_cmake_invocation_fails(self, subprocess_run): + subprocess_run.side_effect = (None, subprocess.CalledProcessError(1, "")) - with self.assertRaises(MbedBuildError): - 
build_project(nonexistent_build_dir) + with pytest.raises(MbedBuildError, match="CMake invocation failed"): + build_project(build_dir="cmake_build") [email protected]("mbed_tools.build.build._cmake_wrapper") -class TestConfigureProject(TestCase): - def test_invokes_cmake_with_correct_args(self, cmake_wrapper): +class TestConfigureProject: + def test_invokes_cmake_with_correct_args(self, subprocess_run): source_dir = "source_dir" build_dir = "cmake_build" profile = "debug" generate_build_system(source_dir, build_dir, profile) - cmake_wrapper.assert_called_once_with( - "-S", source_dir, "-B", build_dir, "-GNinja", f"-DCMAKE_BUILD_TYPE={profile}" + subprocess_run.assert_called_with( + ["cmake", "-S", source_dir, "-B", build_dir, "-GNinja", f"-DCMAKE_BUILD_TYPE={profile}"], check=True ) + + def test_raises_when_ninja_cannot_be_found(self, subprocess_run): + subprocess_run.side_effect = FileNotFoundError + + with pytest.raises(MbedBuildError, match="Ninja"): + generate_build_system("", "", "") + + def test_raises_when_cmake_cannot_be_found(self, subprocess_run): + subprocess_run.side_effect = (None, FileNotFoundError) + + with pytest.raises(MbedBuildError, match="Could not find CMake"): + generate_build_system("", "", "")
README.md : miss cmake and ninja information ### Description Hi This morning, I spent some time on a new PC to install this new mbed tool, This was not working, and I got several not friendly messages... ... till I remembered that I didn't install cmake yet... So my request would be: - to update tools when cmake is not installed with some friendly message "please install cmake" - same for ninja - to update README.md to add information how to install cmake and ninja Thx @0xc0170 @MarceloSalazar @JeanMarcR ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [x] Enhancement - [ ] Bug
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/test_build.py::TestBuildProject::test_raises_build_error_if_cmake_invocation_fails", "tests/build/test_build.py::TestConfigureProject::test_raises_when_ninja_cannot_be_found", "tests/build/test_build.py::TestConfigureProject::test_raises_when_cmake_cannot_be_found" ]
[ "tests/build/test_build.py::TestBuildProject::test_invokes_cmake_with_correct_args", "tests/build/test_build.py::TestBuildProject::test_invokes_cmake_with_correct_args_if_no_target_passed", "tests/build/test_build.py::TestConfigureProject::test_invokes_cmake_with_correct_args" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2020-12-10T13:15:11"
apache-2.0
ARMmbed__mbed-tools-190
diff --git a/news/169.bugfix b/news/169.bugfix new file mode 100644 index 0000000..78b6135 --- /dev/null +++ b/news/169.bugfix @@ -0,0 +1,1 @@ +Support use of user@host:directory syntax with the import subcommand. diff --git a/src/mbed_tools/project/mbed_program.py b/src/mbed_tools/project/mbed_program.py index d095e5b..c3a9536 100644 --- a/src/mbed_tools/project/mbed_program.py +++ b/src/mbed_tools/project/mbed_program.py @@ -113,6 +113,9 @@ def parse_url(name_or_url: str) -> Dict[str, str]: url_obj = urlparse(name_or_url) if url_obj.hostname: url = url_obj.geturl() + elif ":" in name_or_url.split("/", maxsplit=1)[0]: + # If non-standard and no slashes before first colon, git will recognize as scp ssh syntax + url = name_or_url else: url = f"https://github.com/armmbed/{url_obj.path}" # We need to create a valid directory name from the url path section.
ARMmbed/mbed-tools
d4dd48ce58952851f9cb2a9e98b0f788a61a23a3
diff --git a/tests/project/test_mbed_program.py b/tests/project/test_mbed_program.py index 7f700f0..be83aa9 100644 --- a/tests/project/test_mbed_program.py +++ b/tests/project/test_mbed_program.py @@ -127,6 +127,12 @@ class TestParseURL(TestCase): self.assertEqual(data["url"], url) self.assertEqual(data["dst_path"], "mbed-os-example-numskull") + def test_creates_valid_dst_dir_from_ssh_url(self): + url = "git@superversioncontrol:superorg/mbed-os-example-numskull" + data = parse_url(url) + self.assertEqual(data["url"], url) + self.assertEqual(data["dst_path"], "mbed-os-example-numskull") + class TestFindProgramRoot(TestCase): @patchfs
mbed-tools import fails to import an example with ssh url ### Description <!-- A detailed description of what is being reported. Please include steps to reproduce the problem. Things to consider sharing: - What version of the package is being used (pip show mbed-tools)? - What is the host platform and version (e.g. macOS 10.15.2, Windows 10, Ubuntu 18.04 LTS)? --> mbed-tools version: **5.0.0** Command: `mbed-tools -vv import [email protected]:ARMmbed/mbed-os-example-blinky.git` Expected: mbed-os-example-blinky example cloned onto a local machine. Output: ``` Cloning Mbed program '[email protected]:ARMmbed/mbed-os-example-blinky.git' Resolving program library dependencies. ERROR: Cloning git repository from url 'https://github.com/armmbed/[email protected]:ARMmbed/mbed-os-example-blinky.git' failed. Error from VCS: Cmd('git') failed due to: exit code(128) cmdline: git clone --progress -v https://github.com/armmbed/[email protected]:ARMmbed/mbed-os-example-blinky.git mbed-os-example-blinky.git More information may be available by using the command line option '-vvv'. ``` ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [ ] Enhancement - [x] Bug
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/project/test_mbed_program.py::TestParseURL::test_creates_valid_dst_dir_from_ssh_url" ]
[ "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_raises_if_no_mbed_os_dir_found_and_check_mbed_os_is_true", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_raises_if_path_is_not_a_program", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_returns_valid_program", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_existing_with_mbed_os_path_returns_valid_program", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_generates_valid_program_creating_directory", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_generates_valid_program_creating_directory_in_cwd", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_generates_valid_program_existing_directory", "tests/project/test_mbed_program.py::TestInitialiseProgram::test_from_new_local_dir_raises_if_path_is_existing_program", "tests/project/test_mbed_program.py::TestParseURL::test_creates_url_and_dst_dir_from_name", "tests/project/test_mbed_program.py::TestParseURL::test_creates_valid_dst_dir_from_url", "tests/project/test_mbed_program.py::TestFindProgramRoot::test_finds_program_at_current_path", "tests/project/test_mbed_program.py::TestFindProgramRoot::test_finds_program_higher_in_dir_tree", "tests/project/test_mbed_program.py::TestFindProgramRoot::test_raises_if_no_program_found" ]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files" ], "has_test_patch": true, "is_lite": false }
"2021-02-15T13:43:30"
apache-2.0
ARMmbed__mbed-tools-196
diff --git a/news/20210218112043.bugfix b/news/20210218112043.bugfix new file mode 100644 index 0000000..f595eac --- /dev/null +++ b/news/20210218112043.bugfix @@ -0,0 +1,1 @@ +Raise a nicer error from mbed-tools detect when running on an unrecognised OS. diff --git a/src/mbed_tools/devices/_internal/detect_candidate_devices.py b/src/mbed_tools/devices/_internal/detect_candidate_devices.py index 7078c4f..418cbc2 100644 --- a/src/mbed_tools/devices/_internal/detect_candidate_devices.py +++ b/src/mbed_tools/devices/_internal/detect_candidate_devices.py @@ -8,6 +8,7 @@ from typing import Iterable from mbed_tools.devices._internal.candidate_device import CandidateDevice from mbed_tools.devices._internal.base_detector import DeviceDetector +from mbed_tools.devices.exceptions import UnknownOSError def detect_candidate_devices() -> Iterable[CandidateDevice]: @@ -26,7 +27,12 @@ def _get_detector_for_current_os() -> DeviceDetector: from mbed_tools.devices._internal.linux.device_detector import LinuxDeviceDetector return LinuxDeviceDetector() - else: + if platform.system() == "Darwin": from mbed_tools.devices._internal.darwin.device_detector import DarwinDeviceDetector return DarwinDeviceDetector() + + raise UnknownOSError( + f"We have detected the OS you are running is '{platform.system()}'. " + "Unfortunately we haven't implemented device detection support for this OS yet. Sorry!" + ) diff --git a/src/mbed_tools/devices/exceptions.py b/src/mbed_tools/devices/exceptions.py index 4763b88..570941d 100644 --- a/src/mbed_tools/devices/exceptions.py +++ b/src/mbed_tools/devices/exceptions.py @@ -16,3 +16,7 @@ class DeviceLookupFailed(MbedDevicesError): class NoDevicesFound(MbedDevicesError): """No Mbed Enabled devices were found.""" + + +class UnknownOSError(MbedDevicesError): + """The current OS is not supported."""
ARMmbed/mbed-tools
fe56531239ba0a1cbe2ce696f00f9b58889f05bc
diff --git a/tests/devices/_internal/test_detect_candidate_devices.py b/tests/devices/_internal/test_detect_candidate_devices.py index 75c5032..74137b3 100644 --- a/tests/devices/_internal/test_detect_candidate_devices.py +++ b/tests/devices/_internal/test_detect_candidate_devices.py @@ -2,39 +2,49 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # -from unittest import TestCase, mock +import pytest +from unittest import mock from tests.devices.markers import windows_only, darwin_only, linux_only from mbed_tools.devices._internal.base_detector import DeviceDetector +from mbed_tools.devices.exceptions import UnknownOSError from mbed_tools.devices._internal.detect_candidate_devices import ( detect_candidate_devices, _get_detector_for_current_os, ) -class TestDetectCandidateDevices(TestCase): +class TestDetectCandidateDevices: @mock.patch("mbed_tools.devices._internal.detect_candidate_devices._get_detector_for_current_os") def test_returns_candidates_using_os_specific_detector(self, _get_detector_for_current_os): detector = mock.Mock(spec_set=DeviceDetector) _get_detector_for_current_os.return_value = detector - self.assertEqual(detect_candidate_devices(), detector.find_candidates.return_value) + assert detect_candidate_devices() == detector.find_candidates.return_value -class TestGetDetectorForCurrentOS(TestCase): +class TestGetDetectorForCurrentOS: @windows_only def test_windows_uses_correct_module(self): from mbed_tools.devices._internal.windows.device_detector import WindowsDeviceDetector - self.assertIsInstance(_get_detector_for_current_os(), WindowsDeviceDetector) + assert isinstance(_get_detector_for_current_os(), WindowsDeviceDetector) @darwin_only def test_darwin_uses_correct_module(self): from mbed_tools.devices._internal.darwin.device_detector import DarwinDeviceDetector - self.assertIsInstance(_get_detector_for_current_os(), DarwinDeviceDetector) + assert 
isinstance(_get_detector_for_current_os(), DarwinDeviceDetector) @linux_only def test_linux_uses_correct_module(self): from mbed_tools.devices._internal.linux.device_detector import LinuxDeviceDetector - self.assertIsInstance(_get_detector_for_current_os(), LinuxDeviceDetector) + assert isinstance(_get_detector_for_current_os(), LinuxDeviceDetector) + + @mock.patch("platform.system") + def test_raises_when_os_is_unknown(self, platform_system): + os_name = "SomethingNobodyUses" + platform_system.return_value = os_name + + with pytest.raises(UnknownOSError): + _get_detector_for_current_os()
BSD: `mbed-tools detect` causes Python stack trace from mbed_tools/devices ### Description Desired behavior: - `mbed-tools detect` lists out USB-connected boards, or a friendly message if it can't Actual behavior: - `mbed-tools detect` causes a Python stack trace to be printed ``` [mbedtools] [email protected]:~ % mbed-tools detect Traceback (most recent call last): File "/home/patater/venvs/mbedtools/bin/mbed-tools", line 8, in <module> sys.exit(cli()) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 829, in __call__ return self.main(*args, **kwargs) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 782, in main rv = self.invoke(ctx) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/cli/main.py", line 38, in invoke super().invoke(context) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 1259, in invoke return _process_result(sub_ctx.command.invoke(sub_ctx)) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 1066, in invoke return ctx.invoke(self.callback, **ctx.params) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/click/core.py", line 610, in invoke return callback(*args, **kwargs) File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/cli/list_connected_devices.py", line 29, in list_connected_devices connected_devices = get_connected_devices() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/devices.py", line 32, in get_connected_devices for candidate_device in detect_candidate_devices(): File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/detect_candidate_devices.py", line 16, in detect_candidate_devices return detector.find_candidates() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/darwin/device_detector.py", line 40, in find_candidates 
usb_devices_data = system_profiler.get_end_usb_devices_data() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/darwin/system_profiler.py", line 42, in get_end_usb_devices_data data = get_all_usb_devices_data() File "/home/patater/venvs/mbedtools/lib/python3.7/site-packages/mbed_tools/devices/_internal/darwin/system_profiler.py", line 34, in get_all_usb_devices_data output = subprocess.check_output(["system_profiler", "-xml", "SPUSBDataType"], stderr=subprocess.DEVNULL) File "/usr/local/lib/python3.7/subprocess.py", line 411, in check_output **kwargs).stdout File "/usr/local/lib/python3.7/subprocess.py", line 488, in run with Popen(*popenargs, **kwargs) as process: File "/usr/local/lib/python3.7/subprocess.py", line 800, in __init__ restore_signals, start_new_session) File "/usr/local/lib/python3.7/subprocess.py", line 1551, in _execute_child raise child_exception_type(errno_num, err_msg, err_filename) FileNotFoundError: [Errno 2] No such file or directory: 'system_profiler': 'system_profiler' ``` It looks like something called `darwin/device_detector.py` is being used, but my host OS is not Darwin or macOS. It's just your friendly, neighborhood FreeBSD. I'd guess the OS detection is not very sophisticated. ``` [mbedtools] [email protected]:~ % mbed-tools --version 5.4.0 ``` ### Issue request type - [ ] Enhancement - [X] Bug
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/devices/_internal/test_detect_candidate_devices.py::TestDetectCandidateDevices::test_returns_candidates_using_os_specific_detector", "tests/devices/_internal/test_detect_candidate_devices.py::TestGetDetectorForCurrentOS::test_linux_uses_correct_module", "tests/devices/_internal/test_detect_candidate_devices.py::TestGetDetectorForCurrentOS::test_raises_when_os_is_unknown" ]
[]
{ "failed_lite_validators": [ "has_added_files", "has_many_modified_files" ], "has_test_patch": true, "is_lite": false }
"2021-02-18T11:23:15"
apache-2.0
ARMmbed__mbed-tools-270
diff --git a/news/222.bugfix b/news/222.bugfix new file mode 100644 index 0000000..4bb5df6 --- /dev/null +++ b/news/222.bugfix @@ -0,0 +1,1 @@ +Add support for MBED_ROM_START, MBED_ROM_SIZE, MBED_RAM_START and MBED_RAM_SIZE in config system. diff --git a/src/mbed_tools/build/_internal/config/config.py b/src/mbed_tools/build/_internal/config/config.py index d93cfe4..bb493f2 100644 --- a/src/mbed_tools/build/_internal/config/config.py +++ b/src/mbed_tools/build/_internal/config/config.py @@ -8,7 +8,7 @@ import logging from collections import UserDict from typing import Any, Iterable, Hashable, Callable, List -from mbed_tools.build._internal.config.source import Override, ConfigSetting +from mbed_tools.build._internal.config.source import Memory, Override, ConfigSetting logger = logging.getLogger(__name__) @@ -18,13 +18,15 @@ class Config(UserDict): This object understands how to populate the different 'config sections' which all have different rules for how the settings are collected. - Applies overrides, appends macros and updates config settings. + Applies overrides, appends macros, updates memories, and updates config settings. 
""" def __setitem__(self, key: Hashable, item: Any) -> None: """Set an item based on its key.""" if key == CONFIG_SECTION: self._update_config_section(item) + elif key == MEMORIES_SECTION: + self._update_memories_section(item) elif key == OVERRIDES_SECTION: self._handle_overrides(item) elif key == MACROS_SECTION: @@ -67,6 +69,20 @@ class Config(UserDict): self.data[CONFIG_SECTION] = self.data.get(CONFIG_SECTION, []) + config_settings + def _update_memories_section(self, memories: List[Memory]) -> None: + defined_memories = self.data.get(MEMORIES_SECTION, []) + for memory in memories: + logger.debug(f"Adding memory settings `{memory.name}: start={memory.start} size={memory.size}`") + prev_defined = next((mem for mem in defined_memories if mem.name == memory.name), None) + if prev_defined is None: + defined_memories.append(memory) + else: + logger.warning( + f"You are attempting to redefine `{memory.name}` from {prev_defined.namespace}.\n" + f"The values from `{memory.namespace}` will be ignored" + ) + self.data[MEMORIES_SECTION] = defined_memories + def _find_first_config_setting(self, predicate: Callable) -> Any: """Find first config setting based on `predicate`. @@ -89,6 +105,7 @@ class Config(UserDict): CONFIG_SECTION = "config" MACROS_SECTION = "macros" +MEMORIES_SECTION = "memories" OVERRIDES_SECTION = "overrides" diff --git a/src/mbed_tools/build/_internal/config/source.py b/src/mbed_tools/build/_internal/config/source.py index 4ad7e37..59d01df 100644 --- a/src/mbed_tools/build/_internal/config/source.py +++ b/src/mbed_tools/build/_internal/config/source.py @@ -28,8 +28,8 @@ def prepare( ) -> dict: """Prepare a config source for entry into the Config object. - Extracts config and override settings from the source. Flattens these nested dictionaries out into lists of - objects which are namespaced in the way the Mbed config system expects. + Extracts memory, config and override settings from the source. 
Flattens these nested dictionaries out into + lists of objects which are namespaced in the way the Mbed config system expects. Args: input_data: The raw config JSON object parsed from the config file. @@ -46,6 +46,11 @@ def prepare( for key in data: data[key] = _sanitise_value(data[key]) + memories = _extract_memories(namespace, data) + + if memories: + data["memories"] = memories + if "config" in data: data["config"] = _extract_config_settings(namespace, data["config"]) @@ -78,6 +83,31 @@ class ConfigSetting: self.value = _sanitise_value(self.value) +@dataclass +class Memory: + """Representation of a defined RAM/ROM region.""" + + name: str + namespace: str + start: str + size: str + + def __post_init__(self) -> None: + """Convert start and size to hex format strings.""" + try: + self.start = hex(int(self.start, 0)) + except ValueError: + raise ValueError( + f"Value of MBED_{self.name}_START in {self.namespace}, {self.start} is invalid: must be an integer" + ) + try: + self.size = hex(int(self.size, 0)) + except ValueError: + raise ValueError( + f"Value of MBED_{self.name}_SIZE in {self.namespace}, {self.size} is invalid: must be an integer" + ) + + @dataclass class Override: """Representation of a config override. @@ -128,6 +158,27 @@ def _extract_config_settings(namespace: str, config_data: dict) -> List[ConfigSe return settings +def _extract_memories(namespace: str, data: dict) -> List[Memory]: + memories = [] + for mem in ["rom", "ram"]: + start_attr = f"mbed_{mem}_start" + size_attr = f"mbed_{mem}_size" + start = data.get(start_attr) + size = data.get(size_attr) + + if size is not None and start is not None: + logger.debug(f"Extracting MBED_{mem.upper()} definitions in {namespace}: _START={start}, _SIZE={size}.") + + memory = Memory(mem.upper(), namespace, start, size) + memories.append(memory) + elif start is not None or size is not None: + raise ValueError( + f"{size_attr.upper()} and {start_attr.upper()} must be defined together. 
Only " + f"{'START' if start is not None else 'SIZE'} is defined in the lib {namespace}." + ) + return memories + + def _extract_target_overrides( namespace: str, override_data: dict, allowed_target_labels: Iterable[str] ) -> List[Override]: diff --git a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl index 8fb2119..7fadeb1 100644 --- a/src/mbed_tools/build/_internal/templates/mbed_config.tmpl +++ b/src/mbed_tools/build/_internal/templates/mbed_config.tmpl @@ -75,6 +75,10 @@ set(MBED_CONFIG_DEFINITIONS "-D{{setting_name}}={{value}}" {% endif -%} {%- endfor -%} +{% for memory in memories %} + "-DMBED_{{memory.name}}_START={{memory.start}}" + "-DMBED_{{memory.name}}_SIZE={{memory.size}}" +{%- endfor -%} {% for macro in macros %} "{{macro|replace("\"", "\\\"")}}" {%- endfor %}
ARMmbed/mbed-tools
73fc6ed6fd728beea588e100c2de83c439c29228
diff --git a/tests/build/_internal/config/test_config.py b/tests/build/_internal/config/test_config.py index 980ed4d..c7e2e35 100644 --- a/tests/build/_internal/config/test_config.py +++ b/tests/build/_internal/config/test_config.py @@ -2,10 +2,11 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # +import logging import pytest from mbed_tools.build._internal.config.config import Config -from mbed_tools.build._internal.config.source import prepare, ConfigSetting, Override +from mbed_tools.build._internal.config.source import prepare, ConfigSetting, Memory, Override class TestConfig: @@ -24,6 +25,17 @@ class TestConfig: with pytest.raises(ValueError, match="lib.param already defined"): conf.update(prepare({"config": {"param": {"value": 0}}}, source_name="lib")) + def test_logs_ignore_mbed_ram_repeated(self, caplog): + caplog.set_level(logging.DEBUG) + input_dict = {"mbed_ram_size": "0x80000", "mbed_ram_start": "0x24000000"} + input_dict2 = {"mbed_ram_size": "0x78000", "mbed_ram_start": "0x24200000"} + + conf = Config(prepare(input_dict, source_name="lib1")) + conf.update(prepare(input_dict2, source_name="lib2")) + + assert "values from `lib2` will be ignored" in caplog.text + assert conf["memories"] == [Memory("RAM", "lib1", "0x24000000", "0x80000")] + def test_target_overrides_handled(self): conf = Config( { diff --git a/tests/build/_internal/config/test_source.py b/tests/build/_internal/config/test_source.py index 962315a..b7f4a2a 100644 --- a/tests/build/_internal/config/test_source.py +++ b/tests/build/_internal/config/test_source.py @@ -2,8 +2,10 @@ # Copyright (c) 2020-2021 Arm Limited and Contributors. All rights reserved. 
# SPDX-License-Identifier: Apache-2.0 # +import pytest + from mbed_tools.build._internal.config import source -from mbed_tools.build._internal.config.source import Override +from mbed_tools.build._internal.config.source import Memory, Override class TestPrepareSource: @@ -118,3 +120,48 @@ class TestPrepareSource: assert conf["config"][0].value == {"ETHERNET", "WIFI"} assert conf["sectors"] == {0, 2048} assert conf["header_info"] == {0, 2048, "bobbins", "magic"} + + def test_memory_attr_extracted(self): + lib = { + "mbed_ram_size": "0x80000", + "mbed_ram_start": "0x24000000", + "mbed_rom_size": "0x200000", + "mbed_rom_start": "0x08000000", + } + + conf = source.prepare(lib, "lib") + + assert Memory("RAM", "lib", "0x24000000", "0x80000") in conf["memories"] + assert Memory("ROM", "lib", "0x8000000", "0x200000") in conf["memories"] + + def test_memory_attr_converted_as_hex(self): + input_dict = {"mbed_ram_size": "1024", "mbed_ram_start": "0x24000000"} + + conf = source.prepare(input_dict, source_name="lib") + + memory, *_ = conf["memories"] + assert memory.size == "0x400" + + def test_raises_memory_size_not_integer(self): + input_dict = {"mbed_ram_size": "NOT INT", "mbed_ram_start": "0x24000000"} + + with pytest.raises(ValueError, match="_SIZE in lib, NOT INT is invalid: must be an integer"): + source.prepare(input_dict, "lib") + + def test_raises_memory_start_not_integer(self): + input_dict = {"mbed_ram_size": "0x80000", "mbed_ram_start": "NOT INT"} + + with pytest.raises(ValueError, match="_START in lib, NOT INT is invalid: must be an integer"): + source.prepare(input_dict, "lib") + + def test_raises_memory_size_defined_not_start(self): + input_dict = {"mbed_ram_size": "0x80000"} + + with pytest.raises(ValueError, match="Only SIZE is defined"): + source.prepare(input_dict) + + def test_raises_memory_start_defined_not_size(self): + input_dict = {"mbed_ram_start": "0x24000000"} + + with pytest.raises(ValueError, match="Only START is defined"): + 
source.prepare(input_dict)
MBED_ROM_START and friends unavailable on Mbed CLI2 ### Description <!-- A detailed description of what is being reported. Please include steps to reproduce the problem. Things to consider sharing: - What version of the package is being used (pip show mbed-tools)? - What is the host platform and version (e.g. macOS 10.15.2, Windows 10, Ubuntu 18.04 LTS)? --> On Mbed CLI, the following symbols are generated and passed to compiler, linker, or both: ```sh mbed compile -m NUMAKER_IOT_M487 -t ARM ``` **BUILD/NUMAKER_IOT_M487/ARM/.profile.c**: ``` { "flags": [ ...... "-DMBED_RAM_SIZE=0x28000", "-DMBED_RAM_START=0x20000000", "-DMBED_ROM_SIZE=0x80000", "-DMBED_ROM_START=0x0", ...... ``` **BUILD/NUMAKER_IOT_M487/ARM/.profile.ld**: ``` { "flags": [ ...... "--predefine=\"-DMBED_BOOT_STACK_SIZE=1024\"", "--predefine=\"-DMBED_RAM_SIZE=0x28000\"", "--predefine=\"-DMBED_RAM_START=0x20000000\"", "--predefine=\"-DMBED_ROM_SIZE=0x80000\"", "--predefine=\"-DMBED_ROM_START=0x0\"", ...... ``` But on Mbed CLI2, they are unavailable in `cmake_build/NUMAKER_IOT_M487/develop/ARM/mbed_config.cmake` or elsewhere. ```sh mbed-tools compile -m NUMAKER_IOT_M487 -t ARM ``` ### Issue request type <!-- Please add only one `x` to one of the following types. Do not fill multiple types (split the issue otherwise). For questions please use https://forums.mbed.com/ --> - [ ] Enhancement - [x] Bug ### Mbed/Tool version **mbed-os**: 6.8.0 **mbed-cli**: 1.10.5 **mbed-tools**:: 7.2.1
0
2401580b6f41fe72f1360493ee46e8a842bd04ba
[ "tests/build/_internal/config/test_config.py::TestConfig::test_target_overrides_separate_namespace", "tests/build/_internal/config/test_config.py::TestConfig::test_target_overrides_handled", "tests/build/_internal/config/test_config.py::TestConfig::test_macros_are_appended_to", "tests/build/_internal/config/test_config.py::TestConfig::test_raises_when_trying_to_add_duplicate_config_setting", "tests/build/_internal/config/test_config.py::TestConfig::test_warns_and_skips_override_for_undefined_config_parameter", "tests/build/_internal/config/test_config.py::TestConfig::test_config_updated", "tests/build/_internal/config/test_config.py::TestConfig::test_logs_ignore_mbed_ram_repeated", "tests/build/_internal/config/test_config.py::TestConfig::test_lib_overrides_handled", "tests/build/_internal/config/test_config.py::TestConfig::test_cumulative_fields_can_be_modified", "tests/build/_internal/config/test_config.py::TestConfig::test_ignores_present_option", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_size_defined_not_start", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_start_defined_not_size", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_memory_attr_extracted", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_override_fields_from_lib_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_start_not_integer", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_override_fields_from_target_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_target_overrides_only_collected_for_valid_targets", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_memory_attr_converted_as_hex", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_cumulative_fields_parsed", 
"tests/build/_internal/config/test_source.py::TestPrepareSource::test_raises_memory_size_not_integer", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_config_fields_from_lib_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_config_fields_from_target_are_namespaced", "tests/build/_internal/config/test_source.py::TestPrepareSource::test_converts_config_setting_value_lists_to_sets" ]
[]
{ "failed_lite_validators": [ "has_hyperlinks", "has_added_files", "has_many_modified_files", "has_many_hunks", "has_pytest_match_arg" ], "has_test_patch": true, "is_lite": false }
"2021-04-13T11:28:54"
apache-2.0

Dataset Summary

SWE-bench Extra is a dataset that can be used to train or evaluate agentic systems specializing in resolving GitHub issues. It is based on the methodology used to build the SWE-bench benchmark and includes 6,415 Issue-Pull Request pairs sourced from 1,988 Python repositories.

Dataset Description

The SWE-bench Extra dataset supports the development of software engineering agents capable of autonomously solving GitHub issues. The data collection process, based on the SWE-bench methodology, involves the following steps:

  1. Issue and Pull Request Collection: Issues are gathered and linked with pull requests that successfully resolve them.
  2. Filtering: Instances are filtered based on attributes such as issue descriptions, relevant code paths, and test patches.
  3. Execution-based Validation: The project environments are set up and the tests are run to verify that the tests execute correctly.

For a more detailed description of the data collection process, please refer to our blog post Scaling data collection for training software engineering agents.

As an example use case of this dataset, we’ve used SWE-bench-extra instances to generate a dataset of 80,036 trajectories nebius/swe-agent-trajectories. We’ve then trained an action generator model that achieves a score of 19.2% on the subset of 50 random instances from the SWE-bench Verified benchmark, representing a 30% relative improvement over its parent model Qwen2.5-72B-Instruct, which scored 14.8%. Further augmenting the action generator with a guided search based on a critic model, also trained on this data, achieves 40.6% on the full SWE-bench Verified benchmark, which is state-of-the-art among agents using solely open-weight models. You can read more about this agent in our blog post, “Leveraging Training and Search for Better Software Engineering Agents”.

How to Use

from datasets import load_dataset
ds = load_dataset('nebius/SWE-bench-extra')

Dataset Statistics

Average, 75th percentile, and maximum values characterizing various attributes of the collected instances. Statistics are micro-averaged without grouping by repository.

Data Type Mean p75 Max
Issue text Length (words) 111.5 146 1,294
Code base Files (Non-test) 71.71 72.00 2,264
Lines (Non-test) 15,163.38 13,777 1,039,288
Gold patch Files edited 2.6 3 7
Lines edited 56 76 300
Tests Fail to Pass 10.94 5 4,941
Total 58.5 49 7,820

Dataset Structure

The dataset contains the following fields. It includes all fields from SWE-bench and adds a meta column, which indicates whether the instance meets the "lite" criteria and, if not, lists the failed validators.

Field name Type Description
instance_id str A formatted instance identifier, usually as repo_owner__repo_name-PR-number.
patch str The gold patch, the patch generated by the PR (minus test-related code), that resolved the issue.
repo str The repository owner/name identifier from GitHub.
base_commit str The commit hash of the repository representing the HEAD of the repository before the solution PR is applied.
hints_text str Comments made on the issue prior to the creation date of the solution PR’s first commit.
created_at str The creation date of the pull request.
test_patch str A test-file patch that was contributed by the solution PR.
problem_statement str The issue title and body.
version str Installation version to use for running evaluation.
environment_setup_commit str Commit hash to use for environment setup and installation.
FAIL_TO_PASS str A JSON list of strings that represent the set of tests resolved by the PR and tied to the issue resolution.
PASS_TO_PASS str A JSON list of strings that represent tests that should pass before and after the PR application.
meta str A JSON dictionary indicating whether the instance is lite, along with a list of failed lite validators if it is not.
license str The type of license of the repository.

To execute instances within SWE-bench, you need to provide a default recipe for dependency installation. The constants required for running these instances are described in this constants.py.

License

The dataset is licensed under the Creative Commons Attribution 4.0 license. However, please respect the license of each specific repository on which a particular instance is based. To facilitate this, the license of each repository at the time of the commit is provided for every instance.

Downloads last month
72