| id | content |
|---|---|
codereview_new_python_data_6341
|
def incremental_update_mb_metadata_cache(use_lb_conn: bool):
def cleanup_mbid_mapping_table():
- """ Find msids which are mapped to mbids that are now absent from mb_metadata_cache because
- those have been merged (redirects) or deleted from MB. """
query = """
UPDATE mbid_mapping mm
SET last_updated = 'epoch'
" and flag them to be re-mapped"
def incremental_update_mb_metadata_cache(use_lb_conn: bool):
def cleanup_mbid_mapping_table():
+ """ Find msids which are mapped to mbids that are now absent from mb_metadata_cache
+ because those have been merged (redirects) or deleted from MB and flag such msids
+ to be re-mapped by the mbid mapping writer."""
query = """
UPDATE mbid_mapping mm
SET last_updated = 'epoch'
|
codereview_new_python_data_6342
|
def _get_playable_recommendations_list(mbids_and_ratings_list):
}
}
- These changes are done because to show cover art of a particular listen its recording_mbid and then its release_mbid, caa_id , caa_release_mbid are needed in order to perform the frontend logic in utils.tsx and render it on frontend of the web app.
-
Returns:
OK, now I understand what you meant, thank you.
I think this is too verbose as it is; I suggest the following which should be enough for this docstring:
```suggestion
These fields are required to show cover art on the front-end web app.
```
def _get_playable_recommendations_list(mbids_and_ratings_list):
}
}
+ These fields are required to show cover art on the front-end web app.
Returns:
|
codereview_new_python_data_6343
|
def add_do_not_recommend():
Currently, the supported entity types are ``artist``, ``release``, ``release_group`` and ``recording``. The ``until``
key in the json body is optional. If absent, the don't recommend entry will be added forever. If present, it should
- be an epoch timestamp in seconds denoting the time till which the entity should not be recommended.
:reqheader Authorization: Token <user token>
:statuscode 200: feedback accepted.
Does epoch imply UTC? If so, all good here.
def add_do_not_recommend():
Currently, the supported entity types are ``artist``, ``release``, ``release_group`` and ``recording``. The ``until``
key in the json body is optional. If absent, the don't recommend entry will be added forever. If present, it should
+ be an utc timestamp in seconds denoting the time till which the entity should not be recommended.
:reqheader Authorization: Token <user token>
:statuscode 200: feedback accepted.
|
codereview_new_python_data_6346
|
def run(self, no_swap=False, no_analyze=False):
total_rows = curs.rowcount
log(f"{self.table_name}: fetch {total_rows:,} rows")
while True:
- batch = curs.fetchmany(1000)
if len(batch) == 0:
break
Define a constant?
def run(self, no_swap=False, no_analyze=False):
total_rows = curs.rowcount
log(f"{self.table_name}: fetch {total_rows:,} rows")
while True:
+ batch = curs.fetchmany(BATCH_SIZE)
if len(batch) == 0:
break
|
codereview_new_python_data_6347
|
def msb_transfer_db():
@cli.command()
def generate_playlists():
- """ Generate daily playlists for users after checking timezone settings """
run_daily_jams_troi_bot()
I think this option should have a more descriptive name and desc. We should make it clear that this is an internal function as part of our daily playlist generation, rather than a core function of troi.
def msb_transfer_db():
@cli.command()
def generate_playlists():
+ """ Generate daily playlists for users soon after the new day begins in their timezone. This is an internal LB
+ method and not a core function of troi.
+ """
run_daily_jams_troi_bot()
|
codereview_new_python_data_6348
|
def get_sitewide_fresh_releases(pivot_release_date: date, release_date_window_da
, artist_credit_name
, release_name"""
- psycopg2.extras.register_uuid()
with psycopg2.connect(current_app.config["MB_DATABASE_URI"]) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
curs.execute(query, (from_date, to_date))
- row = curs.fetchone()
return [FreshRelease(**dict(row)) for row in curs.fetchall()]
Probably register somewhere in app or database engine creation?
def get_sitewide_fresh_releases(pivot_release_date: date, release_date_window_da
, artist_credit_name
, release_name"""
with psycopg2.connect(current_app.config["MB_DATABASE_URI"]) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
curs.execute(query, (from_date, to_date))
return [FreshRelease(**dict(row)) for row in curs.fetchall()]
|
codereview_new_python_data_6349
|
def get_sitewide_fresh_releases(pivot_release_date: date, release_date_window_da
, artist_credit_name
, release_name"""
- psycopg2.extras.register_uuid()
with psycopg2.connect(current_app.config["MB_DATABASE_URI"]) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
curs.execute(query, (from_date, to_date))
- row = curs.fetchone()
return [FreshRelease(**dict(row)) for row in curs.fetchall()]
Forgot to remove?
def get_sitewide_fresh_releases(pivot_release_date: date, release_date_window_da
, artist_credit_name
, release_name"""
with psycopg2.connect(current_app.config["MB_DATABASE_URI"]) as conn:
with conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as curs:
curs.execute(query, (from_date, to_date))
return [FreshRelease(**dict(row)) for row in curs.fetchall()]
|
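The reviewer in the two rows above suggests registering the UUID adapter once at application or database-engine creation instead of inside each query function. A minimal sketch of that idea, assuming a Flask-style application factory (the `create_app` name and Flask usage are illustrative assumptions, not part of the dataset):

```python
import psycopg2.extras
from flask import Flask

def create_app() -> Flask:
    app = Flask(__name__)
    # register_uuid() installs a process-wide adapter, so calling it once at
    # startup is enough for every later psycopg2 connection.
    psycopg2.extras.register_uuid()
    return app
```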
codereview_new_python_data_6350
|
def select_timezone():
try:
update_timezone = str(form.timezone.data)
db_usersetting.set_timezone(current_user.id, update_timezone)
- flash.info("timezone reset")
except DatabaseException:
flash.error("Something went wrong! Unable to update timezone right now.")
return redirect(url_for("profile.info"))
if form.csrf_token.errors:
- flash.error('Cannot update timezone due to error during authentication, please try again later.')
return redirect(url_for('profile.info'))
return render_template(
For this error, let's give fewer details, since that might mean someone is trying to hack us. If that is the case, let's not give them any help. Let's have this say: "Unable to update timezone."
def select_timezone():
try:
update_timezone = str(form.timezone.data)
db_usersetting.set_timezone(current_user.id, update_timezone)
+ flash.info("Your timezone has been saved.")
except DatabaseException:
flash.error("Something went wrong! Unable to update timezone right now.")
return redirect(url_for("profile.info"))
if form.csrf_token.errors:
+ flash.error('Unable to update timezone.')
return redirect(url_for('profile.info'))
return render_template(
|
codereview_new_python_data_6352
|
def select_timezone():
flash.error("Something went wrong! Unable to update timezone right now.")
return redirect(url_for("profile.info"))
- if form.csrf_token.errors:
flash.error('Unable to update timezone.')
return redirect(url_for('profile.info'))
Should likely be form.errors instead.
def select_timezone():
flash.error("Something went wrong! Unable to update timezone right now.")
return redirect(url_for("profile.info"))
+ if form.errors:
flash.error('Unable to update timezone.')
return redirect(url_for('profile.info'))
|
codereview_new_python_data_6354
|
def insert(feedback: Feedback):
with db.engine.connect() as connection, connection.begin():
# delete the existing feedback and then insert new feedback. we cannot use ON CONFLICT DO UPDATE
- # because it is possible for a user to submit the feedback using recording_msid only and then
- # using both recording_msid and recording_mbid at once. Then the ON CONFLICT clause won't work
- # well. We can use partial unique indexes to make it work but there will still be duplicates.
- # therefore, we delete first then insert new feedback in the same transaction.
connection.execute(text(delete_query), params)
connection.execute(text(insert_query), params)
these last few sentences don't appear to be as important as the first ones, if we want to simplify a bit then we can remove them
def insert(feedback: Feedback):
with db.engine.connect() as connection, connection.begin():
# delete the existing feedback and then insert new feedback. we cannot use ON CONFLICT DO UPDATE
+ # because it is possible for a user to submit the feedback using recording_msid only and then using
+ # both recording_msid and recording_mbid at once in which case the ON CONFLICT doesn't work well.
connection.execute(text(delete_query), params)
connection.execute(text(insert_query), params)
|
codereview_new_python_data_6513
|
"-DUSES_P080", # Dallas iButton
"-DUSES_P081", # Cron
"-DUSES_P082", # GPS
-# "-DUSES_P085", # AcuDC24x
"-DUSES_P098", # PWM motor
-# "-DUSES_P100", # Pulse Counter - DS2423
# "-DUSES_P087", # Serial Proxy
# "-DUSES_P094", # CUL Reader
# "-DUSES_P095", # TFT ILI9341
For ESP32 these don't have to be disabled, they will still fit in the build
"-DUSES_P080", # Dallas iButton
"-DUSES_P081", # Cron
"-DUSES_P082", # GPS
+# "-DUSES_P085", # AcuDC24x
"-DUSES_P098", # PWM motor
+# "-DUSES_P100", # Pulse Counter - DS2423
# "-DUSES_P087", # Serial Proxy
# "-DUSES_P094", # CUL Reader
# "-DUSES_P095", # TFT ILI9341
|
codereview_new_python_data_6525
|
"-DUSE_TRIGONOMETRIC_FUNCTIONS_RULES",
"-DUSE_CUSTOM_PROVISIONING",
"-DUSE_SETTINGS_ARCHIVE"
]
I don't think this comment makes sense here, as it suggests the p2p feature is somehow deprecated.
"-DUSE_TRIGONOMETRIC_FUNCTIONS_RULES",
"-DUSE_CUSTOM_PROVISIONING",
+ "-DFEATURE_ESPEASY_P2P=1",
+
"-DUSE_SETTINGS_ARCHIVE"
]
|
codereview_new_python_data_6536
|
def test_minmax_frame(mm, res):
# See issue #3406
DT = dt.Frame(range(5))
assert mm(DT)[0,0] == res
- #assert_equals(DT_minmax, DT.min())
-
#-------------------------------------------------------------------------------
# sum
This line can be safely removed?
def test_minmax_frame(mm, res):
# See issue #3406
DT = dt.Frame(range(5))
assert mm(DT)[0,0] == res
#-------------------------------------------------------------------------------
# sum
|
codereview_new_python_data_6543
|
def get_versions():
versions = []
strip_versions = []
versions.append("Release_1_8_20")
- pre = "Release_1_9_"
for o in shlex.split(raw_versions):
if o.startswith(pre):
- if len(o.replace(pre,"")) == 1 or len(o.replace(pre,"")) == 2:
- strip_versions.append(int(o.replace(pre,"")))
for i in sorted(set(strip_versions)):
- versions.append(pre + str(i))
versions.append("master")
return versions
will this continue to work for future version tags like e.g. `Release_1_10_0` or `Release_2_0_0`?
def get_versions():
versions = []
strip_versions = []
versions.append("Release_1_8_20")
+
+ pre = "Release_"
for o in shlex.split(raw_versions):
if o.startswith(pre):
+ ver = o.split('_')
+ v = int(ver[1])*1000+int(ver[2])
+ if v >= 1009:
+ strip_versions.append(v*1000+int(ver[3]))
for i in sorted(set(strip_versions)):
+ versions.append(pre + str(int((i/1000)/1000)) + "_" + str(int((i/1000) % 1000)) + "_" + str(i%1000))
+
versions.append("master")
+ print(versions)
return versions
|
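The replacement code above packs each tag as `(major*1000 + minor)*1000 + patch` and later unpacks it when rebuilding the tag name. A short worked example of that round trip (purely illustrative, not part of the row):

```python
# Release_1_10_5 -> v = 1*1000 + 10 = 1010 -> key = 1010*1000 + 5 = 1010005
key = (1 * 1000 + 10) * 1000 + 5
major = int((key / 1000) / 1000)   # 1
minor = int((key / 1000) % 1000)   # 10
patch = key % 1000                 # 5
print(f"Release_{major}_{minor}_{patch}")  # Release_1_10_5
```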
codereview_new_python_data_6551
|
name='flatbuffers',
version='22.11.23',
license='Apache 2.0',
- license_files='LICENSE.txt',
author='Derek Bailey',
author_email='derekbailey@google.com',
url='https://google.github.io/flatbuffers/',
Can we just reference the license in `../LICENSE.txt` instead of making a copy of it here?
name='flatbuffers',
version='22.11.23',
license='Apache 2.0',
+ license_files='../LICENSE.txt',
author='Derek Bailey',
author_email='derekbailey@google.com',
url='https://google.github.io/flatbuffers/',
|
codereview_new_python_data_6559
|
def glob(path, pattern):
flatc(
NO_INCL_OPTS
- + DART_OPTS
- + ["--go"],
schema="include_test/include_test1.fbs",
include="include_test/sub",
)
flatc(
NO_INCL_OPTS
- + DART_OPTS
- + ["--go"],
schema="include_test/sub/include_test2.fbs",
include="include_test",
)
Why do we need to generate go files for this?
def glob(path, pattern):
flatc(
NO_INCL_OPTS
+ + DART_OPTS,
schema="include_test/include_test1.fbs",
include="include_test/sub",
)
flatc(
NO_INCL_OPTS
+ + DART_OPTS,
schema="include_test/sub/include_test2.fbs",
include="include_test",
)
|
codereview_new_python_data_6701
|
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
log.warning(_("""Alternatively use `%s` when working on the notebook's Javascript and LESS""") % 'npm run build:watch')
warnings.warn(_("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0"), DeprecationWarning)
- log.warning("Notebook version 5 is not longer being maintained. Please upgrade to version 6 or later.")
now = utcnow()
root_dir = contents_manager.root_dir
```suggestion
log.warning("Notebook version 5 is no longer maintained. Please upgrade to version 6 or later.")
```
def init_settings(self, jupyter_app, kernel_manager, contents_manager,
log.warning(_("""Alternatively use `%s` when working on the notebook's Javascript and LESS""") % 'npm run build:watch')
warnings.warn(_("The `ignore_minified_js` flag is deprecated and will be removed in Notebook 6.0"), DeprecationWarning)
+ log.warning("Notebook version 5 is no longer maintained. Please upgrade to version 6 or later.")
now = utcnow()
root_dir = contents_manager.root_dir
|
codereview_new_python_data_6869
|
def leInt(i1, i2): # noqa: N802
def mlEquals( # noqa: N802
term1: KInner,
term2: KInner,
- sort1: Union[str, KSort] = Sorts.K,
- sort2: Union[str, KSort] = Sorts.K,
) -> KApply:
- return KLabel('#Equals', sort1, sort2)(term1, term2)
def mlEqualsTrue(term: KInner) -> KApply: # noqa: N802
I think we should consider renaming these to `sort_args` and `sort_return` or something, instead of `sort1/2`, because people might think it means the sorts of `term1` and `term2`, when it doesn't. Sort1 should be the sort of both arguments, sort2 is the sort that the result is projected into.
Doesn't have to be this PR
def leInt(i1, i2): # noqa: N802
def mlEquals( # noqa: N802
term1: KInner,
term2: KInner,
+ arg_sort: Union[str, KSort] = Sorts.K,
+ sort: Union[str, KSort] = Sorts.K,
) -> KApply:
+ return KLabel('#Equals', arg_sort, sort)(term1, term2)
def mlEqualsTrue(term: KInner) -> KApply: # noqa: N802
|
codereview_new_python_data_6871
|
from ..kast import EMPTY_ATT, KAst, KDefinition, KFlatModuleList, KRequire
from ..kastManip import remove_generated_cells
-from ..ktool import KompileBackend, _kprove
from .kprove_test import KProveTest
```suggestion
from ..ktool.kompile import KompileBackend, _kprove
```
from ..kast import EMPTY_ATT, KAst, KDefinition, KFlatModuleList, KRequire
from ..kastManip import remove_generated_cells
+from ..ktool import KompileBackend
+from ..ktool.kprove import _kprove
from .kprove_test import KProveTest
|
codereview_new_python_data_6872
|
from .kompile import KompileBackend, kompile
-from .kprint import (
- KPrint,
- _kast,
- applied_label_str,
- build_symbol_table,
- indent,
- paren,
- pretty_print_kast,
- unparser_for_production,
-)
-from .kprove import KProve, _kprove
-from .krun import KRun, _krun
We really shouldn't be encouraging the use of private members.
```suggestion
from .kprove import KProve
```
from .kompile import KompileBackend, kompile
+from .kprint import KPrint
+from .kprove import KProve
+from .krun import KRun
|
codereview_new_python_data_6873
|
def __init__(self, definition_dir: Path, use_directory: Optional[Path] = None) -
self.definition_hash = hash_str(self.definition)
def kore_to_kast(self, kore: Kore) -> KAst:
- output = _kast(self.definition_dir, kore.text, input='kore')
return KAst.from_dict(json.loads(output)['term'])
def kast_to_kore(self, kast: KAst) -> Kore:
```suggestion
output = _kast(self.definition_dir, kore.text, input='kore', output='json')
```
def __init__(self, definition_dir: Path, use_directory: Optional[Path] = None) -
self.definition_hash = hash_str(self.definition)
def kore_to_kast(self, kore: Kore) -> KAst:
+ output = _kast(self.definition_dir, kore.text, input='kore', output='json')
return KAst.from_dict(json.loads(output)['term'])
def kast_to_kore(self, kast: KAst) -> Kore:
|
codereview_new_python_data_6874
|
def pretty_print_kast(kast: KAst, symbol_table: SymbolTable, debug=False):
if type(kast) is KProduction:
if 'klabel' not in kast.att and kast.klabel:
kast = kast.update_atts({'klabel': kast.klabel.name})
- sort_str = pretty_print_kast(kast.sort, symbol_table, debug=debug)
- if not kast.items:
- return 'syntax ' + sort_str
- production_str = ' '.join([pretty_print_kast(pi, symbol_table, debug=debug) for pi in kast.items])
att_str = pretty_print_kast(kast.att, symbol_table, debug=debug)
- return 'syntax ' + sort_str + ' ::= ' + production_str + ' ' + att_str
if type(kast) is KSyntaxSort:
sort_str = pretty_print_kast(kast.sort, symbol_table, debug=debug)
att_str = pretty_print_kast(kast.att, symbol_table, debug=debug)
Wouldn't this drop attributes? I think handling attributes is important because we commonly have the `token` attribute on productions like these.
def pretty_print_kast(kast: KAst, symbol_table: SymbolTable, debug=False):
if type(kast) is KProduction:
if 'klabel' not in kast.att and kast.klabel:
kast = kast.update_atts({'klabel': kast.klabel.name})
+ syntax_str = 'syntax ' + pretty_print_kast(kast.sort, symbol_table, debug=debug)
+ if kast.items:
+ syntax_str += ' '.join([pretty_print_kast(pi, symbol_table, debug=debug) for pi in kast.items])
att_str = pretty_print_kast(kast.att, symbol_table, debug=debug)
+ if att_str:
+ syntax_str += ' ' + att_str
+ return syntax_str
if type(kast) is KSyntaxSort:
sort_str = pretty_print_kast(kast.sort, symbol_table, debug=debug)
att_str = pretty_print_kast(kast.att, symbol_table, debug=debug)
|
codereview_new_python_data_6875
|
def pretty_print_kast(kast: KAst, symbol_table: SymbolTable, debug=False):
kast = kast.update_atts({'klabel': kast.klabel.name})
syntax_str = 'syntax ' + pretty_print_kast(kast.sort, symbol_table, debug=debug)
if kast.items:
- syntax_str += ' '.join([pretty_print_kast(pi, symbol_table, debug=debug) for pi in kast.items])
att_str = pretty_print_kast(kast.att, symbol_table, debug=debug)
if att_str:
syntax_str += ' ' + att_str
```suggestion
syntax_str += ' ::= ' + ' '.join([pretty_print_kast(pi, symbol_table, debug=debug) for pi in kast.items])
```
def pretty_print_kast(kast: KAst, symbol_table: SymbolTable, debug=False):
kast = kast.update_atts({'klabel': kast.klabel.name})
syntax_str = 'syntax ' + pretty_print_kast(kast.sort, symbol_table, debug=debug)
if kast.items:
+ syntax_str += ' ::= ' + ' '.join([pretty_print_kast(pi, symbol_table, debug=debug) for pi in kast.items])
att_str = pretty_print_kast(kast.att, symbol_table, debug=debug)
if att_str:
syntax_str += ' ' + att_str
|
codereview_new_python_data_6876
|
def dict(self) -> Dict[str, Any]:
@property
def text(self) -> str:
- return self.name
class Pattern(Kore, ABC):
This change in how `SortApp.text` works is needed in order to get these tests passing, because the frontend does not want to parse sorts with `{}` behind them: https://github.com/runtimeverification/k/issues/2762
def dict(self) -> Dict[str, Any]:
@property
def text(self) -> str:
+ return self.name + ' ' + _braced(sort.text for sort in self.sorts)
class Pattern(Kore, ABC):
|
codereview_new_python_data_6878
|
class PrettyPrintKastTest(TestCase):
(KRule(TRUE), 'rule true\n '),
(KRule(TRUE, ensures=TRUE), 'rule true\n '),
(KRule(TRUE, ensures=KApply('_andBool_', [TRUE, TRUE])), 'rule true\n ensures ( true\n andBool ( true\n ))\n '),
- (Subst({'X': TRUE, 'Y': KApply('_andBool_', [TRUE, TRUE])}), 'X |-> true\nY |-> _andBool_ ( true , true )'),
)
SYMBOL_TABLE: Final[Mapping[str, Callable]] = {}
Seems like a test that belongs in `test_subst.py` instead.
class PrettyPrintKastTest(TestCase):
(KRule(TRUE), 'rule true\n '),
(KRule(TRUE, ensures=TRUE), 'rule true\n '),
(KRule(TRUE, ensures=KApply('_andBool_', [TRUE, TRUE])), 'rule true\n ensures ( true\n andBool ( true\n ))\n '),
)
SYMBOL_TABLE: Final[Mapping[str, Callable]] = {}
|
codereview_new_python_data_6879
|
def unapply(self, term: KInner) -> KInner:
new_term = KRewrite(lhs, rhs).replace(new_term)
return new_term
- def pretty_print(self, kprint) -> str:
- return '\n'.join(key + ' |-> ' + kprint.pretty_print(value) for key, value in self.items())
@final
What do you think about `pretty` or `pretty_str`? For me, `print` suggests side effects.
def unapply(self, term: KInner) -> KInner:
new_term = KRewrite(lhs, rhs).replace(new_term)
return new_term
+ def pretty(self, kprint) -> Iterable[str]:
+ return (key + ' |-> ' + kprint.pretty_print(value) for key, value in self.items())
@final
|
codereview_new_python_data_6880
|
class EdgeLike(ABC):
@abstractmethod
def pretty(self, kprint: KPrint) -> Iterable[str]:
- assert False, 'Must be overridden'
def __lt__(self, other):
if not isinstance(other, KCFG.EdgeLike):
```suggestion
...
```
class EdgeLike(ABC):
@abstractmethod
def pretty(self, kprint: KPrint) -> Iterable[str]:
+ ...
def __lt__(self, other):
if not isinstance(other, KCFG.EdgeLike):
|
codereview_new_python_data_6881
|
def kompile(
hook_namespaces: Iterable[str] = (),
emit_json=False,
concrete_rules: Iterable[str] = (),
- additional_args: Iterable[str] = ()
) -> Path:
check_file_path(main_file)
This is a matter of personal preference, but I like to add the trailing comma to such lists. It is easier to extend, rearrange, etc. this way.
```suggestion
additional_args: Iterable[str] = (),
```
def kompile(
hook_namespaces: Iterable[str] = (),
emit_json=False,
concrete_rules: Iterable[str] = (),
+ additional_args: Iterable[str] = (),
) -> Path:
check_file_path(main_file)
|
codereview_new_python_data_6882
|
def _ml_impl(antecedents: Iterable[KInner], consequents: Iterable[KInner]) -> KI
return mlImplies(antecedent, consequent, Sorts.GENERATED_TOP_CELL)
- def with_constraint(self, new_constraint: KInner) -> 'CTerm':
return CTerm(mlAnd([self.config, new_constraint] + list(self.constraints), Sorts.GENERATED_TOP_CELL))
I'd call it `add_constraint` instead. To me, `with_constraint` suggests "take the configuration, and couple it with the constraint provided."
```suggestion
def add_constraint(self, constraint: KInner) -> 'CTerm':
return CTerm(mlAnd((self.config, constraint) + self.constraints, Sorts.GENERATED_TOP_CELL))
```
def _ml_impl(antecedents: Iterable[KInner], consequents: Iterable[KInner]) -> KI
return mlImplies(antecedent, consequent, Sorts.GENERATED_TOP_CELL)
+ def add_constraint(self, new_constraint: KInner) -> 'CTerm':
return CTerm(mlAnd([self.config, new_constraint] + list(self.constraints), Sorts.GENERATED_TOP_CELL))
|
codereview_new_python_data_6883
|
def syntax_productions(self) -> List[KProduction]:
@staticmethod
def _is_non_free_constructor(label: str) -> bool:
is_cell_map_constructor = label.endswith('CellMapItem') or label.endswith('CellMap_')
- is_builtin_data_constructor = label in ['_Set_', '_List_', '_Map_', 'SetItem', 'ListItem', '_|->_']
return is_cell_map_constructor or is_builtin_data_constructor
@property
```suggestion
is_builtin_data_constructor = label in ['_Set_', '_List_', '_Map_', 'SetItem', 'ListItem', '_|->_']
```
```suggestion
is_builtin_data_constructor = label in {'_Set_', '_List_', '_Map_', 'SetItem', 'ListItem', '_|->_'}
```
def syntax_productions(self) -> List[KProduction]:
@staticmethod
def _is_non_free_constructor(label: str) -> bool:
is_cell_map_constructor = label.endswith('CellMapItem') or label.endswith('CellMap_')
+ is_builtin_data_constructor = label in {'_Set_', '_List_', '_Map_', 'SetItem', 'ListItem', '_|->_'}
return is_cell_map_constructor or is_builtin_data_constructor
@property
|
codereview_new_python_data_6884
|
def resolve(node_id: str) -> str:
for verified_ids in dct.get('verified') or []:
cfg.add_verified(resolve(verified_ids['source']), resolve(verified_ids['target']))
- for alias, id in (dct.get('aliases') or {}).items():
cfg.add_alias(name=alias, id=id)
return cfg
```suggestion
for alias, id in dct.get('aliases', {}).items():
```
def resolve(node_id: str) -> str:
for verified_ids in dct.get('verified') or []:
cfg.add_verified(resolve(verified_ids['source']), resolve(verified_ids['target']))
+ for alias, id in dct.get('aliases', {}).items():
cfg.add_alias(name=alias, id=id)
return cfg
|
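The suggestion above replaces `(dct.get('aliases') or {})` with `dct.get('aliases', {})`. The two spellings differ only when the key is present but explicitly `None`; a small illustration (not part of the row):

```python
dct = {"aliases": None}
print(dct.get("aliases", {}))    # None: the default only applies when the key is missing
print(dct.get("aliases") or {})  # {}:   also falls back when the stored value is None (or empty)
```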
codereview_new_python_data_6885
|
def resolve(node_id: str) -> str:
for verified_ids in dct.get('verified') or []:
cfg.add_verified(resolve(verified_ids['source']), resolve(verified_ids['target']))
- for alias, id in (dct.get('aliases') or {}).items():
cfg.add_alias(name=alias, id=id)
return cfg
```suggestion
cfg.add_alias(name=alias, id=resolve(id))
```
def resolve(node_id: str) -> str:
for verified_ids in dct.get('verified') or []:
cfg.add_verified(resolve(verified_ids['source']), resolve(verified_ids['target']))
+ for alias, id in dct.get('aliases', {}).items():
cfg.add_alias(name=alias, id=id)
return cfg
|
codereview_new_python_data_6886
|
def resolve(node_id: str) -> str:
for verified_ids in dct.get('verified') or []:
cfg.add_verified(resolve(verified_ids['source']), resolve(verified_ids['target']))
- for alias, id in (dct.get('aliases') or {}).items():
cfg.add_alias(name=alias, id=id)
return cfg
```suggestion
def alias(self, node_id: str) -> str:
```
def resolve(node_id: str) -> str:
for verified_ids in dct.get('verified') or []:
cfg.add_verified(resolve(verified_ids['source']), resolve(verified_ids['target']))
+ for alias, id in dct.get('aliases', {}).items():
cfg.add_alias(name=alias, id=id)
return cfg
|
codereview_new_python_data_6888
|
def applyByNode(requestContext, seriesList, nodeNum, templateFunction, newName=N
"""
prefixes = set()
for series in seriesList:
- prefix = '.'.join(series.name.split('.')[:nodeNum + 1])
prefixes.add(prefix)
results = []
newContext = requestContext.copy()
```suggestion
if nodeNum > len(nodes):
```
def applyByNode(requestContext, seriesList, nodeNum, templateFunction, newName=N
"""
prefixes = set()
for series in seriesList:
+ nodes = series.name.split('.')
+ if nodeNum >= len(nodes):
+ raise InputParameterError("{} do not contans {} nodes".format(series.name, nodeNum))
+ prefix = '.'.join(nodes[:nodeNum + 1])
prefixes.add(prefix)
results = []
newContext = requestContext.copy()
|
codereview_new_python_data_6889
|
def _decode_stability_pool_event(
event.event_type = HistoryEventType.STAKING
event.event_subtype = HistoryEventSubType.REWARD
event.counterparty = CPT_LIQUITY
- event.notes = f"Collect {event.balance.amount} {event.asset.symbol_or_name()} from liquity's stability pool" # noqa: E501
elif event.asset == self.lusd:
event.event_type = HistoryEventType.STAKING
event.event_subtype = HistoryEventSubType.REMOVE_ASSET
I would use `event.asset.symbol_or_name()` here
def _decode_stability_pool_event(
event.event_type = HistoryEventType.STAKING
event.event_subtype = HistoryEventSubType.REWARD
event.counterparty = CPT_LIQUITY
+ resolved_asset = event.asset.resolve_to_asset_with_symbol()
+ event.notes = f"Collect {event.balance.amount} {resolved_asset.symbol} from liquity's stability pool" # noqa: E501
elif event.asset == self.lusd:
event.event_type = HistoryEventType.STAKING
event.event_subtype = HistoryEventSubType.REMOVE_ASSET
|
codereview_new_python_data_6890
|
-from typing import TYPE_CHECKING
-from rotkehlchen.constants.timing import DATA_UPDATES_REFRESH
-from rotkehlchen.db.updates import LAST_DATA_UPDATES_KEY
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
-def should_check_data_updates(database: 'DBHandler') -> bool:
"""
- Checks if the last time we checked data updates is far enough to trigger
- the process of querying it again.
"""
with database.conn.read_ctx() as cursor:
- cursor.execute('SELECT value FROM settings WHERE name=?', (LAST_DATA_UPDATES_KEY,))
timestamp_in_db = cursor.fetchone()
if timestamp_in_db is None:
return True
last_update_ts = deserialize_timestamp(timestamp_in_db)
- return ts_now() - last_update_ts >= DATA_UPDATES_REFRESH
```suggestion
LAST_EVM_ACCOUNTS_DETECT_KEY = 'last_evm_accounts_detect_ts'
```
+from typing import TYPE_CHECKING, Literal
+
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
+def should_run_periodic_task(
+ database: 'DBHandler',
+ key_name: Literal['last_data_updates_ts', 'last_evm_accounts_detect_ts'],
+ refresh_period: int,
+) -> bool:
"""
+ Checks if enough time has elapsed since the last run of a periodic task in order to run
+ it again.
"""
with database.conn.read_ctx() as cursor:
+ cursor.execute('SELECT value FROM settings WHERE name=?', (key_name,))
timestamp_in_db = cursor.fetchone()
if timestamp_in_db is None:
return True
last_update_ts = deserialize_timestamp(timestamp_in_db)
+ return ts_now() - last_update_ts >= refresh_period
|
codereview_new_python_data_6891
|
-from typing import TYPE_CHECKING
-from rotkehlchen.constants.timing import DATA_UPDATES_REFRESH
-from rotkehlchen.db.updates import LAST_DATA_UPDATES_KEY
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
-def should_check_data_updates(database: 'DBHandler') -> bool:
"""
- Checks if the last time we checked data updates is far enough to trigger
- the process of querying it again.
"""
with database.conn.read_ctx() as cursor:
- cursor.execute('SELECT value FROM settings WHERE name=?', (LAST_DATA_UPDATES_KEY,))
timestamp_in_db = cursor.fetchone()
if timestamp_in_db is None:
return True
last_update_ts = deserialize_timestamp(timestamp_in_db)
- return ts_now() - last_update_ts >= DATA_UPDATES_REFRESH
cute. You guys checked each other's PRs and made the same file? Let's see who will be hit with the conflicts :rofl:
+from typing import TYPE_CHECKING, Literal
+
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
+def should_run_periodic_task(
+ database: 'DBHandler',
+ key_name: Literal['last_data_updates_ts', 'last_evm_accounts_detect_ts'],
+ refresh_period: int,
+) -> bool:
"""
+ Checks if enough time has elapsed since the last run of a periodic task in order to run
+ it again.
"""
with database.conn.read_ctx() as cursor:
+ cursor.execute('SELECT value FROM settings WHERE name=?', (key_name,))
timestamp_in_db = cursor.fetchone()
if timestamp_in_db is None:
return True
last_update_ts = deserialize_timestamp(timestamp_in_db)
+ return ts_now() - last_update_ts >= refresh_period
|
codereview_new_python_data_6892
|
ETH_PROTOCOLS_CACHE_REFRESH = DAY_IN_SECONDS * 3
DATA_UPDATES_REFRESH = DAY_IN_SECONDS
To be consistent with naming perhaps
```suggestion
EVM_ACCOUNTS_DETECTION_REFRESH = DAY_IN_SECONDS
```
ETH_PROTOCOLS_CACHE_REFRESH = DAY_IN_SECONDS * 3
DATA_UPDATES_REFRESH = DAY_IN_SECONDS
+EVM_ACCOUNTS_DETECTION_REFRESH = DAY_IN_SECONDS
|
codereview_new_python_data_6893
|
class WSMessageType(Enum):
PREMIUM_STATUS_UPDATE = auto()
DB_UPGRADE_STATUS = auto()
# Used for evm address migration after new chain integration
- EVM_ADDRESS_MIGRATION = auto()
# Used for when a new token is found and saved via processing evm transactions
NEW_EVM_TOKEN_DETECTED = auto()
DATA_MIGRATION_STATUS = auto()
as I said re-use and generalize the `EVM_ADDRESS_MIGRATION` message. It's the exact same message and the frontend will handle it in the same way.
class WSMessageType(Enum):
PREMIUM_STATUS_UPDATE = auto()
DB_UPGRADE_STATUS = auto()
# Used for evm address migration after new chain integration
+ EVM_ACCOUNTS_DETECTION = auto()
# Used for when a new token is found and saved via processing evm transactions
NEW_EVM_TOKEN_DETECTED = auto()
DATA_MIGRATION_STATUS = auto()
|
codereview_new_python_data_6894
|
class WSMessageType(Enum):
PREMIUM_STATUS_UPDATE = auto()
DB_UPGRADE_STATUS = auto()
# Used for evm address migration after new chain integration
- EVM_ADDRESS_MIGRATION = auto()
# Used for when a new token is found and saved via processing evm transactions
NEW_EVM_TOKEN_DETECTED = auto()
DATA_MIGRATION_STATUS = auto()
re consistency as per the previous comment
```suggestion
EVM_ACCOUNTS_DETECTION = auto()
```
class WSMessageType(Enum):
PREMIUM_STATUS_UPDATE = auto()
DB_UPGRADE_STATUS = auto()
# Used for evm address migration after new chain integration
+ EVM_ACCOUNTS_DETECTION = auto()
# Used for when a new token is found and saved via processing evm transactions
NEW_EVM_TOKEN_DETECTED = auto()
DATA_MIGRATION_STATUS = auto()
|
codereview_new_python_data_6895
|
-from typing import TYPE_CHECKING
-from rotkehlchen.constants.timing import DATA_UPDATES_REFRESH
-from rotkehlchen.db.updates import LAST_DATA_UPDATES_KEY
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
-def should_check_data_updates(database: 'DBHandler') -> bool:
"""
- Checks if the last time we checked data updates is far enough to trigger
- the process of querying it again.
"""
with database.conn.read_ctx() as cursor:
- cursor.execute('SELECT value FROM settings WHERE name=?', (LAST_DATA_UPDATES_KEY,))
timestamp_in_db = cursor.fetchone()
if timestamp_in_db is None:
return True
last_update_ts = deserialize_timestamp(timestamp_in_db)
- return ts_now() - last_update_ts >= DATA_UPDATES_REFRESH
you can limit the type at `key_name`. We know all of them so you can use `Literal`
+from typing import TYPE_CHECKING, Literal
+
from rotkehlchen.serialization.deserialize import deserialize_timestamp
from rotkehlchen.utils.misc import ts_now
if TYPE_CHECKING:
from rotkehlchen.db.dbhandler import DBHandler
+def should_run_periodic_task(
+ database: 'DBHandler',
+ key_name: Literal['last_data_updates_ts', 'last_evm_accounts_detect_ts'],
+ refresh_period: int,
+) -> bool:
"""
+ Checks if enough time has elapsed since the last run of a periodic task in order to run
+ it again.
"""
with database.conn.read_ctx() as cursor:
+ cursor.execute('SELECT value FROM settings WHERE name=?', (key_name,))
timestamp_in_db = cursor.fetchone()
if timestamp_in_db is None:
return True
last_update_ts = deserialize_timestamp(timestamp_in_db)
+ return ts_now() - last_update_ts >= refresh_period
|
codereview_new_python_data_6967
|
# known checksums for different PACE versions. used to validate the download.
checksums = { \
- 'v.2021.2.3.upd2': '8fd1162724d349b930e474927197f20d',
- 'v.2021.4.9': '4db54962fbd6adcf8c18d46e1798ceb5',
- 'v.2021.9.28': 'f98363bb98adc7295ea63974738c2a1b',
- 'v.2021.10.25': 'a2ac3315c41a1a4a5c912bcb1bc9c5cc',
- 'v.2021.10.25.fix': 'e0572de57039d4afedefb25707b6ceae',
'v.2021.10.25.fix2': '32394d799bc282bb57696c78c456e64f',
'v.2022.06.27': '400f0a4b44c1ce64ae47796e6de4bba8'
}
Since the code in `src/ML-PACE` assumes the latest version, you can delete the hashes for the older versions from this table.
# known checksums for different PACE versions. used to validate the download.
checksums = { \
'v.2021.10.25.fix2': '32394d799bc282bb57696c78c456e64f',
'v.2022.06.27': '400f0a4b44c1ce64ae47796e6de4bba8'
}
|
codereview_new_python_data_6969
|
def compat_kw(*args, **kw):
)
elif dtype.kind == "c":
result = st.complex_numbers(
- width=8 * dtype.itemsize, # convert from bytes to bits
**compat_kw(
"min_magnitude",
"max_magnitude",
Should we do this instead, much like in the float case above?
```suggestion
width=min(8 * dtype.itemsize, 128), # convert from bytes to bits
```
Because right now, [a test](https://github.com/HypothesisWorks/hypothesis/actions/runs/4091754172/jobs/7056042667#step:6:927) fails for the dtype `G`, which is an alias for `complex256`.
def compat_kw(*args, **kw):
)
elif dtype.kind == "c":
result = st.complex_numbers(
+ width=min(8 * dtype.itemsize, 128), # convert from bytes to bits
**compat_kw(
"min_magnitude",
"max_magnitude",
|
codereview_new_python_data_6970
|
def complex_numbers(
the system ``sqrt`` function.
The width argument specifies the maximum number of bits of precision
- required to represent the entire generated complex.
Valid values are 64 or 128, which correspond to the real and imaginary
components having width 32 or 64, respectively.
Passing ``width=64`` will still use the builtin 128-bit
```suggestion
required to represent the entire generated complex number.
```
def complex_numbers(
the system ``sqrt`` function.
The width argument specifies the maximum number of bits of precision
+ required to represent the entire generated complex number.
Valid values are 64 or 128, which correspond to the real and imaginary
components having width 32 or 64, respectively.
Passing ``width=64`` will still use the builtin 128-bit
|
codereview_new_python_data_6971
|
def complex_numbers(
check_type(bool, allow_subnormal, "allow_subnormal")
if width not in (64, 128):
raise InvalidArgument(
- f"Got width={width!r}, but the only valid values are the integers 64 and 128. "
- "Other data types like complex32 or complex256 are not supported."
# For numpy, these types would be supported (but not by CPython):
# https://numpy.org/doc/stable/reference/arrays.scalars.html#complex-floating-point-types
)
```suggestion
f"width={width!r}, but must be 64 or 128 (other complex dtypes "
"such as complex32 or complex256 are not supported)"
```
nit/brevity
def complex_numbers(
check_type(bool, allow_subnormal, "allow_subnormal")
if width not in (64, 128):
raise InvalidArgument(
+ f"width={width!r}, but must be 64 or 128 (other complex dtypes "
+ "such as complex32 or complex256 are not supported)"
# For numpy, these types would be supported (but not by CPython):
# https://numpy.org/doc/stable/reference/arrays.scalars.html#complex-floating-point-types
)
|
codereview_new_python_data_6972
|
import sys
from copy import copy
-from functools import lru_cache
from types import SimpleNamespace
from typing import Tuple
Looks like this import is unused now, once you delete it feel free to merge!
import sys
from copy import copy
from types import SimpleNamespace
from typing import Tuple
|
codereview_new_python_data_6973
|
def xfail(
condition: bool = True,
*,
reason: str = "",
- raises: Union[Type[BaseException], Tuple[Type[BaseException], ...]] = BaseException,
) -> "example":
"""Mark this example as an expected failure, like pytest.mark.xfail().
```suggestion
raises: Union[
Type[BaseException], Tuple[Type[BaseException], ...]
] = BaseException,
```
def xfail(
condition: bool = True,
*,
reason: str = "",
+ raises: Union[
+ Type[BaseException], Tuple[Type[BaseException], ...]
+ ] = BaseException,
) -> "example":
"""Mark this example as an expected failure, like pytest.mark.xfail().
|
codereview_new_python_data_6974
|
def my_hook():
if r in RANDOMS_TO_MANAGE.values():
return
- if not PYPY:
# PYPY does not have `sys.getrefcount`
gc.collect()
if not gc.get_referrers(r):
```suggestion
if not PYPY: # pragma: no branch
```
def my_hook():
if r in RANDOMS_TO_MANAGE.values():
return
+ if not PYPY: # pragma: no branch
# PYPY does not have `sys.getrefcount`
gc.collect()
if not gc.get_referrers(r):
|
codereview_new_python_data_6975
|
def key(name):
if __name__ == "__main__":
# This would be really really annoying to write automated tests for, so I've
# done some manual exploratory testing: `pip install grequests gevent==21.12.0`,
- # delete gevent from the known-ftz set and reverse-alphabetical sort so we land
- # on grequests before gevent, and call print() as desired to observe behavior.
import grequests # noqa
print(identify_ftz_culprits())
```suggestion
# delete gevent from known-ftz set and reverse-alphabetical sort so we land
```
Include a quick example of how to do the reverse-alphabetical sort? I ended up swapping "gevent" and "grequests" if identified in `key()` so `grequests` shows up first lol.
def key(name):
if __name__ == "__main__":
# This would be really really annoying to write automated tests for, so I've
# done some manual exploratory testing: `pip install grequests gevent==21.12.0`,
+ # and call print() as desired to observe behavior.
import grequests # noqa
+ # To test without skipping to a known answer, uncomment the following line and
+ # change the last element of key from `name` to `-len(name)` so that we check
+ # grequests before gevent.
+ ## KNOWN_EVER_CULPRITS = [c for c in KNOWN_EVER_CULPRITS if c != "gevent"]
print(identify_ftz_culprits())
|
codereview_new_python_data_6976
|
with warnings.catch_warnings():
# We ignore all warnings here as many array modules warn on import
warnings.simplefilter("ignore")
- # We go through the steps described in README.md to define `xp_xps_parais`,
- # which contains the array module(s) to be ran against the test suite along
# with their respective strategy namespaces.
if test_xp_option == "default":
xp_and_xps_pairs = [(mock_xp, mock_xps)]
```suggestion
# We go through the steps described in README.md to define `xp_xps_pairs`,
# which contains the array module(s) to be run against the test suite, along
```
with warnings.catch_warnings():
# We ignore all warnings here as many array modules warn on import
warnings.simplefilter("ignore")
+ # We go through the steps described in README.md to define `xp_xps_pairs`,
+ # which contains the array module(s) to be run against the test suite, along
# with their respective strategy namespaces.
if test_xp_option == "default":
xp_and_xps_pairs = [(mock_xp, mock_xps)]
|
codereview_new_python_data_6977
|
def complex_numbers(
it is an error to enable ``allow_infinity``.
``allow_subnormal`` is applied to each part of the complex number
- separately.
The magnitude constraints are respected up to a relative error
of (around) floating-point epsilon, due to implementation via
```suggestion
separately, as for :func:`~hypothesis.strategies.floats`.
```
def complex_numbers(
it is an error to enable ``allow_infinity``.
``allow_subnormal`` is applied to each part of the complex number
+ separately, as for :func:`~hypothesis.strategies.floats`.
The magnitude constraints are respected up to a relative error
of (around) floating-point epsilon, due to implementation via
|
codereview_new_python_data_6978
|
def test_minmax_magnitude_equal(data, mag):
def _is_subnormal(x):
- return -sys.float_info.min < x < sys.float_info.min
@pytest.mark.parametrize(
Not quite - this would incorrectly say that +/- zero is subnormal. I prefer `0 < abs(x) < sys.float_info.min`.
def test_minmax_magnitude_equal(data, mag):
def _is_subnormal(x):
+ return 0 < abs(x) < sys.float_info.min
@pytest.mark.parametrize(
|
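The corrected predicate above excludes zero by requiring `0 < abs(x)`. A quick sanity check (illustrative only, not part of the row):

```python
import sys

def _is_subnormal(x):
    return 0 < abs(x) < sys.float_info.min

print(_is_subnormal(0.0))                      # False: zero is not subnormal
print(_is_subnormal(sys.float_info.min / 2))   # True: smaller than the smallest normal float
print(_is_subnormal(-sys.float_info.min / 2))  # True: sign does not matter
```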
codereview_new_python_data_6979
|
def scan(self, timeout=None):
for p, test_case in product(
self.state_paths, self.configuration.test_cases):
log_automotive.info("Scan path %s", p)
- terminate = False if kill_time is None else \
- kill_time <= time.monotonic()
if terminate or self.configuration.stop_event.is_set():
log_automotive.debug(
"Execution time exceeded. Terminating scan!")
```suggestion
terminate = kill_time and kill_time <= time.monotonic()
```
def scan(self, timeout=None):
for p, test_case in product(
self.state_paths, self.configuration.test_cases):
log_automotive.info("Scan path %s", p)
+ terminate = kill_time and kill_time <= time.monotonic()
if terminate or self.configuration.stop_event.is_set():
log_automotive.debug(
"Execution time exceeded. Terminating scan!")
|
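One subtlety of the suggested one-liner above: when `kill_time` is `None`, the expression short-circuits to `None` rather than `False`, which is still falsy and therefore behaves the same in the following `if`. A tiny illustration (not part of the row):

```python
import time

kill_time = None
terminate = kill_time and kill_time <= time.monotonic()
print(terminate)        # None, not False
print(bool(terminate))  # False, so `if terminate or ...` still behaves as before
```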
codereview_new_python_data_6980
|
class GTPHeader(Packet):
def post_build(self, p, pay):
p += pay
if self.length is None:
tmp_len = len(p) - 4 if self.version == 2 else len(p) - 8
p = p[:2] + struct.pack("!H", tmp_len) + p[4:]
return p
Could you explain how a test on the version fixes your issue?
class GTPHeader(Packet):
def post_build(self, p, pay):
p += pay
if self.length is None:
+ # The message length field is calculated different in GTPv1 and GTPv2. # noqa: E501
+ # For GTPv1 it is defined as the rest of the packet following the mandatory 8-byte GTP header # noqa: E501
+ # For GTPv2 it is defined as the length of the message in bytes excluding the mandatory part of the GTP-C header (the first 4 bytes) # noqa: E501
tmp_len = len(p) - 4 if self.version == 2 else len(p) - 8
p = p[:2] + struct.pack("!H", tmp_len) + p[4:]
return p
|
codereview_new_python_data_6981
|
def itom(x):
def decode_locale_str(x):
# type: (bytes) -> str
return x.decode(encoding=locale.getlocale()[1] or "utf-8", errors="replace")
Could you add a docstring?
def itom(x):
def decode_locale_str(x):
# type: (bytes) -> str
+ """
+ Decode bytes into a string using the system locale.
+ Useful on Windows where it can be unusual (e.g. cp1252)
+ """
return x.decode(encoding=locale.getlocale()[1] or "utf-8", errors="replace")
|
codereview_new_python_data_6982
|
def _flush_inout(self):
def destroy(self):
# type: () -> None
if self.started.locked():
raise ValueError("Can't close running Automaton ! Call stop() beforehand")
self._flush_inout()
Could you add a docstring?
def _flush_inout(self):
def destroy(self):
# type: () -> None
+ """
+ Destroys a stopped Automaton: this cleanups all opened file descriptors.
+ Required on PyPy for instance where the garbage collector behaves differently.
+ """
if self.started.locked():
raise ValueError("Can't close running Automaton ! Call stop() beforehand")
self._flush_inout()
|
codereview_new_python_data_6983
|
def _get_initial_requests(self, **kwargs):
def __reduce__(self): # type: ignore
f, t, d = super(ServiceEnumerator, self).__reduce__() # type: ignore
try:
- for k, v in d["_request_iterators"].items():
d["_request_iterators"][k] = list(v)
except KeyError:
pass
try:
- for k in d["_retry_pkt"].keys():
d["_retry_pkt"][k] = list(self._get_retry_iterator(k))
except KeyError:
pass
```suggestion
for k, v in six.iteritems(d["_request_iterators"]):
```
def _get_initial_requests(self, **kwargs):
def __reduce__(self): # type: ignore
f, t, d = super(ServiceEnumerator, self).__reduce__() # type: ignore
try:
+ for k, v in six.iteritems(d["_request_iterators"]):
d["_request_iterators"][k] = list(v)
except KeyError:
pass
try:
+ for k in d["_retry_pkt"]:
d["_retry_pkt"][k] = list(self._get_retry_iterator(k))
except KeyError:
pass
|
codereview_new_python_data_6984
|
def _get_initial_requests(self, **kwargs):
def __reduce__(self): # type: ignore
f, t, d = super(ServiceEnumerator, self).__reduce__() # type: ignore
try:
- for k, v in d["_request_iterators"].items():
d["_request_iterators"][k] = list(v)
except KeyError:
pass
try:
- for k in d["_retry_pkt"].keys():
d["_retry_pkt"][k] = list(self._get_retry_iterator(k))
except KeyError:
pass
```suggestion
for k in d["_retry_pkt"]:
```
def _get_initial_requests(self, **kwargs):
def __reduce__(self): # type: ignore
f, t, d = super(ServiceEnumerator, self).__reduce__() # type: ignore
try:
+ for k, v in six.iteritems(d["_request_iterators"]):
d["_request_iterators"][k] = list(v)
except KeyError:
pass
try:
+ for k in d["_retry_pkt"]:
d["_retry_pkt"][k] = list(self._get_retry_iterator(k))
except KeyError:
pass
|
codereview_new_python_data_6985
|
class STUNGenericTlv(Packet):
def dispatch_hook(cls, _pkt=None, *args, **kwargs):
if _pkt and len(_pkt) >= 2:
t = struct.unpack("!H", _pkt[:2])[0]
- return _stun_tlv_class.get(t, "STUNGenericTlv")
return cls
def guess_payload_class(self, payload):
```suggestion
return _stun_tlv_class.get(t, cls)
```
class STUNGenericTlv(Packet):
def dispatch_hook(cls, _pkt=None, *args, **kwargs):
if _pkt and len(_pkt) >= 2:
t = struct.unpack("!H", _pkt[:2])[0]
+ return _stun_tlv_class.get(t, cls)
return cls
def guess_payload_class(self, payload):
|
codereview_new_python_data_6986
|
-# SPDX-License-Identifier: GPL-2.0-or-later
# This file is part of Scapy
# See https://scapy.net/ for more information
# Copyright (C) 2018 antoine.torre <torreantoine1@gmail.com>
This should be GPL-2.0-only
+# SPDX-License-Identifier: GPL-2.0-only
# This file is part of Scapy
# See https://scapy.net/ for more information
# Copyright (C) 2018 antoine.torre <torreantoine1@gmail.com>
|
codereview_new_python_data_6987
|
class RandClasslessStaticRoutesField(RandField):
"""
def _fix(self):
- return "%s/%d:%s" % (RandIP(), RandByte(), RandIP())
class ClasslessFieldListField(FieldListField):
```suggestion
return "%s/%d:%s" % (RandIP(), RandNum(0, 32), RandIP())
```
class RandClasslessStaticRoutesField(RandField):
"""
def _fix(self):
+ return "%s/%d:%s" % (RandIP(), RandNum(0, 32), RandIP())
class ClasslessFieldListField(FieldListField):
|
codereview_new_python_data_6988
|
no_restore=False)
# Build the APK
-precommands.execute([])
# Remove the aab files as we don't need them, this saves space
output_dir = const.PUBDIR
Does the build get the `NuGet.config` another way now?
You probably would need at least the dotnet8 feed:
```xml
<?xml version="1.0" encoding="utf-8"?>
<configuration>
<packageSources>
<clear />
<add key="nuget" value="https://api.nuget.org/v3/index.json" />
<add key="dotnet8" value="https://pkgs.dev.azure.com/dnceng/public/_packaging/dotnet8/nuget/v3/index.json" />
</packageSources>
</configuration>
```
no_restore=False)
# Build the APK
+precommands.execute(['--source', 'MauiNuGet.config'])
# Remove the aab files as we don't need them, this saves space
output_dir = const.PUBDIR
|
codereview_new_python_data_6989
|
'''
from shared.const import PUBDIR
from shared.runner import TestTraits, Runner
-from shared.versionmanager import versionsreadjsonfilesaveenv
EXENAME = 'MauiAndroidDefault'
if __name__ == "__main__":
- versionsreadjsonfilesaveenv(rf".\{PUBDIR}\versions.json")
traits = TestTraits(exename=EXENAME,
guiapp='false',
Probably better to follow "standard" naming conventions and use `_` in the method name.
'''
from shared.const import PUBDIR
from shared.runner import TestTraits, Runner
+from shared.versionmanager import versions_read_json_file_save_env
EXENAME = 'MauiAndroidDefault'
if __name__ == "__main__":
+ versions_read_json_file_save_env(rf".\{PUBDIR}\versions.json")
traits = TestTraits(exename=EXENAME,
guiapp='false',
|
codereview_new_python_data_6990
|
import os
import subprocess
-def versionswritejson(versiondict: dict, outputfile = 'versions.json'):
with open(outputfile, 'w') as file:
json.dump(versiondict, file)
-def versionsreadjson(inputfile = 'versions.json'):
with open(inputfile, 'r') as file:
return json.load(file)
-def versionswriteenv(versiondict: dict):
for key, value in versiondict.items():
os.environ[key] = value
-def versionsreadjsonfilesaveenv(inputfile = 'versions.json'):
- versions = versionsreadjson(inputfile)
print(f"Versions: {versions}")
- versionswriteenv(versions)
# Remove the versions.json file if we are in the lab to ensure SOD doesn't pick it up
if "PERFLAB_INLAB" in os.environ and os.environ["PERFLAB_INLAB"] == "1":
os.remove(inputfile)
-def GetVersionFromDllPowershell(dll_path: str):
result = subprocess.run(['powershell', '-Command', rf'Get-ChildItem {dll_path} | Select-Object -ExpandProperty VersionInfo | Select-Object -ExpandProperty ProductVersion'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return result.stdout.decode('utf-8').strip()
Name should be Python-like. You probably had brain in PS mode. :)
import os
import subprocess
+def versions_write_json(versiondict: dict, outputfile = 'versions.json'):
with open(outputfile, 'w') as file:
json.dump(versiondict, file)
+def versions_read_json(inputfile = 'versions.json'):
with open(inputfile, 'r') as file:
return json.load(file)
+def versions_write_env(versiondict: dict):
for key, value in versiondict.items():
os.environ[key] = value
+def versions_read_json_file_save_env(inputfile = 'versions.json'):
+ versions = versions_read_json(inputfile)
print(f"Versions: {versions}")
+ versions_write_env(versions)
# Remove the versions.json file if we are in the lab to ensure SOD doesn't pick it up
if "PERFLAB_INLAB" in os.environ and os.environ["PERFLAB_INLAB"] == "1":
os.remove(inputfile)
+def get_version_from_dll_powershell(dll_path: str):
result = subprocess.run(['powershell', '-Command', rf'Get-ChildItem {dll_path} | Select-Object -ExpandProperty VersionInfo | Select-Object -ExpandProperty ProductVersion'], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
return result.stdout.decode('utf-8').strip()
|
codereview_new_python_data_7102
|
def move_to_element(self, to_element):
def move_to_element_with_offset(self, to_element, xoffset, yoffset):
"""Move the mouse by an offset of the specified element. Offsets are
- relative to the top-left corner of the element.
:Args:
- to_element: The WebElement to move to.
- - xoffset: X offset to move to.
- - yoffset: Y offset to move to.
"""
self.w3c_actions.pointer_action.move_to(to_element, int(xoffset), int(yoffset))
accidental removal of the docstring prefix here
def move_to_element(self, to_element):
def move_to_element_with_offset(self, to_element, xoffset, yoffset):
"""Move the mouse by an offset of the specified element. Offsets are
+ relative to the in-view center point of the element.
:Args:
- to_element: The WebElement to move to.
+ - xoffset: X offset to move to, as a positive or negative integer.
+ - yoffset: Y offset to move to, as a positive or negative integer.
"""
self.w3c_actions.pointer_action.move_to(to_element, int(xoffset), int(yoffset))
|
codereview_new_python_data_7107
|
def run(args: Tuple[str, str, str]) -> str:
- args: the components of the command being executed.
:Returns: The log string containing the driver location.
"""
- logger.debug(f"Executing selenium manager with: {args}")
completed_proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = completed_proc.stdout.decode("utf-8"), completed_proc.stderr.decode("utf-8")
if completed_proc.returncode:
- raise SeleniumManagerException(f"Selenium Manager exited non zero. {stdout}{stderr}")
else:
# selenium manager exited 0 successfully, parse the executable path from stdout.
return stdout.split("\t")[-1].strip()
I think we need to also rescue this with something like:
```
raise SeleniumManagerException(f"Unsuccessful command executed. {args}; {error}")
```
def run(args: Tuple[str, str, str]) -> str:
- args: the components of the command being executed.
:Returns: The log string containing the driver location.
"""
+ command = " ".join(args)
+ logger.debug(f"Executing: {command}")
completed_proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout = completed_proc.stdout.decode("utf-8").rstrip("\n")
+ stderr = completed_proc.stderr.decode("utf-8").rstrip("\n")
if completed_proc.returncode:
+ raise SeleniumManagerException(f"Selenium manager failed for: {command}. {stderr}")
else:
# selenium manager exited 0 successfully, parse the executable path from stdout.
return stdout.split("\t")[-1].strip()
|
codereview_new_python_data_7108
|
def run(args: Tuple[str, str, str]) -> str:
- args: the components of the command being executed.
:Returns: The log string containing the driver location.
"""
- logger.debug(f"Executing selenium manager with: {args}")
completed_proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- stdout, stderr = completed_proc.stdout.decode("utf-8"), completed_proc.stderr.decode("utf-8")
if completed_proc.returncode:
- raise SeleniumManagerException(f"Selenium Manager exited non zero. {stdout}{stderr}")
else:
# selenium manager exited 0 successfully, parse the executable path from stdout.
return stdout.split("\t")[-1].strip()
want to keep what command was executed in the error message...
def run(args: Tuple[str, str, str]) -> str:
- args: the components of the command being executed.
:Returns: The log string containing the driver location.
"""
+ command = " ".join(args)
+ logger.debug(f"Executing: {command}")
completed_proc = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ stdout = completed_proc.stdout.decode("utf-8").rstrip("\n")
+ stderr = completed_proc.stderr.decode("utf-8").rstrip("\n")
if completed_proc.returncode:
+ raise SeleniumManagerException(f"Selenium manager failed for: {command}. {stderr}")
else:
# selenium manager exited 0 successfully, parse the executable path from stdout.
return stdout.split("\t")[-1].strip()
|
codereview_new_python_data_7120
|
# under the License.
-__version__ = "4.4.4"
Please do not bump the version as we do not know if this will be 4.4.4 or 4.5.0
# under the License.
+__version__ = "4.4.3"
|
codereview_new_python_data_7121
|
from .common.proxy import Proxy # noqa
from .common.keys import Keys # noqa
-__version__ = '4.4.4'
# We need an explicit __all__ because the above won't otherwise be exported.
__all__ = [
Please do not bump the version as we do not know if this will be 4.4.4 or 4.5.0
from .common.proxy import Proxy # noqa
from .common.keys import Keys # noqa
+__version__ = '4.4.3'
# We need an explicit __all__ because the above won't otherwise be exported.
__all__ = [
|
codereview_new_python_data_7124
|
'project_urls': {
'Bug Tracker': 'https://github.com/SeleniumHQ/selenium/issues',
'Changes': 'https://github.com/SeleniumHQ/selenium/blob/trunk/py/CHANGES',
- 'Documentation': 'https://seleniumhq.github.io/selenium/docs/api/py/api.html',
'Source Code': 'https://github.com/SeleniumHQ/selenium/tree/trunk/py',
},
'python_requires': '~=3.7',
is this better pointing at: `https://github.com/SeleniumHQ/selenium/blob/trunk/py/CHANGES` specifically?
'project_urls': {
'Bug Tracker': 'https://github.com/SeleniumHQ/selenium/issues',
'Changes': 'https://github.com/SeleniumHQ/selenium/blob/trunk/py/CHANGES',
+ 'Documentation': 'https://www.selenium.dev/documentation/overview/',
'Source Code': 'https://github.com/SeleniumHQ/selenium/tree/trunk/py',
},
'python_requires': '~=3.7',
|
codereview_new_python_data_7125
|
'project_urls': {
'Bug Tracker': 'https://github.com/SeleniumHQ/selenium/issues',
'Changes': 'https://github.com/SeleniumHQ/selenium/blob/trunk/py/CHANGES',
- 'Documentation': 'https://seleniumhq.github.io/selenium/docs/api/py/api.html',
'Source Code': 'https://github.com/SeleniumHQ/selenium/tree/trunk/py',
},
'python_requires': '~=3.7',
I think this is old docs, though I'm a little out of the loop on the docs at the moment; maybe it should point at: https://www.selenium.dev/documentation/overview/?
'project_urls': {
'Bug Tracker': 'https://github.com/SeleniumHQ/selenium/issues',
'Changes': 'https://github.com/SeleniumHQ/selenium/blob/trunk/py/CHANGES',
+ 'Documentation': 'https://www.selenium.dev/documentation/overview/',
'Source Code': 'https://github.com/SeleniumHQ/selenium/tree/trunk/py',
},
'python_requires': '~=3.7',
|
codereview_new_python_data_7126
|
def __init__(self, host, firefox_profile, firefox_binary=None, timeout=30):
self.binary.launch_browser(self.profile, timeout=timeout)
_URL = "http://%s:%d/hub" % (HOST, PORT)
- super().__init__(URL, keep_alive=True)
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId': sessionId})
should be `_URL`.
def __init__(self, host, firefox_profile, firefox_binary=None, timeout=30):
self.binary.launch_browser(self.profile, timeout=timeout)
_URL = "http://%s:%d/hub" % (HOST, PORT)
+ super().__init__(_URL, keep_alive=True)
def quit(self, sessionId=None):
self.execute(Command.QUIT, {'sessionId': sessionId})
|
codereview_new_python_data_7127
|
def import_devtools(ver):
devtools_path = pathlib.Path(__file__).parents[1].joinpath("devtools")
versions = tuple(f.name for f in devtools_path.iterdir() if f.is_dir())
latest = max((int(x[1:]) for x in versions))
- logger.debug(f"Falling back to loading `devtools`: v{latest}")
devtools = importlib.import_module(f"{base}{latest}")
return devtools
log to `selenium`, not `trio_cdp`.
def import_devtools(ver):
devtools_path = pathlib.Path(__file__).parents[1].joinpath("devtools")
versions = tuple(f.name for f in devtools_path.iterdir() if f.is_dir())
latest = max((int(x[1:]) for x in versions))
+ selenium_logger = logging.getLogger(__name__)
+ selenium_logger.debug(f"Falling back to loading `devtools`: v{latest}")
devtools = importlib.import_module(f"{base}{latest}")
return devtools
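A minimal sketch of the hierarchy point, assuming only standard `logging` behaviour (the logger names below are illustrative): configuring the parent `selenium` logger also covers module-level loggers created with `logging.getLogger(__name__)`.
```python
import logging

# A level (or handler) set on the parent "selenium" logger propagates to
# every child in the dotted hierarchy.
logging.getLogger("selenium").setLevel(logging.DEBUG)

child = logging.getLogger("selenium.webdriver.common.devtools")
assert child.getEffectiveLevel() == logging.DEBUG
```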
|
codereview_new_python_data_7128
|
def test_missing_cdp_devtools_version_falls_back(caplog):
- with caplog.at_level(logging.DEBUG, logger="trio_cdp"):
assert isinstance(import_devtools("will_never_exist"), types.ModuleType)
# assert the fallback occurred successfully offered up a v{n} option.
assert re.match(r"Falling back to loading `devtools`: v\d+", caplog.records[-1].msg) is not None
change to `selenium` hierarchical logger once implemented.
def test_missing_cdp_devtools_version_falls_back(caplog):
+ with caplog.at_level(logging.DEBUG, logger="selenium"):
assert isinstance(import_devtools("will_never_exist"), types.ModuleType)
# assert the fallback occurred successfully offered up a v{n} option.
assert re.match(r"Falling back to loading `devtools`: v\d+", caplog.records[-1].msg) is not None
|
codereview_new_python_data_7236
|
def test_skip(rule_runner: RuleRunner) -> None:
assert not result
-@pytest.mark.skipif(
- not (has_python_version("3.7")), reason="Missing requisite Python"
-)
def test_3rdparty_plugin(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
```suggestion
not has_python_version("3.7"), reason="Missing requisite Python (3.7)"
```
It's debatable whether having the actual version we're looking for in the skip message is good or bad. Good: it tells you right away what is missing when you're looking at the test results, without having to consult the sources. Bad: if we change the condition we also have to change the reason text. The bad can be fixed by extracting the version into a variable, at the cost of more LOC.
def test_skip(rule_runner: RuleRunner) -> None:
assert not result
+@pytest.mark.skipif(not (has_python_version("3.7")), reason="Missing requisite Python")
def test_3rdparty_plugin(rule_runner: RuleRunner) -> None:
rule_runner.write_files(
{
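One way to split the difference the reviewer describes, sketched under the assumption that the same `has_python_version` helper is in scope (a hypothetical refactor, not part of the applied change): extract the version into a variable so the condition and the reason text cannot drift apart.
```python
import pytest

_REQUIRED_PYTHON = "3.7"  # single source of truth for condition and message


@pytest.mark.skipif(
    not has_python_version(_REQUIRED_PYTHON),
    reason=f"Missing requisite Python ({_REQUIRED_PYTHON})",
)
def test_3rdparty_plugin(rule_runner) -> None:
    ...
```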
|
codereview_new_python_data_7237
|
def _is_default(self, __name: str) -> bool:
assert isinstance(v, OptionsInfo)
return (
self.options.is_default(__name.lstrip("_"))
and resolve_environment_sensitive_option(v.flag_names[0], self) is None
)
Interesting. What was the issue here?
def _is_default(self, __name: str) -> bool:
assert isinstance(v, OptionsInfo)
return (
+ # vars beginning with `_` are exposed as option names with the leading `_` stripped
self.options.is_default(__name.lstrip("_"))
and resolve_environment_sensitive_option(v.flag_names[0], self) is None
)
|
codereview_new_python_data_7238
|
async def _go_search_paths(
AsdfPathString.LOCAL.value: asdf_result.local_tool_paths,
}
expanded: list[str] = []
for s in paths:
if s == "<PATH>":
- expanded.extend(await Get(PathEnvironmentVariable, {})) # noqa: PNT30: Linear search
elif s in special_strings:
special_paths = special_strings[s]
expanded.extend(special_paths)
Optional: You can clear out this `noqa` by fetching `PathEnvironmentVariable` outside of the loop. Obviously repeated `Get`s are only going to arise with pathological inputs, so 🤷
async def _go_search_paths(
AsdfPathString.LOCAL.value: asdf_result.local_tool_paths,
}
+ path_variables = await Get(PathEnvironmentVariable, {})
expanded: list[str] = []
for s in paths:
if s == "<PATH>":
+ expanded.extend(path_variables)
elif s in special_strings:
special_paths = special_strings[s]
expanded.extend(special_paths)
|
codereview_new_python_data_7239
|
def _is_default(self, __name: str) -> bool:
assert isinstance(v, OptionsInfo)
return (
self.options.is_default(__name.lstrip("_"))
and resolve_environment_sensitive_option(v.flag_names[0], self) is None
)
```suggestion
# vars beginning with `_` are exposed as option names with the leading `_` stripped
self.options.is_default(__name.lstrip("_"))
```
def _is_default(self, __name: str) -> bool:
assert isinstance(v, OptionsInfo)
return (
+ # vars beginning with `_` are exposed as option names with the leading `_` stripped
self.options.is_default(__name.lstrip("_"))
and resolve_environment_sensitive_option(v.flag_names[0], self) is None
)
|
codereview_new_python_data_7240
|
class GlobalOptions(BootstrapOptions, Subsystem):
default=[],
)
- work_dir = StrOption(
- advanced=True,
- default="",
- help=softwrap(
- """
- Specs on the command line are relative to the `work_dir`.
-
- Prefix specs with `//` to make them absolute from the build root. By default `work_dir`
- is set to the current working directory when invoking `pants`.
- """
- ),
- )
-
@classmethod
def validate_instance(cls, opts):
"""Validates an instance of global options for cases that are not prohibited via
Why does scie-pants need to set an env var?
class GlobalOptions(BootstrapOptions, Subsystem):
default=[],
)
@classmethod
def validate_instance(cls, opts):
"""Validates an instance of global options for cases that are not prohibited via
|
codereview_new_python_data_7241
|
class GlobalOptions(BootstrapOptions, Subsystem):
default=[],
)
- work_dir = StrOption(
- advanced=True,
- default="",
- help=softwrap(
- """
- Specs on the command line are relative to the `work_dir`.
-
- Prefix specs with `//` to make them absolute from the build root. By default `work_dir`
- is set to the current working directory when invoking `pants`.
- """
- ),
- )
-
@classmethod
def validate_instance(cls, opts):
"""Validates an instance of global options for cases that are not prohibited via
At a glance it's not obvious why we need this option. It looks like it's to communicate the cwd to the daemon, but it's not something the user will ever set, right? In which case could we just use a custom env var with some mangled name instead of an option? That way it won't appear in help output etc.
class GlobalOptions(BootstrapOptions, Subsystem):
default=[],
)
@classmethod
def validate_instance(cls, opts):
"""Validates an instance of global options for cases that are not prohibited via
|
codereview_new_python_data_7242
|
class Lambdex(PythonToolBase):
default_main = ConsoleScript("lambdex")
register_interpreter_constraints = True
- default_interpreter_constraints = ["CPython<4,>=3.7"]
register_lockfile = True
default_lockfile_resource = ("pants.backend.python.subsystems", "lambdex.lock")
I think it would make sense to keep the defaults to line up with what we say we support in `PythonGoogleCloudFunctionRuntimes`.
class Lambdex(PythonToolBase):
default_main = ConsoleScript("lambdex")
register_interpreter_constraints = True
+ default_interpreter_constraints = ["CPython>=3.7,<3.12"]
register_lockfile = True
default_lockfile_resource = ("pants.backend.python.subsystems", "lambdex.lock")
|
codereview_new_python_data_7243
|
class PythonSetup(Subsystem):
def interpreter_constraints(self) -> tuple[str, ...]:
# TODO: In 2.17.0.dev0 we should set the default above to None and tweak the message here
# appropriately.
- if not self.options.is_default("interpreter_constraints"):
warn_or_error(
"2.17.0.dev0",
"the factory default interpreter constraints value",
The `not` reads backwards. Is this right?
class PythonSetup(Subsystem):
def interpreter_constraints(self) -> tuple[str, ...]:
# TODO: In 2.17.0.dev0 we should set the default above to None and tweak the message here
# appropriately.
+ if self.options.is_default("interpreter_constraints"):
warn_or_error(
"2.17.0.dev0",
"the factory default interpreter constraints value",
|
codereview_new_python_data_7244
|
def help_text(val: str | Callable[[], str]) -> str | Callable[[], str]:
def docstring(doc: str | Callable[[], str]) -> Callable[[Callable[P, R]], Callable[P, R]]:
def wrapper(func: Callable[P, R]) -> Callable[P, R]:
func.__doc__ = strval(doc)
return func
Could you add a ... docstring ... to this function to explain when it should be used?
def help_text(val: str | Callable[[], str]) -> str | Callable[[], str]:
def docstring(doc: str | Callable[[], str]) -> Callable[[Callable[P, R]], Callable[P, R]]:
+ """Use this decorator to provide a dynamic doc-string to a function."""
+
def wrapper(func: Callable[P, R]) -> Callable[P, R]:
func.__doc__ = strval(doc)
return func
|
codereview_new_python_data_7245
|
def is_path_glob(spec: str) -> bool:
"""Check if `spec` should be treated as a `path` glob."""
- return len(spec) > 0 and spec[0].isalnum() or spec[0] in "_.:/*"
def glob_to_regexp(pattern: str, snap_to_path: bool = False) -> str:
```suggestion
return len(spec) > 0 and (spec[0].isalnum() or spec[0] in "_.:/*")
```
Can file names begin with `-`, I wonder?
Maybe it would be easier to do: `and spec[0] not in "<[("`. But that's what the check this function replaced did. Is there a reason to be more restrictive and not just check for these magic chars?
def is_path_glob(spec: str) -> bool:
"""Check if `spec` should be treated as a `path` glob."""
+ return len(spec) > 0 and (spec[0].isalnum() or spec[0] in "_.:/*")
def glob_to_regexp(pattern: str, snap_to_path: bool = False) -> str:
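For context on why the parentheses matter: `and` binds tighter than `or`, so the unparenthesised form still evaluates `spec[0]` when the length guard is false. A small self-contained illustration:
```python
spec = ""

# Unparenthesised: parsed as (len(spec) > 0 and spec[0].isalnum()) or spec[0] in "_.:/*",
# so the `or` branch runs on empty input and raises IndexError.
try:
    len(spec) > 0 and spec[0].isalnum() or spec[0] in "_.:/*"
except IndexError:
    pass

# Parenthesised: the length guard short-circuits the whole expression.
assert (len(spec) > 0 and (spec[0].isalnum() or spec[0] in "_.:/*")) is False
```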
|
codereview_new_python_data_7246
|
async def resolve_plugins(
internal_only=True,
python=python,
requirements=requirements,
description=f"Resolving plugins: {', '.join(requirements.req_strings)}",
),
)
This was `None` and that's a no-no. However, there's no type hint for this param, so `mypy` isn't complaining.
async def resolve_plugins(
internal_only=True,
python=python,
requirements=requirements,
+ interpreter_constraints=request.interpreter_constraints or InterpreterConstraints(),
description=f"Resolving plugins: {', '.join(requirements.req_strings)}",
),
)
|
codereview_new_python_data_7247
|
def rules():
- return [*pyenv_rules()]
```suggestion
return pyenv_rules()
```
def rules():
+ return pyenv_rules()
|
codereview_new_python_data_7248
|
async def get_python(
# they should list a more precise IC.
),
)
- specific_python = which_python_result.stdout.decode("ascii").strip()
shim_digest = await Get(
Digest,
Can remove the "ascii" here too
async def get_python(
# they should list a more precise IC.
),
)
+ specific_python = which_python_result.stdout.decode().strip()
shim_digest = await Get(
Digest,
|
codereview_new_python_data_7249
|
def __init__(
object.__setattr__(self, "digest", digest)
object.__setattr__(self, "args", tuple(args))
object.__setattr__(self, "extra_env", FrozenDict(extra_env or {}))
- object.__setattr__(self, "immutable_input_digests", FrozenDict(immutable_input_digests))
- object.__setattr__(self, "append_only_caches", FrozenDict(append_only_caches))
def to_run_in_sandbox_request(self) -> RunInSandboxRequest:
return RunInSandboxRequest(
This was likely cruft from removing `frozen_after_init`
def __init__(
object.__setattr__(self, "digest", digest)
object.__setattr__(self, "args", tuple(args))
object.__setattr__(self, "extra_env", FrozenDict(extra_env or {}))
+ object.__setattr__(self, "immutable_input_digests", FrozenDict(immutable_input_digests or {}))
+ object.__setattr__(self, "append_only_caches", FrozenDict(append_only_caches or {}))
def to_run_in_sandbox_request(self) -> RunInSandboxRequest:
return RunInSandboxRequest(
|
codereview_new_python_data_7250
|
async def run_in_sandbox_request(
runnable_dependencies = execution_environment.runnable_dependencies
extra_env: dict[str, str] = dict(run_request.extra_env or {})
- extra_path = extra_env.get("PATH", None)
- if extra_path is not None:
- del extra_env["PATH"]
extra_sandbox_contents = []
```suggestion
extra_path = extra_env.pop("PATH", None)
```
async def run_in_sandbox_request(
runnable_dependencies = execution_environment.runnable_dependencies
extra_env: dict[str, str] = dict(run_request.extra_env or {})
+ extra_path = extra_env.pop("PATH", None)
extra_sandbox_contents = []
|
codereview_new_python_data_7251
|
async def _prepare_process_request_from_target(
fetch_env_vars=shell_command.get(ShellCommandExtraEnvVarsField).value or (),
append_only_caches=merged_extras.append_only_caches,
supplied_env_var_values=FrozenDict(extra_env),
- immutable_input_digests=FrozenDict(merged_extras.immutable_input_digests),
log_on_process_errors=_LOG_ON_PROCESS_ERRORS,
log_output=shell_command[ShellCommandLogOutputField].value,
)
Isn't `merged_extras.immutable_input_digests` immutable already (so no need to wrap it in `FrozenDict`)?
async def _prepare_process_request_from_target(
fetch_env_vars=shell_command.get(ShellCommandExtraEnvVarsField).value or (),
append_only_caches=merged_extras.append_only_caches,
supplied_env_var_values=FrozenDict(extra_env),
+ immutable_input_digests=merged_extras.immutable_input_digests,
log_on_process_errors=_LOG_ON_PROCESS_ERRORS,
log_output=shell_command[ShellCommandLogOutputField].value,
)
|
codereview_new_python_data_7252
|
async def export(
env={"PATH": environment.get("PATH", ""), **cmd.extra_env},
run_in_workspace=True,
)
- ipr = await Effect( # noqa: PNT30: requires triage
- InteractiveProcessResult, InteractiveProcess, ip
- )
if ipr.exit_code:
raise ExportError(f"Failed to write {result.description} to {result_dir}")
if result.resolve:
`Effect`s will tend to be inherently sequential: ditto the case in `src/python/pants/core/goals/deploy.py` although it is less obvious there due to indirection. If skipping `await Effect` entirely is possible, that would be handy.
async def export(
env={"PATH": environment.get("PATH", ""), **cmd.extra_env},
run_in_workspace=True,
)
+ ipr = await Effect(InteractiveProcessResult, InteractiveProcess, ip)
if ipr.exit_code:
raise ExportError(f"Failed to write {result.description} to {result_dir}")
if result.resolve:
|
codereview_new_python_data_7253
|
class DistBuildResult:
wheel_config_settings = {wheel_config_settings_str}
sdist_config_settings = {sdist_config_settings_str}
-# Python 2.7 doesn't have the exist_ok arg on os.makedirs(), so we have to emulate it.
-def safe_mkdir(path):
- if not os.path.exists(path):
- parent = os.path.split(path)[0]
- if parent and parent != os.path.sep:
- safe_mkdir(parent)
- os.mkdir(path)
-
-safe_mkdir(dist_dir)
wheel_path = backend.build_wheel(dist_dir, wheel_config_settings) if build_wheel else None
sdist_path = backend.build_sdist(dist_dir, sdist_config_settings) if build_sdist else None
This seems a lot easier to verify as correct at a glance: https://github.com/pantsbuild/pex/blob/2693db7c1ec6e1c810126820f250db206391969a/pex/common.py#L271-L275
Bonus: it avoids check-and-act races. Probably not a concern here, though.
class DistBuildResult:
wheel_config_settings = {wheel_config_settings_str}
sdist_config_settings = {sdist_config_settings_str}
+# Python 2.7 doesn't have the exist_ok arg on os.makedirs().
+try:
+ os.makedirs(dist_dir)
+except OSError as e:
+ if e.errno != errno.EEXIST:
+ raise
+
wheel_path = backend.build_wheel(dist_dir, wheel_config_settings) if build_wheel else None
sdist_path = backend.build_sdist(dist_dir, sdist_config_settings) if build_sdist else None
|
codereview_new_python_data_7254
|
def strval(val: str | Callable[[], str]) -> str:
return val
else:
return val()
This should probably just use a ternary?
def strval(val: str | Callable[[], str]) -> str:
return val
else:
return val()
+
+
+def help_text(val: str | Callable[[], str]) -> str | Callable[[], str]:
+ """Convenience method for defining an optionally lazy-evaluated softwrapped help string.
+
+ This exists because `mypy` does not respect the type hints defined on base `Field` and `Target`
+ classes.
+ """
+ # This can go away when https://github.com/python/mypy/issues/14702 is fixed
+ if isinstance(val, str):
+ return softwrap(val)
+ else:
+ return lambda: softwrap(val()) # type: ignore[operator]
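The ternary the reviewer suggests would look roughly like this (a sketch only; the applied change above adds `help_text` rather than rewriting `strval`):
```python
from __future__ import annotations

from typing import Callable


def strval(val: str | Callable[[], str]) -> str:
    # Same behaviour as the if/else form, collapsed into a conditional expression.
    return val if isinstance(val, str) else val()
```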
|
codereview_new_python_data_7255
|
class RunShellCommandWorkdirField(StringField):
help = softwrap(
"Sets the current working directory of the command that is `run`. Values that begin with "
"`.` are relative to the directory you are running Pants from. Values that begin with `/` "
- "are from the root of your filesystem."
)
If `workdir` begins with `/`, is it not rooted in the project root?
Seems like it would be a bad idea to support paths outside the project in BUILD files?
Yeah, looking further down, IIUC it's rooted in the build root.
class RunShellCommandWorkdirField(StringField):
help = softwrap(
"Sets the current working directory of the command that is `run`. Values that begin with "
"`.` are relative to the directory you are running Pants from. Values that begin with `/` "
+ "are from your project root."
)
|
codereview_new_python_data_7256
|
class PythonSetup(Subsystem):
advanced=True,
)
tailor_py_typed_targets = BoolOption(
- default=False,
help=softwrap(
"""
If true, add `resource` targets for marker files named `py.typed` with the `tailor` goal.
I followed what we have set for other similar options and set it to `False`
class PythonSetup(Subsystem):
advanced=True,
)
tailor_py_typed_targets = BoolOption(
+ default=True,
help=softwrap(
"""
If true, add `resource` targets for marker files named `py.typed` with the `tailor` goal.
|
codereview_new_python_data_7257
|
def path(self) -> str:
"""Returns the build root for the current workspace."""
if self._root_dir is None:
# Do not remove/change this env var without coordinating with `pantsbuild/scie-pants` as
- # it is being used when running Pants from sources on a repo.
override_buildroot = os.environ.get("PANTS_BUILDROOT_OVERRIDE", None)
if override_buildroot:
self._root_dir = override_buildroot
This part is not true; it's being used in all invocations because of legacy oddness. The current sentinels are `pants`, `BUILD_ROOT` and `BUILDROOT` but _not_ `pants.toml`. In a repo without `pants` checked in, this means scie-pants needs to get sneaky.
def path(self) -> str:
"""Returns the build root for the current workspace."""
if self._root_dir is None:
# Do not remove/change this env var without coordinating with `pantsbuild/scie-pants` as
+ # it is being used when bootstrapping Pants.
override_buildroot = os.environ.get("PANTS_BUILDROOT_OVERRIDE", None)
if override_buildroot:
self._root_dir = override_buildroot
|
codereview_new_python_data_7258
|
from pants.backend.python.framework.stevedore import python_target_dependencies
from pants.backend.python.framework.stevedore import rules as stevedore_rules
-from pants.backend.python.framework.stevedore import setup_py_kwargs, target_types_rules
-from pants.backend.python.framework.stevedore.target_types import StevedoreExtension
-# TODO: add stevedore_namespaces field to python_sources?
def rules():
return [
- *target_types_rules.rules(),
*stevedore_rules.rules(),
*python_target_dependencies.rules(),
- *setup_py_kwargs.rules(),
]
-
-
-def target_types():
- return [StevedoreExtension]
sounds reasonable. I need to read up more on stevedore as well, though. :)
from pants.backend.python.framework.stevedore import python_target_dependencies
from pants.backend.python.framework.stevedore import rules as stevedore_rules
+from pants.backend.python.framework.stevedore.target_types import StevedoreNamespace
+from pants.build_graph.build_file_aliases import BuildFileAliases
+
+def build_file_aliases():
+ return BuildFileAliases(objects={"stevedore_namespace": StevedoreNamespace})
def rules():
return [
*stevedore_rules.rules(),
*python_target_dependencies.rules(),
]
|
codereview_new_python_data_7259
|
async def infer_stevedore_namespaces_dependencies(
for namespace, entry_points in distribution_entry_points.explicit_modules.items()
for name, entry_point in entry_points.items()
]
- all_module_owners = iter(
- await MultiGet(
- Get(PythonModuleOwners, PythonModuleOwnersRequest(entry_point.module, resolve=None))
- for _, _, _, entry_point, _ in all_module_entry_points
- )
)
module_owners: OrderedSet[Address] = OrderedSet()
for (address, namespace, name, entry_point, explicitly_provided_deps), owners in zip(
`iter` usages that go into named variables scare me a bit (in languages with aliasing), because they can only really safely be used by one consumer. In this case, you only have one consumer, but a refactor could accidentally add another, and it would be tremendously confusing.
In this case, it doesn't actually look like you need the `iter` call at all though... `MultiGet` is guaranteed to return a tuple, which should be fine for `zip`ing?
async def infer_stevedore_namespaces_dependencies(
for namespace, entry_points in distribution_entry_points.explicit_modules.items()
for name, entry_point in entry_points.items()
]
+ all_module_owners = await MultiGet(
+ Get(PythonModuleOwners, PythonModuleOwnersRequest(entry_point.module, resolve=None))
+ for _, _, _, entry_point, _ in all_module_entry_points
)
module_owners: OrderedSet[Address] = OrderedSet()
for (address, namespace, name, entry_point, explicitly_provided_deps), owners in zip(
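A short illustration of the aliasing concern, using only the standard library: a named iterator is spent by its first consumer, whereas the tuple returned by `MultiGet` can be zipped any number of times.
```python
owners_iter = iter(["a", "b", "c"])
first = list(zip([1, 2, 3], owners_iter))
second = list(zip([1, 2, 3], owners_iter))
assert first == [(1, "a"), (2, "b"), (3, "c")]
assert second == []  # silently empty: the iterator was already consumed

owners_tuple = ("a", "b", "c")  # a tuple, like MultiGet's result, is reusable
assert list(zip([1, 2], owners_tuple)) == list(zip([1, 2], owners_tuple))
```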
|
codereview_new_python_data_7260
|
def test_no_expand_when_no_aliases() -> None:
pytest.raises(
CliAliasCycleError,
match=(
- r"CLI alias cycle detected in `\[cli\]\.alias` option:"
+ r"other-alias -> cycle -> other-alias"
),
),
Now there's no space after the colon... I guess there should be a newline instead?
def test_no_expand_when_no_aliases() -> None:
pytest.raises(
CliAliasCycleError,
match=(
+ r"CLI alias cycle detected in `\[cli\]\.alias` option:\n"
+ r"other-alias -> cycle -> other-alias"
),
),
|
codereview_new_python_data_7261
|
def test_does_not_infer_dependency_when_docker_build_arg_overwrites(
)
tgt = rule_runner.get_target(Address("src/downstream", target_name="image"))
- rule_runner.set_options(["--docker-build-args=BASE_IMAGE=alpine:3.17.0"])
inferred = rule_runner.request(
InferredDependencies,
[InferDockerDependencies(tgt[DockerImageDependenciesField])],
I think you still need to preserve the `env_inherit={"PATH", "PYENV_ROOT", "HOME"},` here.
def test_does_not_infer_dependency_when_docker_build_arg_overwrites(
)
tgt = rule_runner.get_target(Address("src/downstream", target_name="image"))
+ rule_runner.set_options(
+ ["--docker-build-args=BASE_IMAGE=alpine:3.17.0"],
+ env_inherit={"PATH", "PYENV_ROOT", "HOME"},
+ )
inferred = rule_runner.request(
InferredDependencies,
[InferDockerDependencies(tgt[DockerImageDependenciesField])],
|