codereview_python_data_11832
        self.string = string
        self.expr = expr

-    # def analyse_types doesn't exist because nothing needs doing
-
-    def coerce_to_pyobject(self, env):
-        return self.string

    def analyse_as_type(self, env):
        # for compatibility when used as a return_type_node, have this interface too

This is dangerous. Calling `coerce_to_pyobject()` is intended to be semantically like `coerce_to(py_object_type)`. Here, both would do entirely different things.

        self.string = string
        self.expr = expr

+    def analyse_types(self, env):
+        return self  # nothing needs doing

    def analyse_as_type(self, env):
        # for compatibility when used as a return_type_node, have this interface too
codereview_python_data_11841
        closed_orders = []
        transactions = []

        for sid, asset_orders in iteritems(self.open_orders):
-            asset = self.asset_finder.retrieve_asset(sid)
-            for order, txn in self.slippage_func(bar_data, asset, asset_orders):
                direction = math.copysign(1, txn.amount)
                per_share, total_commission = self.commission.calculate(txn)
                txn.price += per_share * direction

This is making a SQL query per order. You probably want `retrieve_all` to do this in one query.

        closed_orders = []
        transactions = []

+        assets = self.asset_finder.retrieve_all(self.open_orders.keys())
+        asset_dict = {asset.sid: asset for asset in assets}
+
        for sid, asset_orders in iteritems(self.open_orders):
+            asset = asset_dict[sid]
+
+            for order, txn in \
+                    self.slippage_func(bar_data, asset, asset_orders):
                direction = math.copysign(1, txn.amount)
                per_share, total_commission = self.commission.calculate(txn)
                txn.price += per_share * direction
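This review generalizes to any N+1 query pattern: fetch everything in one round trip, index it in memory, then loop. A minimal, self-contained sketch of the idea (`Asset` and `FakeFinder` here are hypothetical stand-ins, not zipline API):

```python
from collections import namedtuple

Asset = namedtuple("Asset", ["sid", "symbol"])

class FakeFinder:
    """Hypothetical stand-in for an asset finder with bulk retrieval."""
    def __init__(self, assets):
        self._assets = {a.sid: a for a in assets}

    def retrieve_all(self, sids):
        # One batched lookup/query for all sids instead of one per sid.
        return [self._assets[sid] for sid in sids]

finder = FakeFinder([Asset(1, "AAPL"), Asset(2, "MSFT")])
open_orders = {1: ["order-a"], 2: ["order-b"]}

assets = finder.retrieve_all(open_orders.keys())
asset_dict = {asset.sid: asset for asset in assets}
for sid, orders in open_orders.items():
    asset = asset_dict[sid]  # O(1) dict hit, no per-iteration query
```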
codereview_python_data_11844
    assert before_resp["result"][DATA] == after_resp["result"][DATA]


def test_reject_with_unacceptable_role_in_constraint(looper,
                                                     sdk_wallet_trustee,
                                                     sdk_pool_handle):

Why do we remove `CONSTRAINT` here?

    assert before_resp["result"][DATA] == after_resp["result"][DATA]


+def test_reject_with_empty_rules_list(looper,
+                                      sdk_wallet_trustee,
+                                      sdk_pool_handle):
+    with pytest.raises(RequestNackedException,
+                       match="InvalidClientRequest.*length should be at least 1"):
+        sdk_send_and_check_auth_rules_request(looper,
+                                              sdk_wallet_trustee,
+                                              sdk_pool_handle,
+                                              rules=[])
+
+
def test_reject_with_unacceptable_role_in_constraint(looper,
                                                     sdk_wallet_trustee,
                                                     sdk_pool_handle):
codereview_python_data_11849
file_dict["path"] = path mime = mimetypes.guess_type(file_dict["path"])[0] or "application/octet-stream" - file_dict["mime-type"] = mime self.content_encoding = self.config.get('content-encoding', None) This changes the behavior. Original behavior was "set if not set", while new is "just set". file_dict["path"] = path mime = mimetypes.guess_type(file_dict["path"])[0] or "application/octet-stream" + file_dict.get("mime-type", mime, force_set=True) self.content_encoding = self.config.get('content-encoding', None)
codereview_python_data_11852
                numerical stability. Default 1e-7.

        Returns:
-            AssignResult: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'

```python
:obj:`AssignResult`:
```

                numerical stability. Default 1e-7.

        Returns:
+            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
codereview_python_data_11853
        # major version and the minor version in the prebuilt plugin is lower than the requested one.
        self.can_install_prebuilt = not self.always_build and \
            bool(self.tf_compiler) and \
-            (StrictVersion(self.default_cpp_version) >= StrictVersion('5.0') and \
-            StrictVersion(self.tf_compiler) >= StrictVersion('5.0')) and \
            self.is_compatible_with_prebuilt_bin
        self.prebuilt_plugins_available = []
        self.prebuilt_plugin_best_match = None

Why do you check the system compiler here? We don't need that to check the prebuilt plugin; only the second check (the TF compiler version) is needed.

        # major version and the minor version in the prebuilt plugin is lower than the requested one.
        self.can_install_prebuilt = not self.always_build and \
            bool(self.tf_compiler) and \
+            StrictVersion(self.tf_compiler) >= StrictVersion('5.0') and \
            self.is_compatible_with_prebuilt_bin
        self.prebuilt_plugins_available = []
        self.prebuilt_plugin_best_match = None
codereview_python_data_11856
        for apkg, advisory, installed in apkg_adv_insts:
            advisories.add(advisory2info(advisory, installed))

-        for advisory in sorted(advisories, key=lambda x: x.lower()):
-            print(advisory)
-            print()

This will create an empty line at the end. What about: print("\n".join(sorted(advisories, key=lambda x: x.lower())))

        for apkg, advisory, installed in apkg_adv_insts:
            advisories.add(advisory2info(advisory, installed))

+        print("\n\n".join(sorted(advisories, key=lambda x: x.lower())))
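The `join` rewrite works because `str.join` places the separator only between items, so the trailing blank line disappears and the loop collapses into one call. A quick runnable check:

```python
advisories = {"FEDORA-2021-2: fix bar", "FEDORA-2021-1: fix foo"}

# The "\n\n" separator appears only *between* entries, so the output
# keeps a blank line between advisories but none at the end.
print("\n\n".join(sorted(advisories, key=lambda x: x.lower())))
```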
codereview_python_data_11867
    assert str(dt.min(f.A)) == str(f.A.min())
    assert str(dt.min(f[:])) == str(f[:].min())
    DT = dt.Frame(A=[2, 3, 5, 5, 9, -1, 2.2])
-    assert_equals(DT[:, f.A.min()], DT[:, dt.min(f.A)])
\ No newline at end of file

A newline could again be useful here. Btw, normally there is a setting in the editor that could make this automatic.

    assert str(dt.min(f.A)) == str(f.A.min())
    assert str(dt.min(f[:])) == str(f[:].min())
    DT = dt.Frame(A=[2, 3, 5, 5, 9, -1, 2.2])
\ No newline at end of file
+    assert_equals(DT[:, f.A.min()], DT[:, dt.min(f.A)])
codereview_python_data_11869
        'locale/*/LC_MESSAGES/electrum.mo',
    ]
    },
-    scripts=['electrum', 'privkey2electrum'],
    data_files=data_files,
    description="Lightweight Bitcoin Wallet",
    author="Thomas Voegtlin",

Leave this out and move the script to the scripts folder.

        'locale/*/LC_MESSAGES/electrum.mo',
    ]
    },
+    scripts=['electrum'],
    data_files=data_files,
    description="Lightweight Bitcoin Wallet",
    author="Thomas Voegtlin",
codereview_python_data_11873
                'artist_name': data['a{}'.format(append_key)],
                'track_name': data['t{}'.format(append_key)],
                'release_name': data['b{}'.format(append_key)],
-                'additional_info': {
-                    'source': data.get('o{}'.format(append_key), '')
-                }
            }
        }
    except KeyError:

This may add an empty key at times, which I would normally object to, but we don't actually store this data for a now listening request, right?

                'artist_name': data['a{}'.format(append_key)],
                'track_name': data['t{}'.format(append_key)],
                'release_name': data['b{}'.format(append_key)],
+                'additional_info': {}
            }
        }
    except KeyError:
codereview_python_data_11877
    r'.*libc\+\+\.so',
    r'.*libc\+\+_shared\.so',
    r'.*libstdc\+\+\.so',
-    r'.*libc-\d+\.\d+(?:\.\d+)?\.so',
]

IGNORE_CRASH_TYPES_FOR_ABRT_BREAKPOINT_AND_ILLS = [

Isn't this too specific? I think `libc-.*\.so` would be better.

    r'.*libc\+\+\.so',
    r'.*libc\+\+_shared\.so',
    r'.*libstdc\+\+\.so',
+    r'.*libc-.*\.so',
]

IGNORE_CRASH_TYPES_FOR_ABRT_BREAKPOINT_AND_ILLS = [
codereview_python_data_11879
This is not a standard library, and should be moved down below, separated from the standard libraries by a newline.
codereview_python_data_11888
                goal_state_fetched = True
            except Exception as e:
                msg = u"Exception retrieving the goal state: {0}".format(ustr(traceback.format_exc()))
-                logger.warn(msg)
                add_event(AGENT_NAME, op=WALAEventOperation.FetchGoalState, version=CURRENT_VERSION, is_success=False, message=msg)

        if goal_state_fetched:

Wouldn't `add_event` also log the message if `is_success=False`?

                goal_state_fetched = True
            except Exception as e:
                msg = u"Exception retrieving the goal state: {0}".format(ustr(traceback.format_exc()))
                add_event(AGENT_NAME, op=WALAEventOperation.FetchGoalState, version=CURRENT_VERSION, is_success=False, message=msg)

        if goal_state_fetched:
codereview_python_data_11897
        self.selinux = None
        self.disable_route_warning = False
        self.jit_enabled = False

    @staticmethod
    def get_service_name():

It'll be better to define service_name here, rather than defining it in all the child classes.

```suggestion
        self.service_name = self.get_service_name()
```

        self.selinux = None
        self.disable_route_warning = False
        self.jit_enabled = False
+        self.service_name = self.get_service_name()

    @staticmethod
    def get_service_name():
codereview_python_data_11899
        self.phases_enabled = False
        while not await self.is_closeable():
            await asyncio.sleep(10)
-        await self.update_operation(services)  # TODO: Call no longer needed if method continues to just have an agent update
        await self._run_phases(services, planner)
        await self._cleanup_operation(services)
        await self.close()

Confirmed. Remove these 2 lines.

        self.phases_enabled = False
        while not await self.is_closeable():
            await asyncio.sleep(10)
        await self._run_phases(services, planner)
        await self._cleanup_operation(services)
        await self.close()
codereview_python_data_11901
    headers = {'content-type': 'application/xhtml+xml'}
    content_str = _list_dead_letter_source_queues(QUEUE_ATTRIBUTES, queue_url)
-    new_response = Response()
-    new_response.status_code = 200
-    new_response.headers = headers
-    new_response._content = content_str
-    new_response.headers['content-length'] = len(new_response._content)
-
-    return new_response

    if 'QueueName' in req_data:
        encoded_data = urlencode(req_data, doseq=True) if method == 'POST' else ''

We can simplify lines 234-238 by adding an import at the top of the file and using `requests_response`:

```
from localstack.utils.aws.aws_responses import requests_response
...
return requests_response(content_str, headers=headers)
```

    headers = {'content-type': 'application/xhtml+xml'}
    content_str = _list_dead_letter_source_queues(QUEUE_ATTRIBUTES, queue_url)
+    return requests_response(content_str, headers=headers)

    if 'QueueName' in req_data:
        encoded_data = urlencode(req_data, doseq=True) if method == 'POST' else ''
codereview_python_data_11902
"""Make an educated guess as to whether it would be appropriate to print the blob. - The current rules are this will print if: 1. The output from Hypothesis appears to be unsuitable for use with :func:`~hypothesis.example`. Clarify that output is printed if both rules match: :func:`~hypothesis.example`, and """Make an educated guess as to whether it would be appropriate to print the blob. + The current rules are that this will print if both: 1. The output from Hypothesis appears to be unsuitable for use with :func:`~hypothesis.example`.
codereview_python_data_11904
        min_y = np.maximum(0, min_y + offset)
        max_y = np.minimum(h, max_y + offset)

-        # clip box
-        min_x = np.clip(min_x, 0, w)
-        min_y = np.clip(min_y, 0, h)
-        max_x = np.clip(max_x, 0, w)
-        max_y = np.clip(max_y, 0, h)

        # the boxs translated outside of image will be filtered along with
        # the corresponding masks, by invoking ``_filter_invalid``.
        results[key] = np.concatenate([min_x, min_y, max_x, max_y],

It seems `clip box` is redundant, since `np.maximum` and `np.minimum` are already used.

        min_y = np.maximum(0, min_y + offset)
        max_y = np.minimum(h, max_y + offset)

        # the boxs translated outside of image will be filtered along with
        # the corresponding masks, by invoking ``_filter_invalid``.
        results[key] = np.concatenate([min_x, min_y, max_x, max_y],
codereview_python_data_11909
        for r in Selection.unfold_entities(m, "R"):
            self.assertEqual(str(r), "R")
            if r in fm:
-                print(fm[r])


if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)

You still have a print statement here. Again, can you use something like ``self.assertEqual(fm[r], expected_value)``, which might be tricky given this is in a loop.

        for r in Selection.unfold_entities(m, "R"):
            self.assertEqual(str(r), "R")
            if r in fm:
+                self.assertTrue(str(fm[r]).startswith("<Fragment length=5 id="))


if __name__ == '__main__':
    runner = unittest.TextTestRunner(verbosity=2)
codereview_python_data_11912
    filtermodel = sortfilter.CompletionFilterModel(model, parent=completionview)
    completionview.set_model(filtermodel)

-    direction = 'prev' if count < 0 else 'next'
-    for _ in range(abs(count)):
-        completionview.completion_item_focus(direction)
    idx = completionview.selectionModel().currentIndex()
    assert filtermodel.data(idx) == expected

Why remove this? It seems like we should keep this around as a regression test, unless we can guarantee this will never be called without a model set (does your new code guarantee that?)

    filtermodel = sortfilter.CompletionFilterModel(model, parent=completionview)
    completionview.set_model(filtermodel)

+    for _ in range(count):
+        completionview.completion_item_focus(which)
    idx = completionview.selectionModel().currentIndex()
    assert filtermodel.data(idx) == expected
codereview_python_data_11913
    return execute_resource_action(resource_id, resources, stack_name, ACTION_DELETE)


-def update_dynamodb_index_resource(resource):
-    if resource.get('Properties').get('BillingMode') == 'PAY_PER_REQUEST':
-        for index_iterator in range(0, len(resource.get('Properties').get('GlobalSecondaryIndexes'))):
-            if not resource['Properties']['GlobalSecondaryIndexes'][index_iterator].get('ProvisionedThroughput'):
-                resource['Properties']['GlobalSecondaryIndexes'][index_iterator]['ProvisionedThroughput'] = \
-                    {'ReadCapacityUnits': 99, 'WriteCapacityUnits': 99}
-
-
def execute_resource_action(resource_id, resources, stack_name, action_name):
    resource = resources[resource_id]
    resource_type = get_resource_type(resource)

Nitpick: could this be simplified to:

```
for glob_index in resource.get('Properties', {}).get('GlobalSecondaryIndexes', []):
    if not glob_index.get('ProvisionedThroughput'):
        glob_index['ProvisionedThroughput'] = {'ReadCapacityUnits': 99, 'WriteCapacityUnits': 99}
```

    return execute_resource_action(resource_id, resources, stack_name, ACTION_DELETE)


def execute_resource_action(resource_id, resources, stack_name, action_name):
    resource = resources[resource_id]
    resource_type = get_resource_type(resource)
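The suggested one-liner leans on `dict.get` with a default at each level: a missing `Properties` or `GlobalSecondaryIndexes` key silently yields an empty iteration instead of an `AttributeError`, and iterating the list directly removes the `range(len(...))` bookkeeping. A sketch with made-up resource data:

```python
resource = {"Properties": {"BillingMode": "PAY_PER_REQUEST",
                           "GlobalSecondaryIndexes": [{"IndexName": "gsi1"}]}}

# Defaults at every level tolerate missing keys; mutating glob_index
# mutates the dict inside the resource, so no index arithmetic is needed.
for glob_index in resource.get("Properties", {}).get("GlobalSecondaryIndexes", []):
    if not glob_index.get("ProvisionedThroughput"):
        glob_index["ProvisionedThroughput"] = {"ReadCapacityUnits": 99,
                                               "WriteCapacityUnits": 99}

print(resource["Properties"]["GlobalSecondaryIndexes"][0]["ProvisionedThroughput"])
```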
codereview_python_data_11920
    url = config.get_edge_url()
    headers = aws_stack.mock_aws_request_headers("cloudwatch")
-    authorization = mock_aws_request_headers("monitoring")["Authorization"]

    headers.update(
        {

```suggestion
    authorization = aws_stack.mock_aws_request_headers("monitoring")["Authorization"]
```
Just for consistency with L65.

    url = config.get_edge_url()
    headers = aws_stack.mock_aws_request_headers("cloudwatch")
+    authorization = aws_stack.mock_aws_request_headers("monitoring")["Authorization"]

    headers.update(
        {
codereview_python_data_11921
expect=["Nym {} added".format(idr)]) -def test_send_same_nyms_fails_when_batched( be, do, poolNodesStarted, newStewardCli): be(newStewardCli) Why the tests is called `fails`? It looks like the test checks a positive case. expect=["Nym {} added".format(idr)]) +def test_send_same_nyms_only_first_gets_written( be, do, poolNodesStarted, newStewardCli): be(newStewardCli)
codereview_python_data_11923
from localstack.utils.aws import aws_stack
from localstack.utils.common import short_uid, to_str, save_file, TMP_FILES, mkdir
from localstack.utils.tagging import TaggingService

EVENTS_TMP_DIR = os.path.join(config.TMP_FOLDER, 'cw_events')

Nit: we could put this on line 24, to reduce duplication a bit (with lines 34, 38, and 43). For `POST /` requests, we can safely assume that the payload needs to be valid JSON.

from localstack.utils.aws import aws_stack
from localstack.utils.common import short_uid, to_str, save_file, TMP_FILES, mkdir
from localstack.utils.tagging import TaggingService
+from localstack.constants import APPLICATION_AMZ_JSON_1_1

EVENTS_TMP_DIR = os.path.join(config.TMP_FOLDER, 'cw_events')
codereview_python_data_11935
            concat_data = obj.interface.concatenate([dense_data, obj], datatype=[dtype])
            reindexed = concat_data.reindex([xdim, ydim], vdims)
            if pd:
-                df = reindexed.dframe(copy=False)
                df = df.groupby([xdim, ydim], sort=False).first().reset_index()
                agg = reindexed.clone(df)
            else:

Why not use ``reindexed.interface.dframe(dimensions=None, copy=False)`` instead of exposing the copy keyword argument at the element level? For ``copy=False`` to work, you are already assuming a dataframe type interface is being used...

            concat_data = obj.interface.concatenate([dense_data, obj], datatype=[dtype])
            reindexed = concat_data.reindex([xdim, ydim], vdims)
            if pd:
+                df = PandasInterface.as_dframe(reindexed)
                df = df.groupby([xdim, ydim], sort=False).first().reset_index()
                agg = reindexed.clone(df)
            else:
codereview_python_data_11936
    # Don't prefix with project name if it's the default project.
    return binary

-  seperator = '/' if project.startswith('/') else '_'
-  project_prefix = project + seperator
  if binary.startswith(project_prefix):
    return binary

Nit: the correct spelling is "separator".

    # Don't prefix with project name if it's the default project.
    return binary

+  # E.g. for project names that are in the form of paths like:
+  # //third_party/llvm, the result will be //third_party/llvm:fuzz_target
+  separator = ':' if project.startswith('/') else '_'
+  project_prefix = project + separator
  if binary.startswith(project_prefix):
    return binary
codereview_python_data_11939
        e_val, e_vec = np.linalg.eig(atomgroup.moment_of_inertia(pbc=pbc))

        # Sort
-        indices = np.argsort(e_val[::-1])
        # Return transposed in more logical form. See Issue 33.
        return e_vec[:, indices].T

Reverse the indices, not the e_val, because according to the docs for `linalg.eig`, **the eigenvalues are not necessarily ordered.**

```python
indices = np.argsort(e_val)[::-1]
```

        e_val, e_vec = np.linalg.eig(atomgroup.moment_of_inertia(pbc=pbc))

        # Sort
+        indices = np.argsort(e_val)[::-1]
        # Return transposed in more logical form. See Issue 33.
        return e_vec[:, indices].T
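The two expressions differ because one reverses the sorted indices while the other sorts indices into the reversed array. A quick demonstration:

```python
import numpy as np

e_val = np.array([2.0, 9.0, 5.0])

# Correct: ascending argsort, then reverse the indices -> descending order.
print(np.argsort(e_val)[::-1])   # [1 2 0], i.e. 9.0, 5.0, 2.0

# Wrong: these indices refer to the *reversed* array, not the original.
print(np.argsort(e_val[::-1]))   # [2 0 1]
```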
codereview_python_data_11940
self.logger.info("Update extension [{0}]".format(update_cmd)) self.launch_command(update_cmd, timeout=900, - extension_error_code=1008, env={'VERSION': version}, handler_configuration=handler_configuration) except ExtensionError: Is this error code not parametric? If not, can you create a descriptive parameter? self.logger.info("Update extension [{0}]".format(update_cmd)) self.launch_command(update_cmd, timeout=900, + extension_error_code=ExtensionErrorCodes.PluginUpdateProcessingFailed, env={'VERSION': version}, handler_configuration=handler_configuration) except ExtensionError:
codereview_python_data_11942
        return fastquery.FastQuery(self.connection)

    def get_validators(self, height=None):
-        result = list(backend.query.get_validator_set(self.connection, height))[0]
        validators = result['validators']
        for v in validators:
            v.pop('address')

It is cumbersome to do `list(..)[0]` everywhere, taking into account that it always contains a single validator set. What do you think about returning the validators from `get_validator_set`? So that callers just do:

```
validator_set = get_validator_set(..)
for v in validator_set['validators']:
    ...
```

        return fastquery.FastQuery(self.connection)

    def get_validators(self, height=None):
+        result = backend.query.get_validator_set(self.connection, height)
        validators = result['validators']
        for v in validators:
            v.pop('address')
codereview_python_data_11943
        else:
            scroll_pos = '{:2}%'.format(y)

-        fields['scroll_pos'] = '{}'.format(scroll_pos)
        fmt = config.get('tabs', 'title-format')
        self.tabBar().setTabText(idx, fmt.format(**fields))

This should be `str(scroll_pos)` instead (they do the same, but it's more idiomatic).

        else:
            scroll_pos = '{:2}%'.format(y)

+        fields['scroll_pos'] = str(scroll_pos)
        fmt = config.get('tabs', 'title-format')
        self.tabBar().setTabText(idx, fmt.format(**fields))
codereview_python_data_11944
    expected = assumed_iteration + (params_fit['early_stopping_rounds']
                                    if eval_set_name != 'training'
                                    and assumed_iteration != gbm.n_estimators else 0)
-    assert expected == actual
-    assert (assumed_iteration
-            if eval_set_name != 'training' else gbm.n_estimators == gbm.best_iteration_)

    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

Please make this assert more readable by splitting it into two asserts.

```suggestion
    if eval_set_name != 'training':
        assert assumed_iteration == gbm.best_iteration_
    else:
        assert gbm.n_estimators == gbm.best_iteration_
```

    expected = assumed_iteration + (params_fit['early_stopping_rounds']
                                    if eval_set_name != 'training'
                                    and assumed_iteration != gbm.n_estimators else 0)
+    if eval_set_name != 'training':
+        assert assumed_iteration == gbm.best_iteration_
+    else:
+        assert gbm.n_estimators == gbm.best_iteration_

    X, y = load_boston(return_X_y=True)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
codereview_python_data_11957
    This matrix is symmetric with zeros on the diagonal.

    .. versionchanged:: 1.0.0
-       ``save()`` method has been removed. You can use ``np.save`` on
-       :attr:`dist_matrix` instead.
    """

    def __init__(self, u, select='all', metric=rmsd, cutoff=1E0-5,

``np.save`` --> ``np.save()`` for consistency?

    This matrix is symmetric with zeros on the diagonal.

    .. versionchanged:: 1.0.0
+       ``save()`` method has been removed. You can use ``np.save()`` on
+       :attr:`DistanceMatrix.dist_matrix` instead.
    """

    def __init__(self, u, select='all', metric=rmsd, cutoff=1E0-5,
codereview_python_data_11961
            traceback.print_exc()

    async def generate_operation_report(self, op_id, save=False):
-        operations = await self.get_service('data_svc').explode_operation(dict(id=op_id))
-        operation = operations[0]
        operation['result'] = []
        for link in operation['chain']:
            results = await self.get_service('data_svc').explode_results(criteria=dict(link_id=link['id']))

Let's make this: operations = (await ...)[0]

            traceback.print_exc()

    async def generate_operation_report(self, op_id, save=False):
+        operation = (await self.get_service('data_svc').explode_operation(dict(id=op_id)))[0]
        operation['result'] = []
        for link in operation['chain']:
            results = await self.get_service('data_svc').explode_results(criteria=dict(link_id=link['id']))
codereview_python_data_11964
    more.
    """


class DFrame(PandasDFrame):
    """

I think it looks better if you add `pass` as well.

    more.
    """
+    pass


class DFrame(PandasDFrame):
    """
codereview_python_data_11965
"Policy": data.get("Policy"), "Region": region, "Description": data.get("Description"), - "Arn": "arn:aws:kms:%s:000000000000:key/%s" % (region, key_id), "_key_": key, } region_details.key_pairs[key_id] = result Let's better use the `kms_key_arn()` util function from `aws_stack.py` here. (We should avoid hardcoding the account ID `000000000000`, as it may change over time) "Policy": data.get("Policy"), "Region": region, "Description": data.get("Description"), + "Arn": aws_stack.kms_key_arn(key_id), "_key_": key, } region_details.key_pairs[key_id] = result
codereview_python_data_11966
        # initialize, if not done yet
        if not hasattr(plugin, "_initialized"):
-            LOG.debug("Initializing Lambda executor plugin %s" % plugin.__class__)
            plugin.initialize()
            plugin._initialized = True

Nit: better to use variadic arguments with loggers (replace `%` with a `,`).

        # initialize, if not done yet
        if not hasattr(plugin, "_initialized"):
+            LOG.debug("Initializing Lambda executor plugin %s", plugin.__class__)
            plugin.initialize()
            plugin._initialized = True
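The variadic form defers string formatting until the logging framework knows the record will actually be emitted, while `%` builds the message unconditionally. A small sketch:

```python
import logging

logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out
LOG = logging.getLogger("demo")

class Plugin:
    pass

# Eager: the string is built even though the DEBUG record is discarded.
LOG.debug("Initializing plugin %s" % Plugin)

# Lazy: interpolation happens only if the record passes the level check,
# and the constant format string is friendlier to log aggregation tools.
LOG.debug("Initializing plugin %s", Plugin)
```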
codereview_python_data_11967
            started=worker.started,
            state=worker.state,
            first_task_display_name=self._first_task_display_name(worker),
-            unread_messages=len(worker.rpc_messages),
            **worker.info
        ) for worker in self._state.get_active_workers()]
        workers.sort(key=lambda worker: worker['started'], reverse=True)

Call this `num_unread_messages`? Or even better, `num_unread_rpc_messages`. It would be nice if a grep search for `rpc_messages` turned up all relevant bits of code for the RPC messages.

            started=worker.started,
            state=worker.state,
            first_task_display_name=self._first_task_display_name(worker),
+            num_unread_rpc_messages=len(worker.rpc_messages),
            **worker.info
        ) for worker in self._state.get_active_workers()]
        workers.sort(key=lambda worker: worker['started'], reverse=True)
codereview_python_data_11970
        else:
            index = index_len
            index_len = len(index)
-        index_chunksize = compute_chunksize((len(index), None), num_partitions, axis=0)
        if index_chunksize > index_len:
            row_lengths = [index_len] + [0 for _ in range(num_partitions - 1)]
        else:

This feels a little hacky; instead of passing a tuple, should we pass the values separately?

        else:
            index = index_len
            index_len = len(index)
+        index_chunksize = compute_chunksize(
+            row_count=index_len, num_splits=num_partitions
+        )
        if index_chunksize > index_len:
            row_lengths = [index_len] + [0 for _ in range(num_partitions - 1)]
        else:
codereview_python_data_11974
        assert r.request.url == pr.request.url
        assert r.request.headers == pr.request.headers

    def test_response_lines(self):
        """
-        iter_lines should be able to handle data dribbling in which might
-        not be lined up ideally.
        """
        mock_chunks = [
            'This \r\n',

I'd like these asserts cleaned up if at all possible; they're kinda complex. Let's break some stuff up so the assert is somewhat self-describing.

        assert r.request.url == pr.request.url
        assert r.request.headers == pr.request.headers

+    def test_response_lines(self):
        """
+        iter_lines should be able to handle data dribbling in which delimiters
+        might not be lined up ideally.
        """
        mock_chunks = [
            'This \r\n',
codereview_python_data_11977
    output = self._RunArgparseFormatHelp(argument_parser)

-    if tuple(sys.version_info[0:2]) < (3, 6):
-      self.assertEqual(output, self._EXPECTED_OUTPUT)
    else:
-      self.assertEqual(output, self._EXPECTED_OUTPUT_PY3_6)

  def testParseOptions(self):
    """Tests the ParseOptions function."""

Please move this to py2to3 instead. Is this really specific to 3.6? Doesn't it appear on 3.5?

    output = self._RunArgparseFormatHelp(argument_parser)

+    if py2to3.PY_3_5_AND_LATER:
+      self.assertEqual(output, self._EXPECTED_OUTPUT_PY_3_5_AND_LATER)
    else:
+      self.assertEqual(output, self._EXPECTED_OUTPUT)

  def testParseOptions(self):
    """Tests the ParseOptions function."""
codereview_python_data_11988
class PassFailStatus(Reporter, Service, AggregatorListener, WidgetProvider):
    def __init__(self):
        super(PassFailStatus, self).__init__()
        self.criteria = []

Why delete the type hints? They help the IDE...

class PassFailStatus(Reporter, Service, AggregatorListener, WidgetProvider):
+    """
+    :type criteria: list[FailCriterion]
+    """
    def __init__(self):
        super(PassFailStatus, self).__init__()
        self.criteria = []
codereview_python_data_11989
from mmcv.runner import auto_fp16, force_fp32

from ..builder import build_loss
-from ..utils import up_sample_like


class BaseSemanticHead(nn.Module, metaclass=ABCMeta):

Need to deal with the semantic head in roi_heads.

from mmcv.runner import auto_fp16, force_fp32

from ..builder import build_loss
+from ..utils import upsample_like


class BaseSemanticHead(nn.Module, metaclass=ABCMeta):
codereview_python_data_12000
        q.title = "Save file to:"
        q.text = "Please enter a location for <b>{}</b>".format(
            html.escape(url.toDisplayString()))
-        q.yank_text = url.toString()
        q.mode = usertypes.PromptMode.download
        q.completed.connect(q.deleteLater)
        q.default = _path_suggestion(suggested_filename)

This should be `toDisplayString()` to not contain e.g. passwords.

        q.title = "Save file to:"
        q.text = "Please enter a location for <b>{}</b>".format(
            html.escape(url.toDisplayString()))
+        q.url = url.toString(QUrl.RemoveUserInfo)
        q.mode = usertypes.PromptMode.download
        q.completed.connect(q.deleteLater)
        q.default = _path_suggestion(suggested_filename)
codereview_python_data_12003
@utils.benchmark('time', timeout=600)
@utils.parametrize('feat_size', [32, 128, 512])
-@utils.parametrize('num_relations', [3, 6, 12])
-@utils.thread_wrapped_func
-def track_time(feat_size, num_relations):
    device = utils.get_bench_device()
    dd = {}
    candidate_edges = [dgl.data.CoraGraphDataset(verbose=False)[0].edges(), dgl.data.PubmedGraphDataset(verbose=False)[

These configs are quite similar; use `[5, 50, 500]`.

@utils.benchmark('time', timeout=600)
@utils.parametrize('feat_size', [32, 128, 512])
+@utils.parametrize('num_relations', [5, 50, 500])
+@utils.parametrize('multi_reduce_type', ["sum", "stuck"])
+def track_time(feat_size, num_relations, multi_reduce_type):
    device = utils.get_bench_device()
    dd = {}
    candidate_edges = [dgl.data.CoraGraphDataset(verbose=False)[0].edges(), dgl.data.PubmedGraphDataset(verbose=False)[
codereview_python_data_12006
print("Current version: %s. Latest released version: %s" % ( tools.__version__, last_release )) start_time = time() Why don't these checks run first? Won't this just tie up a container in Travis otherwise? print("Current version: %s. Latest released version: %s" % ( tools.__version__, last_release )) + if not tools.on_master(): + print("Not deploying due to not being on master") + sys.exit(0) + + if not tools.has_source_changes(last_release): + print("Not deploying due to no source changes") + sys.exit(0) start_time = time()
codereview_python_data_12007
        props = config.get('properties', {})
        metrics = props.get('metrics', {})
        is_devshell = metrics.get('environment') == 'devshell'
-        is_service_account = 'iam.gserviceaccount.com' in authed_user
        print('Read gcloud info: Success')
    except ValueError as verr:
        print(verr)
        sys.exit(1)
-    return project_id, authed_user, is_devshell, is_service_account


def _get_service_account_json_path():
    """Search in the environment variables for Google Credentials

I am wondering if it would be better to use a flag such as `--service-account-key-path` to tell the installer where the key file is, instead of having this logic here. It would simplify things on our side, with less logic. Would a flag like this work on your side? Then above, you can check whether this flag is configured; if so, authed_user is a service account:

```
if not service_account_key_path:
    self.check_if_authed_user_in_domain(
        self.organization_id, authed_user)
else:
    gcloud.activate_service_account(authed_user)
```

What do you think?

        props = config.get('properties', {})
        metrics = props.get('metrics', {})
        is_devshell = metrics.get('environment') == 'devshell'
        print('Read gcloud info: Success')
    except ValueError as verr:
        print(verr)
        sys.exit(1)
+    return project_id, authed_user, is_devshell


def _get_service_account_json_path():
    """Search in the environment variables for Google Credentials
codereview_python_data_12015
    return self._get_option_section('ubsan')

  def get_hwasan_options(self):
-    """Return a list of UBSAN_OPTIONS overrides."""
    return self._get_option_section('hwasan')

  def get_grammar_options(self):

Nit: typo in `UBSAN_OPTIONS`.

    return self._get_option_section('ubsan')

  def get_hwasan_options(self):
+    """Return a list of HWSAN_OPTIONS overrides."""
    return self._get_option_section('hwasan')

  def get_grammar_options(self):
codereview_python_data_12016
    # name: possible binary names (linux/mac and windows)
    "vlc": ["vlc", "vlc.exe"],
    "mpv": ["mpv", "mpv.exe"],
-    "pot": ["potplayer", "potplayermini64.exe", "potplayermini.exe"]
}

if is_win32:

Is there even a `potplayer` binary? This player is Windows-only.

    # name: possible binary names (linux/mac and windows)
    "vlc": ["vlc", "vlc.exe"],
    "mpv": ["mpv", "mpv.exe"],
+    "potplayer": ["potplayermini64.exe", "potplayermini.exe"]
}

if is_win32:
codereview_python_data_12020
            fileName = normalizedWalletFileName(walletName)
            walletFilePath = self.walletSaver.saveWallet(
                wallet, getWalletFilePath(contextDir, fileName))
-            self.logger.debug('Active wallet "{}" saved ({})'.
-                              format(walletName, walletFilePath))
        except IOError as ex:
            self.logger.info("Error occurred while saving wallet. " +
                             "error no.{}, error.{}"

Why isn't it INFO? Do we have INFO level messages for Agents? The current agents are rather test/demo ones, so it makes sense to track some information (present in getting started?) at INFO level.

            fileName = normalizedWalletFileName(walletName)
            walletFilePath = self.walletSaver.saveWallet(
                wallet, getWalletFilePath(contextDir, fileName))
+            self.logger.info('Active wallet "{}" saved ({})'.
+                             format(walletName, walletFilePath))
        except IOError as ex:
            self.logger.info("Error occurred while saving wallet. " +
                             "error no.{}, error.{}"
codereview_python_data_12023
class TypeName(_TypeName):
    name: str  # name is used for types in named tuples
    maintype: ObjectRef
-    subtypes: typing.Union[typing.List[_TypeName], None]
-    dimensions: typing.Union[typing.List[int], None]


class FuncParam(Base):

`typing.Union[typing.List[_TypeName], None]` -> `typing.Optional[typing.List[_TypeName]]`

class TypeName(_TypeName):
    name: str  # name is used for types in named tuples
    maintype: ObjectRef
+    subtypes: typing.Optional[typing.List[_TypeName]]
+    dimensions: typing.Optional[typing.List[int]]


class FuncParam(Base):
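`typing.Optional[X]` is defined as shorthand for `typing.Union[X, None]`, so the suggested rewrite changes nothing semantically; it only reads better:

```python
import typing

# Optional[X] and Union[X, None] are the same type object.
assert typing.Optional[typing.List[int]] == typing.Union[typing.List[int], None]
```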
codereview_python_data_12025
Can we keep this somewhere inside DGL?
codereview_python_data_12027
        self.comm = comm
        self._force = False
        self._updated = False  # Whether the plot should be marked as updated

-        # Setting up alias
-        if 'title' in params and 'title_format' in params:
-            if params['title'] != params['title_format']:
-                self.warning('The title and title_format parameters do not match. Using title.')
-        elif 'title_format' in params:
-            params['title'] = params['title_format']
-        elif 'title' in params:
-            params['title_format'] = params['title']
-
        params = {k: v for k, v in params.items() if k in self.params()}
        super(DimensionedPlot, self).__init__(**params)

This is not the right place to do these checks, since the parameter can be set dynamically and title_format would currently be ignored in that case. Generally that means doing this where the parameter is actually used, and not altering the actual parameter values on the plot.

        self.comm = comm
        self._force = False
        self._updated = False  # Whether the plot should be marked as updated

        params = {k: v for k, v in params.items() if k in self.params()}
        super(DimensionedPlot, self).__init__(**params)
codereview_python_data_12040
import argparse
import os
import os.path as osp
-import sys

from mmcv import Config

Maybe we should delete the imported module in `mmcv.Config`?

import argparse
import os
import os.path as osp

from mmcv import Config
codereview_python_data_12041
""" Checks iterator stop condition, gets DALI outputs and perform reset in case of StopIteration """ - if self._counter >= self._size and self._size > 0: if self._auto_reset: self.reset() raise StopIteration ```suggestion if self._size > 0 and self._counter >= self._size: ``` reads a little bit better """ Checks iterator stop condition, gets DALI outputs and perform reset in case of StopIteration """ + if self._size > 0 and self._counter >= self._size: if self._auto_reset: self.reset() raise StopIteration
codereview_python_data_12043
    # Utility functions

    def setUp(self):
        try:
-            with self.assertRaisesRegex(AttributeError, r".+"):
-                raise AttributeError('x')
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp

A cleaner solution is to just reference `self.assertRaisesRegex` without actually calling it:

```python
try:
    self.assertRaisesRegex
except AttributeError:
    self.assertRaisesRegex = self.assertRaisesRegexp
```

    # Utility functions

    def setUp(self):
        try:
+            self.assertRaisesRegex
        except AttributeError:
            self.assertRaisesRegex = self.assertRaisesRegexp
codereview_python_data_12049
from six.moves import range
import six

-import logging as log
import itertools
import os.path
import warnings

Is this used anywhere?

from six.moves import range
import six

import itertools
import os.path
import warnings
codereview_python_data_12050
import unittest
from os import path

from Bio import SeqIO
-from Bio.SeqFeature import FeatureLocation, AfterPosition, BeforePosition, \
-    CompoundLocation, UnknownPosition


class TestReference(unittest.TestCase):

Could you use two import lines, rather than the slash continuation here?

import unittest
from os import path

from Bio import SeqIO
+from Bio.SeqFeature import FeatureLocation, AfterPosition, BeforePosition
+from Bio.SeqFeature import CompoundLocation, UnknownPosition


class TestReference(unittest.TestCase):
codereview_python_data_12052
import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils import keras_utils
-from tensorflow_addons.utils.types import TensorLike
-from typing import Optional


-def _pad(image: TensorLike,
-         filter_shape: TensorLike,
         mode: str = "CONSTANT",
         constant_values: TensorLike = 0) -> tf.Tensor:
    """Explicitly pad a 4-D image.

`filter_shape` should be a list/tuple (I'm not sure it works with tensors).

import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils import keras_utils
+from tensorflow_addons.utils.types import TensorLike, FloatTensorLike
+from typing import Optional, Union, List, Tuple


+def _pad(image: TensorLike,
+         filter_shape: Union[List[int], Tuple[int]],
         mode: str = "CONSTANT",
         constant_values: TensorLike = 0) -> tf.Tensor:
    """Explicitly pad a 4-D image.
codereview_python_data_12068
    @abstractmethod
    def cancel(self, order_id, relay_status=True):
        raise NotImplementedError('cancel')

    @abstractmethod

Should we add a docstring to cancel?

    @abstractmethod
    def cancel(self, order_id, relay_status=True):
+        """Cancel a single order
+
+        Parameters
+        ----------
+        order_id : int
+            The id of the order
+
+        relay_status : bool
+            Whether or not to record the status of the order
+        """
        raise NotImplementedError('cancel')

    @abstractmethod
codereview_python_data_12071
        gsuite_dwd_status = self._get_gsuite_dwd_status(summary_data)

-        email_content = BaseEmailConnector.render_from_template(
            'inventory_summary.jinja',
            {'inventory_index_id': self.inventory_index_id,
             'timestamp': timestamp,

Should not be leaking the base connector here. Should be able to call this from the connector instance.

        gsuite_dwd_status = self._get_gsuite_dwd_status(summary_data)

+        email_content = email_connector.render_from_template(
            'inventory_summary.jinja',
            {'inventory_index_id': self.inventory_index_id,
             'timestamp': timestamp,
codereview_python_data_12072
}


-def convert(in_file, in_format, out_file, out_format, mol_type=None):
    """Convert between two sequence file formats, return number of records.

    Arguments:

Perhaps we should call this argument `molecule_type` for consistency.

}


+def convert(in_file, in_format, out_file, out_format, molecule_type=None):
    """Convert between two sequence file formats, return number of records.

    Arguments:
codereview_python_data_12073
            content = fds.read()

        target_lines = [
-            "for i in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]",
            "self.vars['i'] = str(i)",
            "self.loc_mng.get_locator([{'id': self.vars['i']"

I doubt an explicit list is the best way to define values. Could we use range() here?

            content = fds.read()

        target_lines = [
+            "for i in %s" % str(list(range(1,11))),
            "self.vars['i'] = str(i)",
            "self.loc_mng.get_locator([{'id': self.vars['i']"
codereview_python_data_12074
class ServerConnection(tcp.TCPClient, stateobject.StateObject):

    def __init__(self, address, source_address=None):
-        if source_address:
-            source_address = (source_address, 0)
        tcp.TCPClient.__init__(self, address, source_address)
        self.via = None

This is a bit confusing (i.e. inconsistent), to be honest. Can we just pass it through as-is?

class ServerConnection(tcp.TCPClient, stateobject.StateObject):

    def __init__(self, address, source_address=None):
        tcp.TCPClient.__init__(self, address, source_address)
        self.via = None
codereview_python_data_12076
    # AppEngine
    APPENGINE = 'appengine'

-    # KEngine
-    KENGINE = 'ke'

    resource_types = frozenset([
        ORGANIZATION,

This resource is really a cluster. It should be KE_CLUSTER.

    # AppEngine
    APPENGINE = 'appengine'

+    # KE_CLUSTER
+    KE_CLUSTER = 'ke'

    resource_types = frozenset([
        ORGANIZATION,
codereview_python_data_12078
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/path/to/ca.crt',
        'ssl_certfile': '/path/to/client.crt',
-        'ssl_keyfile': '/path/to/client.key'},
-    redis_backend_use_ssl = {
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/path/to/ca.crt',
        'ssl_certfile': '/path/to/client.crt',

Should we also check what happens if someone uses the string form?

        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/path/to/ca.crt',
        'ssl_certfile': '/path/to/client.crt',
+        'ssl_keyfile': '/path/to/client.key'},
+    redis_backend_use_ssl={
        'ssl_cert_reqs': ssl.CERT_REQUIRED,
        'ssl_ca_certs': '/path/to/ca.crt',
        'ssl_certfile': '/path/to/client.crt',
codereview_python_data_12079
from __future__ import absolute_import, division, print_function

from django import forms

from tests.django.toystore.models import (
    CouldBeCharming,

This might be worth a helper class to inherit the rest from, e.g.

```python
class ReprModelForm(forms.ModelForm):
    def __repr__(self):
        """I recommend putting this in your form to show the failed cases."""
        return repr(self.data)
```

from __future__ import absolute_import, division, print_function

from django import forms
+from django.forms import widgets

from tests.django.toystore.models import (
    CouldBeCharming,
codereview_python_data_12083
    Returns
    -------
    succ: iterator
-       (node, successors) iterator where `successors` is the list of
-       successors of `node`, `node` is a transitive successor of `source`
-       and `successors` is non-empty.

    Examples
    --------

Would it be better to define transitive successor here? Also, the first time I read this I couldn't tell if it is a list of 3 traits that successors has, or if the latter phrases are simply describing other true statements. When I got to `successors is non-empty`, I decided it must be 3 ideas about successors. I suggest splitting the sentence:

```suggestion
       (node, successors) iterator where `successors` is the non-empty list of
       successors of `node` in a breadth first search from `source`.
       To appear in the iterator, `node` must have successors.
```

    Returns
    -------
    succ: iterator
+       (node, successors) iterator where `successors` is the non-empty list of
+       successors of `node` in a breadth first search from `source`.
+       To appear in the iterator, `node` must have successors.

    Examples
    --------
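For reference, a tiny demonstration of the documented behavior using networkx's public `bfs_successors` (node 3 has no successors in the search tree, so it never appears as a key):

```python
import networkx as nx

G = nx.path_graph(4)  # edges: 0-1, 1-2, 2-3

print(list(nx.bfs_successors(G, source=0)))
# [(0, [1]), (1, [2]), (2, [3])]
```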
codereview_python_data_12085
def warn(ledger_name, directories_path):
-    print('The follow directories will be deleted:')
    for path in directories_path:
        print(str(path))

```suggestion
    print('The following directories will be deleted:')
```

def warn(ledger_name, directories_path):
+    print('The following directories will be deleted:')
    for path in directories_path:
        print(str(path))
codereview_python_data_12091
        'properties': {
            'name': context.properties['database-name'],
            'project': context.env['project'],
-            'instance': '$(ref.{}.name)'.format(context.env['deployment'])
        }
    })

Don't we still want DM to wait to deploy this configuration until the instance is established?

        'properties': {
            'name': context.properties['database-name'],
            'project': context.env['project'],
+            'instance': '$(ref.cloudsql-instance.name)'
        }
    })
codereview_python_data_12092
        # Delete the reduction attribute to inform Keras that it
        # should call this class by the __call__(...) method.
-        if 'reduction' in dir(self):
            delattr(self, 'reduction')

    def __call__(self, y_true, y_pred, sample_weight=None):

`hasattr` seems more appropriate here?

        # Delete the reduction attribute to inform Keras that it
        # should call this class by the __call__(...) method.
+        if hasattr(self, 'reduction'):
            delattr(self, 'reduction')

    def __call__(self, y_true, y_pred, sample_weight=None):
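`hasattr` is both more direct and more faithful than scanning `dir()`: it performs a real attribute lookup, so it also sees attributes synthesized by `__getattr__`, which `dir()` can miss. An illustration (the `Loss` class here is a contrived example):

```python
class Loss:
    def __getattr__(self, name):
        if name == "reduction":
            return "sum"  # synthesized attribute, invisible to dir()
        raise AttributeError(name)

obj = Loss()
print(hasattr(obj, "reduction"))   # True: actual attribute lookup
print("reduction" in dir(obj))     # False: dir() never calls __getattr__
```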
codereview_python_data_12093
        self.add_argument('--dataset', type=str, default='FB15k',
                          help='dataset name, under data_path')
        self.add_argument('--format', type=str, default='built_in',
-                          choices=['built_in', 'raw_udd', 'udd'],
-                          help='the format of the dataset.')
        self.add_argument('--data_files', type=str, default=None, nargs='+',
                          help='a list of data files, e.g. entity relation train valid test')
        self.add_argument('--model_path', type=str, default='ckpts',

We need to support users' own data. As we discussed last time, the format can be something like this:

```
--format {htr,hrt,dgl}
```

We need to support loading preprocessed data in DGLGraph. We should also support data in triplets. In this case, users need to tell us what the format of the triplets is.

        self.add_argument('--dataset', type=str, default='FB15k',
                          help='dataset name, under data_path')
        self.add_argument('--format', type=str, default='built_in',
+                          help='the format of the dataset, it can be built_in,'\
+                          'raw_udd_{{htr} and udd_{{htr}')
        self.add_argument('--data_files', type=str, default=None, nargs='+',
                          help='a list of data files, e.g. entity relation train valid test')
        self.add_argument('--model_path', type=str, default='ckpts',
codereview_python_data_12095
def _test():
-    """Run the Bio.Blast.Applications module's doctests.(PRIVATE)."""
-
    import doctest

    doctest.testmod(verbose=1)

You need a space, not a full stop, before the ``(PRIVATE)`` text. Also remove the extra blank line, which will cause one of the style checks to fail.

def _test():
+    """Run the Bio.Blast.Applications module's doctests (PRIVATE)."""
    import doctest

    doctest.testmod(verbose=1)
codereview_python_data_12098
    content_type = None
    length = None

-    if not data and json is not None:
        content_type = 'application/json'
        body = complexjson.dumps(json)

This should really be `if data is None`.

    content_type = None
    length = None

+    if data == {} and json is not None:
        content_type = 'application/json'
        body = complexjson.dumps(json)
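The reviewer's point is that a falsy test conflates "no body was passed" with "an empty body was passed", while an explicit `None` check keeps them apart:

```python
for data in (None, {}, [], "", b""):
    print(not data, end=" ")        # True True True True True (all falsy)
print()
for data in (None, {}, [], "", b""):
    print(data is None, end=" ")    # True False False False False
```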
codereview_python_data_12106
                   gt_labels_list=None,
                   cls_out_channels=1,
                   sampling=True,
-                   need_unmap=True):
    """Compute regression and classification targets for anchors.

    Args:

Rename `need_unmap` to `unmap`.

                   gt_labels_list=None,
                   cls_out_channels=1,
                   sampling=True,
+                   unmap=True):
    """Compute regression and classification targets for anchors.

    Args:
codereview_python_data_12108
        self.set_group(self.group)
        if self.get_label() is None:
            raise ValueError("Label should not be None.")
-        if isinstance(self._predictor, _InnerPredictor) and self._predictor != self.reference._predictor:
            self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
        else:
            # create train

Can predictors be compared with the `!=` operation?

        self.set_group(self.group)
        if self.get_label() is None:
            raise ValueError("Label should not be None.")
+        if isinstance(self._predictor, _InnerPredictor) and self._predictor is not self.reference._predictor:
            self._set_init_score_by_predictor(self._predictor, self.data, used_indices)
        else:
            # create train
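The hazard with `!=` is that it dispatches to user-defined `__eq__`/`__ne__`, which an arbitrary object may override (or which may be expensive), whereas `is not` compares identity and never runs user code. A minimal illustration with a hypothetical class:

```python
class Predictor:
    def __eq__(self, other):
        raise TypeError("predictors are not comparable")

a = Predictor()
b = a

# a != b would invoke __eq__ and raise; identity checks are always safe.
print(a is b)      # True
print(a is not b)  # False
```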
codereview_python_data_12109
    that they wish to delete their listens.
    """
    if request.method == 'POST':
-        if request.form.get('token') == current_user.auth_token:
            try:
                delete_listens_history(current_user.musicbrainz_id)
            except Exception as e:

This message is shown to the user. Let's change it to something more user-friendly: "We couldn't delete your listens due to an error. Please try again later."

    that they wish to delete their listens.
    """
    if request.method == 'POST':
+        if request.form.get('token') and (request.form.get('token') == current_user.auth_token):
            try:
                delete_listens_history(current_user.musicbrainz_id)
            except Exception as e:
codereview_python_data_12124
import datetime

from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect

Could you group the oscar imports? PEP8:

```
Imports should be grouped in the following order:

    standard library imports
    related third party imports
    local application/library specific imports

You should put a blank line between each group of imports.
```

import datetime

+from django.conf import settings
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
codereview_python_data_12131
        self.emit_event(
            'position_update',
-            formatted="Walking from {last_position} to {current_position}, distance left: ({distance} {distance_unit}) ..",
            data={
-                'last_position': (last_lat, last_lng, 0),
-                'current_position': (lat, lng, 0),
                'distance': dist,
                'distance_unit': 'm'
            }

0 is not good. We have random altitude for now; please help check whether we are sending 0 to the server.

        self.emit_event(
            'position_update',
+            formatted="Walk to {last_position} now at {current_position}, distance left: ({distance} {distance_unit}) ..",
            data={
+                'last_position': (last_lat, last_lng, last_alt),
+                'current_position': (lat, lng, alt),
                'distance': dist,
                'distance_unit': 'm'
            }
codereview_python_data_12132
-from torch.optim import *  # noqa: F401, F403
-
from .copy_of_sgd import CopyOfSGD
-from .registry import OPTIMIZERS, TORCH_OPTIMIZERS

-__all__ = ['OPTIMIZERS', 'CopyOfSGD', *TORCH_OPTIMIZERS]

It may not be necessary to expose `TORCH_OPTIMIZERS`.

+from .builder import build_optimizer
from .copy_of_sgd import CopyOfSGD
+from .registry import OPTIMIZERS

+__all__ = ['OPTIMIZERS', 'build_optimizer', 'CopyOfSGD']
codereview_python_data_12137
  new_weight = match.new_weight
  old_weight = old_match.new_weight

  # Always update the weight if the previous value is the default. This is
  # required to deal with specifications that are meant to set the weight above
  # 1.0. Otherwise, prioritize only the most penalizing match for this pairing.

How does this work for new fuzzers, where the new weight is 5.0 and is > default?

  new_weight = match.new_weight
  old_weight = old_match.new_weight

+  # Rules that increase weights are expected to take precedence over any that
+  # lower the weight. Issues with new fuzzers may be fixed intraday and other
+  # issues like crashes shouldn't be penalized for them.
+  if old_weight > 1.0:
+    return
+
  # Always update the weight if the previous value is the default. This is
  # required to deal with specifications that are meant to set the weight above
  # 1.0. Otherwise, prioritize only the most penalizing match for this pairing.
codereview_python_data_12139
        if bins or normalize:
            raise NotImplementedError(
-                "OmniSci' 'value_counts' does not support 'bins' and 'normalize' parameters"
            )
        new_frame = self._modin_frame.value_counts(

```suggestion
                "OmniSci' 'value_counts' does not support 'bins' and 'normalize' parameters."
```

        if bins or normalize:
            raise NotImplementedError(
+                "OmniSci's 'value_counts' does not support 'bins' and 'normalize' parameters."
            )
        new_frame = self._modin_frame.value_counts(
codereview_python_data_12140
    is visible and bar is a single non-computed property, which we know
    will be stored as NULL in the database.
    """
-    if (
        ir_set.expr is None
        and not ir_set.path_id.is_objtype_path()
        and ir_set.rptr

Why not just `return` the condition?

    is visible and bar is a single non-computed property, which we know
    will be stored as NULL in the database.
    """
+    return (
        ir_set.expr is None
        and not ir_set.path_id.is_objtype_path()
        and ir_set.rptr
codereview_python_data_12141
        elif isinstance(data, str):  # TODO - What about unicode?
            self._data = array.array("u", data)
        elif isinstance(data, MutableSeq):
-            self._data = array.array("u", data._data)
-        else:
            # Make no assumptions about the Seq subclass internal storage
            self._data = array.array("u", str(data))
        else:

Is this the most efficient way to take a copy of an array? Does this beat it?

```python
self._data = data._data[:]  # Take a copy
```

        elif isinstance(data, str):  # TODO - What about unicode?
            self._data = array.array("u", data)
        elif isinstance(data, MutableSeq):
+            self._data = data._data[:]  # Take a copy
+        elif isinstance(data, Seq):
            # Make no assumptions about the Seq subclass internal storage
            self._data = array.array("u", str(data))
        else:
codereview_python_data_12142
                 end_level=-1,
                 add_extra_convs=False,
                 extra_convs_on_inputs=True,
                 conv_cfg=None,
                 normalize=None,
                 activation=None,
-                 relu_extra_convs=False,
                 caffe2_xavier_initialize=False):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)

It may be better to rename it to `relu_before_extra_convs`. This argument can be moved after `extra_convs_on_inputs`.

                 end_level=-1,
                 add_extra_convs=False,
                 extra_convs_on_inputs=True,
+                 relu_before_extra_convs=False,
                 conv_cfg=None,
                 normalize=None,
                 activation=None,
                 caffe2_xavier_initialize=False):
        super(FPN, self).__init__()
        assert isinstance(in_channels, list)
codereview_python_data_12144
                 pipeline,
                 dynamic_scale=None,
                 skip_type_keys=None):
-        warnings.warn('dynamic_scale is deprecated. will be removed '
-                      'in future releases')
        assert isinstance(pipeline, collections.abc.Sequence)
        if skip_type_keys is not None:
            assert all([

If raising a warning here, the _dynamic_scale function should not be removed.

                 pipeline,
                 dynamic_scale=None,
                 skip_type_keys=None):
+        if dynamic_scale is not None:
+            raise RuntimeError(
+                'dynamic_scale is deprecated. The function has '
+                'been implemented in mmdet/models/detectors/yolox.py')
        assert isinstance(pipeline, collections.abc.Sequence)
        if skip_type_keys is not None:
            assert all([
codereview_python_data_12151
""" # TODO: Examine other versions of the file format and if this parser should # support them. - return '#Version: 1.5' in line manager.ParsersManager.RegisterParser(WinFirewallParser) Why change this? """ # TODO: Examine other versions of the file format and if this parser should # support them. + return line == '#Version: 1.5\n' manager.ParsersManager.RegisterParser(WinFirewallParser)
codereview_python_data_12152
        continue

      # Sanitizer regular crash (includes ills, abrt, etc).
-      if not is_golang:
        update_state_on_match(
            SAN_ADDR_REGEX,
            line,

I don't think we should disable sanitizer stacks completely. What if we have a Go crash without a Go stack and only an ASan stack (we want to create an ASan stack signature)? I think your intent is to not update with the sanitizer stack if a Go stack is found. This is easy to do by initializing a bool found_golang_crash=False and then setting it when you detect a Golang crash type:

```
if update_state_on_match(
    golang_crash_regex, line, state, new_type=golang_crash_type):
  found_golang_crash = True
  state.crash_state = ''
```

and then doing `if not found_golang_crash` here.

        continue

      # Sanitizer regular crash (includes ills, abrt, etc).
+      if not found_golang_crash:
        update_state_on_match(
            SAN_ADDR_REGEX,
            line,
codereview_python_data_12161
    detectedPPMCol = datamap[detectedNuc + ".P"] + 1

    # Make a list of the data lines involving the detected
-    if str(toResNum) in peaklist.residue_dict(detectedNuc) and str(
-        originResNum
-    ) in peaklist.residue_dict(detectedNuc):
        detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
        originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
        returnLine = detectedList[0]

This is an example where I don't like how black split up a long if-statement conditional. But, unless you can see a neat way to avoid this (e.g. brackets on the two arguments to and?), leave it as it is.

    detectedPPMCol = datamap[detectedNuc + ".P"] + 1

    # Make a list of the data lines involving the detected
+    if (str(toResNum) in peaklist.residue_dict(detectedNuc)) and (
+        str(originResNum) in peaklist.residue_dict(detectedNuc)
+    ):
        detectedList = peaklist.residue_dict(detectedNuc)[str(toResNum)]
        originList = peaklist.residue_dict(detectedNuc)[str(originResNum)]
        returnLine = detectedList[0]
codereview_python_data_12164
            # pass a Mock object as argument.
            sig['immutable'] = True
            sig = Signature.from_dict(sig)
-        # Any child task might error so we need to ensure that they are all
-        # capable of calling the linked error signature. This opens the
-        # possibility that the task is called more than once but that's better
-        # than it not being called at all.
-        #
-        # We return a concretised tuple of the signatures actually applied to
-        # each child task signature, of which there might be none!
-        return tuple(child_task.link_error(sig) for child_task in self.tasks)

    def _prepared(self, tasks, partial_args, group_id, root_id, app,
                  CallableSignature=abstract.CallableSignature,

If I understand correctly, this means that the chord error handler may fire for every error in the chord header. Is that accurate?

            # pass a Mock object as argument.
            sig['immutable'] = True
            sig = Signature.from_dict(sig)
+        return self.tasks[0].link_error(sig)

    def _prepared(self, tasks, partial_args, group_id, root_id, app,
                  CallableSignature=abstract.CallableSignature,
codereview_python_data_12168
            key_value['key'] for key_value in source.key_value_pairs]):
          if self.CheckKeyCompatibility(key_path):
            find_specs = self.BuildFindSpecsFromRegistryArtifact(key_path)
-            self.find_specs_per_source_type[
-                artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY].extend(
-                    find_specs)

    elif (source.type_indicator ==
          artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):

My advice would be to have one statement per line:

```
artifact_group = self.find_specs_per_source_type[artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY]
artifact_group.extend(find_specs)
```

This makes debugging easier.

            key_value['key'] for key_value in source.key_value_pairs]):
          if self.CheckKeyCompatibility(key_path):
            find_specs = self.BuildFindSpecsFromRegistryArtifact(key_path)
+            artifact_group = self._find_specs_per_source_type[
+                artifact_types.TYPE_INDICATOR_WINDOWS_REGISTRY_KEY]
+            artifact_group.extend(find_specs)

    elif (source.type_indicator ==
          artifact_types.TYPE_INDICATOR_ARTIFACT_GROUP):
codereview_python_data_12176
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1  # TODO: port TF1 test files?

-from tensorflow_addons.image import _get_boundary_locations
from tensorflow_addons.image import sparse_image_warp
-from tensorflow_addons.image import _get_grid_locations
from tensorflow.python.training import momentum
from tensorflow_addons.utils.resource_loader import get_path_to_datafile

Hi Kyle, thanks for all your great work! Do you mind replacing these with, for example, `from tensorflow_addons.image.sparse_image_warp import _get_boundary_locations`? It's probably best we keep the underscore-prefixed methods out of the __init__.

import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1  # TODO: port TF1 test files?

+from tensorflow_addons.image.sparse_image_warp import _get_boundary_locations
+from tensorflow_addons.image.sparse_image_warp import _get_grid_locations
from tensorflow_addons.image import sparse_image_warp
from tensorflow.python.training import momentum
from tensorflow_addons.utils.resource_loader import get_path_to_datafile
codereview_python_data_12177
import sys

from botocore.exceptions import ClientError

sys.path.append('../../..')
from demo_tools.custom_waiter import CustomWaiter, WaitState

Just FYI, this is a little magic and may create some oddities if users copy code elsewhere. That may be acceptable for the example, though.

import sys

from botocore.exceptions import ClientError

+# Add relative path to include demo_tools in this code example without need for setup.
sys.path.append('../../..')
from demo_tools.custom_waiter import CustomWaiter, WaitState
codereview_python_data_12178
    for i, (X, y) in enumerate(eval_set):
        # when individual eval set is equivalent to training data, skip recomputing parts.
-        if id(X) == id(data) and id(y) == id(label):
            for parts_idx in range(n_parts):
                eval_sets[parts_idx].append('__train__')

I totally support this simplification. Could you make this a separate PR? It doesn't require new tests and we can merge it immediately.

    for i, (X, y) in enumerate(eval_set):
        # when individual eval set is equivalent to training data, skip recomputing parts.
+        if X is data and y is label:
            for parts_idx in range(n_parts):
                eval_sets[parts_idx].append('__train__')
codereview_python_data_12194
""" with csv_writer.write_csv(resource_name, data) as csv_file: try: - try: - snapshot_table_name = self._create_snapshot_table( - resource_name, timestamp) - except OperationalError as e: - # TODO: find a better way to handle this. I want this method - # to be resilient when the table has already been created - # so that it can support inserting new data. This will catch - # a sql 'table already exist' error and alter the flow. - snapshot_table_name = self._create_snapshot_table_name( resource_name, timestamp) load_data_sql = load_data_sql_provider.provide_load_data_sql( resource_name, csv_file.name, snapshot_table_name) Can you move table creation to a new function? I think that would make this more readable. """ with csv_writer.write_csv(resource_name, data) as csv_file: try: + snapshot_table_name = self._get_snapshot_table( resource_name, timestamp) load_data_sql = load_data_sql_provider.provide_load_data_sql( resource_name, csv_file.name, snapshot_table_name)
codereview_python_data_12195
-def spiral(n):
    pass

Could you please change `n` to something more meaningful, like `matrix_size`?

+def spiral(size):
    pass
codereview_python_data_12214
        self.bulk_size = self.settings.get("bulk-size", self.bulk_size)
        self.browser_open = self.settings.get("browser-open", self.browser_open)
        token = self.settings.get("token", "")
-        proxy_settings = self.settings.get("proxy", None)
        if proxy_settings:
            if proxy_settings.get("address"):
                proxy_url = urlsplit(proxy_settings.get("address"))

In the talk we agreed to have a Taurus-global setting, not a module-level setting.

        self.bulk_size = self.settings.get("bulk-size", self.bulk_size)
        self.browser_open = self.settings.get("browser-open", self.browser_open)
        token = self.settings.get("token", "")
+        proxy_settings = self.engine.config.get("settings").get("proxy")
        if proxy_settings:
            if proxy_settings.get("address"):
                proxy_url = urlsplit(proxy_settings.get("address"))
codereview_python_data_12217
    assert N == 2
    assert_almost_equal(cutoff, 10.5, decimal=4)


-def test_pdc_on_off(universe, lipid_heads):
    lfls_pbc_on = LeafletFinder(universe, lipid_heads, pbc=True)
    lfls_pbc_off = LeafletFinder(universe, lipid_heads, pbc=False)
    assert lfls_pbc_on.graph.size() > lfls_pbc_off.graph.size()

Could you also test the inside of the file, i.e. check that the selection for, say, a vmd file would work here?

    assert N == 2
    assert_almost_equal(cutoff, 10.5, decimal=4)


+def test_pbc_on_off(universe, lipid_heads):
    lfls_pbc_on = LeafletFinder(universe, lipid_heads, pbc=True)
    lfls_pbc_off = LeafletFinder(universe, lipid_heads, pbc=False)
    assert lfls_pbc_on.graph.size() > lfls_pbc_off.graph.size()
codereview_python_data_12220
    machine_id = 0
    lines = [line.rstrip('\n') for line in open(filename)]
    for line in lines:
-        result = line.split(' ')
        if len(result) == 2:
            port = int(result[1])
        elif len(result) == 1:

It'll break if there are multiple spaces.

    machine_id = 0
    lines = [line.rstrip('\n') for line in open(filename)]
    for line in lines:
+        result = line.split()
        if len(result) == 2:
            port = int(result[1])
        elif len(result) == 1:
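`split(' ')` yields an empty string for every extra space, so the `len(result)` checks above stop working; the no-argument `split()` collapses any run of whitespace:

```python
line = "machine-a   8080"  # note the run of spaces

print(line.split(' '))  # ['machine-a', '', '', '8080'] -> len() is 4, not 2
print(line.split())     # ['machine-a', '8080']         -> robust splitting
```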
codereview_python_data_12226
        for i, a in enumerate(FastaM10Iterator(open(os.path.join(path, filename)))):
            print("#%i, %s" % (i + 1, a))
            for r in a:
                    self.assertEqual(r.seq.alphabet.gap_char, "-")
                else:
                    assert not hasattr(r.seq.alphabet, "gap_char")

You lost the if-statement, does it still work?

        for i, a in enumerate(FastaM10Iterator(open(os.path.join(path, filename)))):
            print("#%i, %s" % (i + 1, a))
            for r in a:
+                if "-" in r.seq:
                    self.assertEqual(r.seq.alphabet.gap_char, "-")
                else:
                    assert not hasattr(r.seq.alphabet, "gap_char")
codereview_python_data_12228
                args.attn_drop,
                args.residual)

-    model.initialize()
-
-    if cuda:
-        model.collect_params().reset_ctx(ctx)

    # use optimizer
    trainer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': args.lr})

Move the evaluation code into a separate function so that it can be shared by validation and testing.

                args.attn_drop,
                args.residual)

+    model.initialize(ctx=ctx)

    # use optimizer
    trainer = gluon.Trainer(model.collect_params(), 'adam', {'learning_rate': args.lr})