codereview_python_data_453
----------
        angle : float
            Rotation angle in degrees.
-        axis : array_like or tuple of 2 AtomGroups
-            Rotation axis vector. If a tuple is given the axis will be
-            determined by the difference vector of the centroid for both
-            AtomGroups.
        point : array_like (optional)
            Center of rotation. If ``None`` then the center of geometry of this
-            group is used if ``axis`` is an array. If ``axis`` is a tuple of
-            atomgroups the centroid of the first atomgroup is used.

        Returns
        -------

You don't need to call unique before going onto universe here, just `self.universe` should be fine

----------
        angle : float
            Rotation angle in degrees.
+        axis : array_like
+            Rotation axis vector.
        point : array_like (optional)
            Center of rotation. If ``None`` then the center of geometry of this
+            group is used.

        Returns
        -------
codereview_python_data_456
        try:
            state = ext_handler.properties.state
-            self.get_artifact_error_state.reset()
            if self.last_etag == etag:
                if self.log_etag:
                    ext_handler_i.logger.verbose("Incarnation {0} did not change, not processing GoalState", etag)

the error state should be reset after the "if decide_version" (because download succeeded) now, since with this change we short-circuit the logic if there is not a new goal state, it seems to me that the error state is not needed and we should always report errors (since it is a new goal state) -- could you review the code to check if this is true?

        try:
            state = ext_handler.properties.state
            if self.last_etag == etag:
                if self.log_etag:
                    ext_handler_i.logger.verbose("Incarnation {0} did not change, not processing GoalState", etag)
codereview_python_data_461
    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_entered(self, mode):
        self._tab.run_js_async(
-            javascript.assemble('caret', 'setInitialCursor'))

    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_left(self):

You do `self._tab.run_js_async(javascript.assemble('caret', ...)))` a lot here. Why not add some `_js_call` method or so to simplify those?

    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_entered(self, mode):
        self._tab.run_js_async(
+            javascript.assemble('caret', 'setInitialCursor', platform.platform()))

    @pyqtSlot(usertypes.KeyMode)
    def _on_mode_left(self):
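A minimal, self-contained sketch of the suggested `_js_call` helper. The `assemble` function and `Tab` class here are stand-ins for qutebrowser's `javascript.assemble` and tab API, which are not shown in the diff.

```python
import json


def assemble(namespace, command, *args):
    """Build a JS call string, e.g. window._qb.caret.setInitialCursor(...)."""
    js_args = ', '.join(json.dumps(arg) for arg in args)
    return 'window._qb.{}.{}({})'.format(namespace, command, js_args)


class Tab:
    def run_js_async(self, code):
        print('running:', code)  # stand-in for the real async JS bridge


class CaretHandler:
    def __init__(self, tab):
        self._tab = tab

    def _js_call(self, command, *args):
        # One place to assemble and dispatch, instead of repeating
        # run_js_async(assemble('caret', ...)) at every call site.
        self._tab.run_js_async(assemble('caret', command, *args))

    def on_mode_entered(self):
        self._js_call('setInitialCursor')


CaretHandler(Tab()).on_mode_entered()
```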
codereview_python_data_465
        err = "No atoms found in obj argument"
        with pytest.raises(TypeError, match=err):
            c = ParmEdConverter()
-            c.convert("we still don't support emojis :(")

Do we not? I thought py3+ was UTF-8 compliant?

        err = "No atoms found in obj argument"
        with pytest.raises(TypeError, match=err):
            c = ParmEdConverter()
+            c.convert("🐍")
codereview_python_data_466
        forseti_config_file_path (str): Path to Forseti configuration file.
        log_level (str): Sets the threshold for Forseti's logger.
        enable_console_log (bool): Enable console logging.
-        enable_debug_mode (bool): Enable console logging.
        max_workers (int): maximum number of workers for the crawler
        wait_shutdown_secs (int): seconds to wait before shutdown

The description is the same as the variable above, can it be more specific?

        forseti_config_file_path (str): Path to Forseti configuration file.
        log_level (str): Sets the threshold for Forseti's logger.
        enable_console_log (bool): Enable console logging.
+        enable_debug_mode (bool): Enable debug mode.
        max_workers (int): maximum number of workers for the crawler
        wait_shutdown_secs (int): seconds to wait before shutdown
codereview_python_data_468
    'google-auth-httplib2==0.0.3',
    'Jinja2==2.10.1',
    'jmespath==0.9.3',
-    'mailjet-rest==1.3.3',
    'netaddr==0.7.19',
    'pyyaml==4.2b4',
    'python-graph-core==1.8.2',

I recommend that we move this to be optional, as other users might not need it. Can you look at `OPTIONAL_PACKAGES` section, around line 68?

    'google-auth-httplib2==0.0.3',
    'Jinja2==2.10.1',
    'jmespath==0.9.3',
    'netaddr==0.7.19',
    'pyyaml==4.2b4',
    'python-graph-core==1.8.2',
codereview_python_data_470
    iters_num = 17
    num_workers = 4
    for prefetch_queue_depths in ((3, 1, 1), (1, 3, 1), (1, 1, 3), (1, 1, 1), (3, 3, 3)):
-        for cycle_policies in (("raise", "raise"), ("quiet", "raise"), ("raise", "quiet"), ("quiet", "quiet")):
            for epoch_sizes in ((8, 4, 6), (8, 6, 4), (4, 6, 8), (1, 1, 1)):
                yield _test_cycle_multiple_iterators, batch_size, iters_num, num_workers, \
                    prefetch_queue_depths, cycle_policies, epoch_sizes

I'd suggest testing `True` as cycle policy at least once. After all, this value is supported (and equivalent to `"quiet"`).

    iters_num = 17
    num_workers = 4
    for prefetch_queue_depths in ((3, 1, 1), (1, 3, 1), (1, 1, 3), (1, 1, 1), (3, 3, 3)):
+        for cycle_policies in (("raise", "raise"), ("quiet", "raise"), ("raise", "quiet"), ("quiet", "quiet"), (True, True)):
            for epoch_sizes in ((8, 4, 6), (8, 6, 4), (4, 6, 8), (1, 1, 1)):
                yield _test_cycle_multiple_iterators, batch_size, iters_num, num_workers, \
                    prefetch_queue_depths, cycle_policies, epoch_sizes
codereview_python_data_474
"in minutes from current time. Prevents infinite loop when stop is none") minutes_interval = luigi.IntParameter( - default=5, description="separation between events in minutes" ) This seems way to arbitrary. Why not just `default=1`? "in minutes from current time. Prevents infinite loop when stop is none") minutes_interval = luigi.IntParameter( + default=1, description="separation between events in minutes" )
codereview_python_data_482
    if config.val.qt.force_platform is not None:
        os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform

    if config.val.qt.highdpi:
        os.environ['QT_AUTO_SCREEN_SCALE_FACTOR'] = '1'

You'll still need to do this (but with the new setting name), otherwise the window decoration shows up again on wayland.

    if config.val.qt.force_platform is not None:
        os.environ['QT_QPA_PLATFORM'] = config.val.qt.force_platform

+    if config.val.window.hide_wayland_decoration:
+        os.environ['QT_WAYLAND_DISABLE_WINDOWDECORATION'] = '1'
+
    if config.val.qt.highdpi:
        os.environ['QT_AUTO_SCREEN_SCALE_FACTOR'] = '1'
codereview_python_data_490
    def tick(self):
        self.health_record.heartbeat()
        self.cell = self.get_meta_cell()
-        inventory.refresh_inventory()

        now = time.time() * 1000

refresh_inventory will trigger a packet sent to the server, which is not the same behavior as the original.

    def tick(self):
        self.health_record.heartbeat()
        self.cell = self.get_meta_cell()
+        inventory.update_web_inventory()

        now = time.time() * 1000
codereview_python_data_494
from datetime import date
import os
-from unittest.mock import patch, mock_open

# pylint: disable=redefined-outer-name
import pytest

No blocker here but another way to write this could be:
```
mock_join = "test_api_report_yamls/complex_metadata.yaml"
monkeypatch.setattr(os.path, "join", Mock(return_value=mock_join))
```

from datetime import date
import os
+from unittest.mock import patch, mock_open, Mock

# pylint: disable=redefined-outer-name
import pytest
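A runnable sketch of the reviewer's `monkeypatch` alternative. The YAML path is taken from the comment; the function under test is a hypothetical stand-in for the real report loader.

```python
import os
from unittest.mock import Mock


def load_report_path(*parts):
    return os.path.join(*parts)  # stand-in for code that builds the path


def test_report_path(monkeypatch):
    mock_join = "test_api_report_yamls/complex_metadata.yaml"
    # Every os.path.join call inside this test now returns the fixed path,
    # and the patch is undone automatically when the test finishes.
    monkeypatch.setattr(os.path, "join", Mock(return_value=mock_join))
    assert load_report_path("ignored", "args") == mock_join
```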
codereview_python_data_504
    return console_handler, ram_handler


-def change_loglevel(level):
-    value = LOG_LEVELS[level.upper()]
-    console_handler.setLevel(value)
-
-
def _init_formatters(level, color, force_color, json_logging):
    """Init log formatters.

I'd do this inside the command, no point in adding a new function in `log.py` as it isn't needed anywhere else.

    return console_handler, ram_handler


def _init_formatters(level, color, force_color, json_logging):
    """Init log formatters.
codereview_python_data_505
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
-  <p>Details: cache-sea4482-SEA 1645521621 1839665831</p>
  <hr>
  <p>Varnish cache server</p>
</body>

Getting the below error for this import:
Unable to register plugins: cannot import name 'DEFAULT_PORT_STS_BACKEND' from 'localstack.constants'

  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
+  <p>Details: cache-sea4431-SEA 1645521621 3669565527</p>
  <hr>
  <p>Varnish cache server</p>
</body>
codereview_python_data_521
        # -*- TRACE -*-
        try:
            try:
-                from celery.concurrency.future import get_future_executor
            except RuntimeError:
                R = retval = fun(*args, **kwargs)
                state = SUCCESS

What happens if tornado is installed but not used as the workers pool?

        # -*- TRACE -*-
        try:
            try:
+                from celery.concurrency.future import (
+                    get_future_executor)
            except RuntimeError:
                R = retval = fun(*args, **kwargs)
                state = SUCCESS
codereview_python_data_525
            self._molecule.driver.testinfra_args,
            self._molecule.verifier_options)
        testinfra_options['env'] = ansible.env
-        testinfra_options['debug'] = self._molecule.args.get('debug', False)
        if self._molecule.args.get('sudo'):
            testinfra_options['sudo'] = True

Would this affect the debug flag in the same way?

            self._molecule.driver.testinfra_args,
            self._molecule.verifier_options)
        testinfra_options['env'] = ansible.env
+        if self._molecule.args.get('debug'):
+            testinfra_options['debug'] = True
        if self._molecule.args.get('sudo'):
            testinfra_options['sudo'] = True
codereview_python_data_531
        # If title is empty, it couldn't be generated.
        if not title:
            return WorkerResult.SUCCESS

-        self._update_title(title, _platform)
-        if(self.terminal is True):
            self._log_on_terminal(title)
        return WorkerResult.SUCCESS

Parentheses are not necessary.

        # If title is empty, it couldn't be generated.
        if not title:
            return WorkerResult.SUCCESS
+
+        if self.terminal_title is True:
+            self._update_title(title, _platform)
+
+        if self.terminal_log is True:
            self._log_on_terminal(title)
        return WorkerResult.SUCCESS
codereview_python_data_535
                ' number of rows is unknown. Make sure there is at least'
                ' one column in the frame so number of rows can be inferred.' % name)
        if self.initializer is None:
-            self.set_initializer()
        # TODO(minjie): directly init data on the targer device.
        init_data = self.initializer((self.num_rows,) + scheme.shape, scheme.dtype)
        init_data = F.to_context(init_data, ctx)

You changed the warning behavior. Why do you think this is better?

                ' number of rows is unknown. Make sure there is at least'
                ' one column in the frame so number of rows can be inferred.' % name)
        if self.initializer is None:
+            self._warn_and_set_initializer()
        # TODO(minjie): directly init data on the targer device.
        init_data = self.initializer((self.num_rows,) + scheme.shape, scheme.dtype)
        init_data = F.to_context(init_data, ctx)
codereview_python_data_538
if __name__ == "__main__": from Bio._utils import run_doctest This line doesn't do anything - you import the function `run_doctest` but don't use it: ``` python from Bio._utils import run_doctest ``` if __name__ == "__main__": from Bio._utils import run_doctest + run_doctest(verbose=0)
codereview_python_data_545
    run_keras_single_device('cpu', 0)


-@raises(Exception, "TF device and DALI device mismatch. TF*: CPU, DALI*: GPU for output*")
def test_keras_wrong_placement_gpu():
    with tf.device('cpu:0'):
        model = keras_model()

You don't need to add a trailing `*` at the end of the glob pattern.

    run_keras_single_device('cpu', 0)


+@raises(Exception, "TF device and DALI device mismatch. TF*: CPU, DALI*: GPU for output")
def test_keras_wrong_placement_gpu():
    with tf.device('cpu:0'):
        model = keras_model()
codereview_python_data_552
        yield {
            'resource_id': violation.resource_id,
            'resource_type': violation.resource_type,
-            'resource_name': violation.resource_name,
            'full_name': violation.full_name,
            'rule_index': violation.rule_index,
            'rule_name': violation.rule_name,

Is it possible to have any other values for parent than kms_keyring?

        yield {
            'resource_id': violation.resource_id,
            'resource_type': violation.resource_type,
+            'resource_name': violation.resource_id,
            'full_name': violation.full_name,
            'rule_index': violation.rule_index,
            'rule_name': violation.rule_name,
codereview_python_data_553
headers["Content-Type"] = "application/json" self.log.debug("Request: %s %s %s", log_method, url, data[:self.logger_limit] if data else None) - with log_std_streams(logger=self.log): - response = self.http_request(method=log_method, url=url, data=data, - headers=headers, cookies=self._cookies, timeout=self.timeout) resp = response.content if not isinstance(resp, str): Let's not call this risky fd juggling for each API call headers["Content-Type"] = "application/json" self.log.debug("Request: %s %s %s", log_method, url, data[:self.logger_limit] if data else None) + + response = self.http_request(method=log_method, url=url, data=data, headers=headers, cookies=self._cookies, + timeout=self.timeout) resp = response.content if not isinstance(resp, str):
codereview_python_data_560
            family=task.task_family,
            module=task.task_module,
            retry_policy_dict=_get_retry_policy_dict(task),
-            deps_retry_policy_dicts=deps_retry_policy_dicts)

    def _validate_dependency(self, dependency):
        if isinstance(dependency, Target):

As I've asked you in every iteration before. Why do we need the `deps_retry_policy_dicts`?

            family=task.task_family,
            module=task.task_module,
            retry_policy_dict=_get_retry_policy_dict(task),
+            )

    def _validate_dependency(self, dependency):
        if isinstance(dependency, Target):
codereview_python_data_561
        Parameters
        ----------
        row_labels : list, optional
-            Indices of rows to select.
        row_positions : list-like of ints, optional
            Numeric indices of rows to select.
        col_labels : list, optional
-            Indices of columns to select.
        col_positions : list-like of ints, optional
            Numeric indices of columns to select.

Why don't we add the type hints here as it is already done for `ModinDataframe`/`PandasDataFrame`?

        Parameters
        ----------
        row_labels : list, optional
+            Row labels to select.
        row_positions : list-like of ints, optional
            Numeric indices of rows to select.
        col_labels : list, optional
+            Column labels to select.
        col_positions : list-like of ints, optional
            Numeric indices of columns to select.
codereview_python_data_563
from google.cloud.security.common.data_access import csv_writer
from google.cloud.security.common.data_access import firewall_rule_dao
-from google.cloud.security.common.gcp_type.resource import ResourceType
from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.scanner.audit import fw_rules_engine
from google.cloud.security.scanner.scanners import base_scanner

I think our general pattern for using modules is to import the module (not the classname), then use module.ClassName(...) so you'd have `from google.cloud.security.common.gcp_type import resource_type` and on line 191, you'd have:
```
resource_type.ResourceType.FIREWALL_RULE: ...
```

from google.cloud.security.common.data_access import csv_writer
from google.cloud.security.common.data_access import firewall_rule_dao
+from google.cloud.security.common.gcp_type import resource
from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.scanner.audit import fw_rules_engine
from google.cloud.security.scanner.scanners import base_scanner
codereview_python_data_565
        ordered = []
        newscripts = []

        for s in scripts:
-            if s[-2:] != "py":
-                continue
            if s in current:
                ordered.append(current[s])
            else:

Any reason to hard-code and enforce only `.py` files? I could imagine users want to write scripts (without extensions) and be able to load them into mitmproxy at the same time?

        ordered = []
        newscripts = []

        for s in scripts:
            if s in current:
                ordered.append(current[s])
            else:
codereview_python_data_567
                   testcase.get_metadata('last_tested_crash_revision') or
                   testcase.crash_revision)

  fuzzer_display = get_fuzzer_display(testcase)
-  fuzzer_name = fuzzer_display.name or ''
-  fuzz_target = fuzzer_display.target or ''
-  engine = (fuzzer_display.engine or '').lower()
-  sanitizer = environment.get_memory_tool_name(testcase.job_type).lower()

  result = help_format.replace('%TESTCASE%', testcase_id)
  result = result.replace('%PROJECT%', project_name)

one small concern here: why should these be lowered by default? I know we need this to make things work, but we shouldn't be doing this here, since it's an implementation detail tied to another system.

                   testcase.get_metadata('last_tested_crash_revision') or
                   testcase.crash_revision)

  fuzzer_display = get_fuzzer_display(testcase)
+  fuzzer_name = fuzzer_display.name or 'NA'
+  fuzz_target = fuzzer_display.target or 'NA'
+  engine = fuzzer_display.engine or 'NA'
+  sanitizer = environment.get_memory_tool_name(testcase.job_type)

  result = help_format.replace('%TESTCASE%', testcase_id)
  result = result.replace('%PROJECT%', project_name)
codereview_python_data_569
""" universe = MDAnalysis.Universe(topology_path) for element in elements: - assert element in universe._topology[topology_section] def test_all_bonds(): If you've gone to the effort of writing down all the bond names and passing them this far, you may as well include some sort of detailed error message, `"Bond type {} not found".format(name)` """ universe = MDAnalysis.Universe(topology_path) for element in elements: + assert element in universe._topology[topology_section], \ + 'Interaction type "{}" not found'.format(name) def test_all_bonds():
codereview_python_data_574
def validateFloat(value):
-    return isinstance(value, float)


def validateInteger(value):

What happens if we pass an "integer" value to a parameter that expects a float? I suspect that the evaluator (since it doesn't interact with the param definition at all) will pass it in as an integer type and this will fail.

def validateFloat(value):
+    return isinstance(value, float) or validateInteger(value)


def validateInteger(value):
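A sketch of the accepted fix: a float parameter should also accept ints, since the evaluator passes literals through with their natural type. Note that `isinstance(True, int)` is `True` in Python, so the integer validator shown here excludes bools explicitly; the real `validateInteger` may handle this differently.

```python
def validateInteger(value):
    # bool is a subclass of int, so reject it explicitly.
    return isinstance(value, int) and not isinstance(value, bool)


def validateFloat(value):
    return isinstance(value, float) or validateInteger(value)


assert validateFloat(1.5)
assert validateFloat(2)         # an integer literal is fine for a float param
assert not validateFloat(True)  # bool is technically an int, but rejected
assert not validateFloat("2")
```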
codereview_python_data_576
        if self.is_cpp_class_scope and function and function.scope is self:
            # for C++ classes we can have both member and non-member operators
            # and we really want to consider both
-            outer_scope = self.outer_scope
-            while outer_scope and not outer_scope.is_module_scope:
-                outer_scope = outer_scope.module_scope
-            if outer_scope:
-                global_func = outer_scope.lookup_here("operator%s" % operator)
            if global_func:
                nonmember_alternatives = global_func.all_alternatives()

Shouldn't this just read `global_scope = self.global_scope()`? Without the loop?

        if self.is_cpp_class_scope and function and function.scope is self:
            # for C++ classes we can have both member and non-member operators
            # and we really want to consider both
+            global_scope = self.global_scope()
+            if global_scope:
+                global_func = global_scope.lookup_here("operator%s" % operator)
            if global_func:
                nonmember_alternatives = global_func.all_alternatives()
codereview_python_data_579
"""List members by prefix. Args: - member_name_prefix(str): the prefix of member_name to query Returns: proto: the returned proto message of list_members Same thing, it's not clear exactly what is the `prefix` here. So it would be nice to have an short example. """List members by prefix. Args: + member_name_prefix (str): the prefix of member_name to query Returns: proto: the returned proto message of list_members
codereview_python_data_585
            # https://github.com/mitmproxy/mitmproxy/issues/2197
            if hf.request.http_version == "HTTP/2.0":
                hf.request.http_version = "HTTP/1.1"
-                host = hf.request.headers.pop(":authority", hf.request.pretty_host)
-                hf.request.headers.insert(0, "host", host)
            self.q.put(hf)
        ctx.master.addons.trigger("update", lst)

If there is no authority header (i.e. someone intentionally deleted it), I would argue we probably don't want a Host header in the replay either. How about we only add it if it exists, and do nothing otherwise?

            # https://github.com/mitmproxy/mitmproxy/issues/2197
            if hf.request.http_version == "HTTP/2.0":
                hf.request.http_version = "HTTP/1.1"
+                host = hf.request.headers.pop(":authority", None)
+                if host is not None:
+                    hf.request.headers.insert(0, "host", host)
            self.q.put(hf)
        ctx.master.addons.trigger("update", lst)
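A self-contained sketch of the review's suggestion: only synthesize a `Host` header when an `:authority` pseudo-header was actually present. The list of `(name, value)` pairs stands in for mitmproxy's ordered `Headers` object.

```python
def downgrade_to_http1(headers):
    # Pop :authority if present; None means it was intentionally absent.
    host = next((v for k, v in headers if k == ":authority"), None)
    headers = [(k, v) for k, v in headers if k != ":authority"]
    if host is not None:
        # Mirror headers.insert(0, "host", host): Host goes first.
        headers.insert(0, ("host", host))
    return headers


h2_headers = [(":authority", "example.com"), ("accept", "*/*")]
print(downgrade_to_http1(h2_headers))
# [('host', 'example.com'), ('accept', '*/*')]
print(downgrade_to_http1([("accept", "*/*")]))
# [('accept', '*/*')]  -- no Host header is invented
```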
codereview_python_data_588
        )
        return None

-    evm_assets = (
-        AssetType.ETHEREUM_TOKEN,
-        AssetType.POLYGON_TOKEN,
-        AssetType.XDAI_TOKEN,
-        AssetType.AVALANCHE_TOKEN,
-    )
    if asset_type in evm_assets:
        cursor.execute(

Move that to a constant. No need to initialize this list every time this function is run. Or even as a function of AssetType. Whatever you prefer.

        )
        return None

+    evm_assets = AssetType.evm_assets()
    if asset_type in evm_assets:
        cursor.execute(
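A hypothetical sketch of the "function of AssetType" option the reviewer offers: the enum itself exposes its EVM members once, so callers never rebuild the tuple. The member names mirror the diff; the real class has many more members and may implement this differently.

```python
from enum import Enum


class AssetType(Enum):
    FIAT = 1
    ETHEREUM_TOKEN = 2
    POLYGON_TOKEN = 3
    XDAI_TOKEN = 4
    AVALANCHE_TOKEN = 5

    @classmethod
    def evm_assets(cls):
        # One canonical definition, shared by every call site.
        return (
            cls.ETHEREUM_TOKEN,
            cls.POLYGON_TOKEN,
            cls.XDAI_TOKEN,
            cls.AVALANCHE_TOKEN,
        )


assert AssetType.POLYGON_TOKEN in AssetType.evm_assets()
assert AssetType.FIAT not in AssetType.evm_assets()
```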
codereview_python_data_589
import dgl
-import sys
-import random
import time
import numpy as np
from multiprocessing import Process

What's the type of `g` here? `GraphStore`?

import dgl
import time
import numpy as np
from multiprocessing import Process
codereview_python_data_594
from bzt.modules.siege import SiegeExecutor, DataLogReader
from tests import BZTestCase
from tests.mocks import EngineEmul
-from bzt.utils import is_windows


-def tool_name():
-    if is_windows():
-        return 'siege.bat'
-    else:
-        return 'siege.sh'


def get_res_path(resource):

Look how it's done everywhere with utils.EXE_SUFFIX constant.

from bzt.modules.siege import SiegeExecutor, DataLogReader
from tests import BZTestCase
from tests.mocks import EngineEmul
+from bzt.utils import EXE_SUFFIX

+TOOL_NAME = 'siege' + EXE_SUFFIX


def get_res_path(resource):
codereview_python_data_599
# Generate autosummary pages. Output should be set with: `:toctree: pythonapi/`
autosummary_generate = ['Python-API.rst']

-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']

-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-# source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

The only change I would suggest is moving these lines back below the `templates_path` variable to keep the diffs smaller.

# Generate autosummary pages. Output should be set with: `:toctree: pythonapi/`
autosummary_generate = ['Python-API.rst']

+# Only the class' docstring is inserted.
+autoclass_content = 'class'

+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False

# The master toctree document.
master_doc = 'index'
codereview_python_data_614
        os.environ,
    )

-    create_args(x, zipline.extension_args)


def extract_option_object(option):
    """Convert a click.option call into a click.Option object.

should we parse the args before loading the extension? I could imagine the extension code wanting access to this to find resources

        os.environ,
    )


def extract_option_object(option):
    """Convert a click.option call into a click.Option object.
codereview_python_data_615
@singledispatch
def get_validator_set(conn, height):
-    """Get validator set at which are not synced"""
    raise NotImplementedError

The docstring is not very clear.

@singledispatch
def get_validator_set(conn, height):
+    """Get validator set for a given `height`, if `height` is not specified
+    then return the latest validator set"""
    raise NotImplementedError
codereview_python_data_618
        worker_chunk = chunk_size + (minibatch_i < remainder)
        if worker_chunk == 0:
            break
-        sample_slice = sample_range.get_slice(queued_no, queued_no + worker_chunk)
        minibatch = TaskArgs(minibatch_i, sample_range=sample_slice)
        minibatches.append(minibatch)
        queued_no += worker_chunk

Assuming that `sample_range` is a full range, not a slice (and that you've followed the suggestion in `SampleRange`):
```suggestion
        sample_slice = sample_range[queued_no:queued_no + worker_chunk]
```

        worker_chunk = chunk_size + (minibatch_i < remainder)
        if worker_chunk == 0:
            break
+        sample_slice = sample_range[queued_no:queued_no + worker_chunk]
        minibatch = TaskArgs(minibatch_i, sample_range=sample_slice)
        minibatches.append(minibatch)
        queued_no += worker_chunk
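A guess at what the suggested `SampleRange` slicing could look like: `__getitem__` accepts a slice and returns a narrowed range, so call sites read `sample_range[a:b]` instead of `sample_range.get_slice(a, b)`. The class below is a minimal stand-in, not DALI's actual implementation.

```python
class SampleRange:
    def __init__(self, start, stop):
        self.start = start
        self.stop = stop

    def __getitem__(self, key):
        if not isinstance(key, slice):
            raise TypeError("expected a slice")
        # Interpret slice bounds relative to this range's start.
        start = self.start + (key.start or 0)
        stop = self.start + key.stop if key.stop is not None else self.stop
        return SampleRange(start, min(stop, self.stop))

    def __repr__(self):
        return "SampleRange({}, {})".format(self.start, self.stop)


full = SampleRange(0, 100)
print(full[10:14])  # SampleRange(10, 14)
```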
codereview_python_data_624
def run_tasks(services):
    loop = asyncio.get_event_loop()
    loop.create_task(build_docs())
-    loop.run_until_complete(app_svc.validate_requirements())
    loop.run_until_complete(data_svc.restore_state())
    loop.run_until_complete(RestApi(services).enable())
    loop.run_until_complete(app_svc.register_contacts())

this can likely be a create_task instead of run_until_complete

def run_tasks(services):
    loop = asyncio.get_event_loop()
    loop.create_task(build_docs())
+    loop.create_task(app_svc.validate_requirements())
    loop.run_until_complete(data_svc.restore_state())
    loop.run_until_complete(RestApi(services).enable())
    loop.run_until_complete(app_svc.register_contacts())
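A minimal demo of the distinction the review draws: `run_until_complete` blocks until its coroutine finishes, while `create_task` only schedules it to run once the loop is driven (here, by the later `run_until_complete` call).

```python
import asyncio


async def background_job():
    await asyncio.sleep(0.1)
    print("background job done")


async def main_job():
    await asyncio.sleep(0.2)
    print("main job done")


loop = asyncio.new_event_loop()
loop.create_task(background_job())   # scheduled, not yet executed
loop.run_until_complete(main_job())  # drives the loop; both coroutines run
loop.close()
```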
codereview_python_data_626
            if not self._schema.IsDeprecatedArg(arg_name):
                continue
            meta = self._schema.DeprecatedArgMeta(arg_name)
            with warnings.catch_warnings():
                warnings.simplefilter("default")

Do I understand it correctly that now Python only issues warnings, but the errors are left to the backend?

            if not self._schema.IsDeprecatedArg(arg_name):
                continue
            meta = self._schema.DeprecatedArgMeta(arg_name)
+            msg = meta['msg']
            with warnings.catch_warnings():
                warnings.simplefilter("default")
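A small demo of the `catch_warnings`/`simplefilter` pattern used in the diff: `"default"` makes each distinct deprecation warning visible once per location, without disturbing the application's process-wide filters. The function name and message are illustrative, not DALI's.

```python
import warnings


def check_deprecated(arg_name, msg):
    # Temporarily force deprecation warnings to be shown; the global
    # filter state is restored when the with-block exits.
    with warnings.catch_warnings():
        warnings.simplefilter("default")
        warnings.warn("{}: {}".format(arg_name, msg), DeprecationWarning)


check_deprecated("resize_a", "use `resize` instead")  # printed to stderr
# Outside the block the global filters are unchanged, so environments that
# silence DeprecationWarning by default keep their behavior.
```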
codereview_python_data_632
        for entry in py_entries:
            if entry.is_cglobal:
                code.put_var_gotref(entry)
-                code.put_decref_set(entry.cname, entry.type, "Py_None")
            else:
                code.put_var_xdecref_clear(entry)

This kind of code doesn't seem to appear anywhere else, but should there be a `put_var_decref_set()`, just for consistency?

        for entry in py_entries:
            if entry.is_cglobal:
                code.put_var_gotref(entry)
+                code.put_var_decref_set(entry, "Py_None")
            else:
                code.put_var_xdecref_clear(entry)
codereview_python_data_638
        model_name = request.handle
        try:
            self.modeller.delete_model(model_name)
-            success = model_pb2.DeleteModelReply.Status.Value('SUCCESS')
-            reply = model_pb2.DeleteModelReply(status=success)
        except Exception:
            LOGGER.exception('Unable to delete model: %s', model_name)
-            fail = model_pb2.DeleteModelReply.Status.Value('FAIL')
-            reply = model_pb2.DeleteModelReply(status=fail)
-        return reply

    def ListModel(self, request, _):
        """List all models.

nit: i would change this line:
```
status = model_pb2.DeleteModelReply.Status.Value('SUCCESS')
```
So that you can save two lines of code like this:
```
try:
    self.modeller.delete_model(model_name)
    status = model_pb2.DeleteModelReply.Status.Value('SUCCESS')
except Exception:
    LOGGER.exception('Unable to delete model: %s', model_name)
    status = model_pb2.DeleteModelReply.Status.Value('FAIL')
return model_pb2.DeleteModelReply(status=status)
```

        model_name = request.handle
        try:
            self.modeller.delete_model(model_name)
+            status = model_pb2.DeleteModelReply.Status.Value('SUCCESS')
        except Exception:
            LOGGER.exception('Unable to delete model: %s', model_name)
+            status = model_pb2.DeleteModelReply.Status.Value('FAIL')
+        return model_pb2.DeleteModelReply(status=status)

    def ListModel(self, request, _):
        """List all models.
codereview_python_data_640
# See the License for the specific language governing permissions and
# limitations under the License.

-"""Creates a Cloud SQL instance template for forseti_inventory."""


def GenerateConfig(context):

Update the pydoc as we're not creating Cloud SQL.

# See the License for the specific language governing permissions and
# limitations under the License.

+"""Creates a GCE instance template for Forseti Security."""


def GenerateConfig(context):
codereview_python_data_642
    try:
        stream = openfunction(filename, mode=mode)
    except (IOError, OSError) as err:
        if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
            six.reraise(*sys.exc_info())
        return None

Aha I missed this change. So the error is getting raised here, when it should (apparently) be silencing the error here and raising it later (after `None` has been returned). We shouldn't be changing how errors are propagated here

    try:
        stream = openfunction(filename, mode=mode)
    except (IOError, OSError) as err:
+        # An exception might be raised due to two reasons, first the openfunction is unable to open the file, in this
+        # case we have to ignore the error and return None. Second is when openfunction can't open the file because
+        # either the file isn't there or the permissions don't allow access.
        if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
            six.reraise(*sys.exc_info())
        return None
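A standalone illustration of the errno check in this diff: re-raise only for "file missing" or "permission denied", and return `None` for other open failures (e.g. an opener that simply does not apply to this file). The helper name is hypothetical.

```python
import errno


def open_or_none(openfunction, filename, mode='r'):
    try:
        return openfunction(filename, mode=mode)
    except (IOError, OSError) as err:
        # Genuine access problems propagate; anything else means this
        # opener does not apply, which we signal with None.
        if errno.errorcode[err.errno] in ['ENOENT', 'EACCES']:
            raise
        return None


try:
    open_or_none(open, '/no/such/path')
except OSError as err:
    print('re-raised:', errno.errorcode[err.errno])  # re-raised: ENOENT
```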
codereview_python_data_643
    def get_ann_info(self, idx):
        """Get annotation of concatenated dataset by index.

-        This is needed by MixUp.
-
        Args:
            idx (int): Index of data.

lines 83-92 can be encapsulated into a function like get_sample_idx and we can use it in many places.

    def get_ann_info(self, idx):
        """Get annotation of concatenated dataset by index.

        Args:
            idx (int): Index of data.
codereview_python_data_644
    matrix = [column] * nd
    out_types = [ltype.int] * nd + [ltype.int]
    out_value = [list(range(div))] * nd + \
-                [[nrows // div for i in range(div)]]
    d_in = dt.Frame(matrix)
    d_members = aggregate(d_in, min_rows=0, nd_max_bins=div, seed=1,

could be `[nrows // div] * div` too

    matrix = [column] * nd
    out_types = [ltype.int] * nd + [ltype.int]
    out_value = [list(range(div))] * nd + \
+                [[nrows // div] * div]
    d_in = dt.Frame(matrix)
    d_members = aggregate(d_in, min_rows=0, nd_max_bins=div, seed=1,
codereview_python_data_656
        defaults_tuple = TupleNode(
            self.pos,
            args=[
-                arg.default.arg if hasattr(arg.default, "arg") else arg.default
                for arg in default_args
            ]
        )

Haven't looked into the details, but this is a bit too hackish for a fix. We need to be more specific than "happens to have an attribute with name `arg`".

        defaults_tuple = TupleNode(
            self.pos,
            args=[
+                arg.default
                for arg in default_args
            ]
        )
codereview_python_data_665
        if asset in self.sources_map:
            # go find this asset in our custom sources
            try:
-                return self.sources_map[asset].loc[self.current_day].\
-                    loc[column]
            except:
                log.error(
                    "Could not find price for asset={0}, current_day={1},"

you don't use dt anywhere below.

        if asset in self.sources_map:
            # go find this asset in our custom sources
            try:
+                # TODO: Change to index both dt and column at once.
+                return self.sources_map[asset].loc[dt].loc[column]
            except:
                log.error(
                    "Could not find price for asset={0}, current_day={1},"
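The TODO in the accepted diff, shown concretely: pandas `.loc` accepts a row and a column label in one call, which avoids materializing the intermediate row Series. The frame contents are made up for the demo.

```python
import pandas as pd

frame = pd.DataFrame(
    {'price': [10.0, 10.5], 'volume': [100, 150]},
    index=pd.to_datetime(['2014-01-02', '2014-01-03']),
)
dt = pd.Timestamp('2014-01-03')

chained = frame.loc[dt].loc['price']  # two lookups, as in the diff
single = frame.loc[dt, 'price']       # one indexing operation
assert chained == single == 10.5
```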
codereview_python_data_670
    to the number of vertices minus one), making it possible to assign a
    meaningful value to all graphs.

    Parameters
    ----------
    G : NetworkX graph

This would not work for `weighted` graphs, is `harmonic_diameter` defined for a weighted graph?

    to the number of vertices minus one), making it possible to assign a
    meaningful value to all graphs.

+    Note that in [1] the harmonic diameter is called "connectivity length":
+    however, "harmonic diameter" is a more standard name from the
+    theory of metric spaces.
+
    Parameters
    ----------
    G : NetworkX graph
codereview_python_data_677
        return self.has_run

    def run(self):
-        if self.set_tracking_url is not None:
-            self.set_tracking_url(tracking_url)
        self.has_run = True

a = A()

Wait. Does the user really have to do this check before using the tracking_url? Can't we somehow guarantee that it's always present?

        return self.has_run

    def run(self):
+        self.set_tracking_url(tracking_url)
        self.has_run = True

a = A()
codereview_python_data_678
    iterator = tf_v1.data.make_initializable_iterator(daliset)
    images, labels = iterator.get_next()

-    images = tf_v1.reshape(images, [BATCH_SIZE, IMAGE_SIZE*IMAGE_SIZE])
    labels = tf_v1.reshape(
        tf_v1.one_hot(labels, NUM_CLASSES),
        [BATCH_SIZE, NUM_CLASSES])

out of curiosity: Why do we need to reshape? Aren't the outputs of the dataset already shaped?

    iterator = tf_v1.data.make_initializable_iterator(daliset)
    images, labels = iterator.get_next()

+    # images = tf_v1.reshape(images, [BATCH_SIZE, IMAGE_SIZE*IMAGE_SIZE])
    labels = tf_v1.reshape(
        tf_v1.one_hot(labels, NUM_CLASSES),
        [BATCH_SIZE, NUM_CLASSES])
codereview_python_data_681
    Returns:
        user_recommendations_top_artist: list of recommended recordings of top artist.
-        user_recommendations_top_artist: list of recommended recordings of similar artist.
    """
    top_artists_recordings = top_artists_candidate_set.select('user_id', 'recording_id') \
        .where(col('user_id') == user_id)

```suggestion
        user_recommendations_similar_artist: list of recommended recordings of similar artist.
```

    Returns:
        user_recommendations_top_artist: list of recommended recordings of top artist.
+        user_recommendations_similar_artist: list of recommended recordings of similar artist.
    """
    top_artists_recordings = top_artists_candidate_set.select('user_id', 'recording_id') \
        .where(col('user_id') == user_id)
codereview_python_data_684
"PDB_CHECK_RIGHTHAND_PA", # for testing right handedness of principal_axes "MMTF_NOCRYST", # File with meaningless CRYST1 record (Issue #2679, PR #2685) "FHIAIMS", # to test FHIAIMS coordinate files - "SDF_molecule" # MDL SDFile for rdkit "PDBX", # PDBxfile ] ```suggestion "SDF_molecule", # MDL SDFile for rdkit ``` This missing comma is just breaking all the tests :-) "PDB_CHECK_RIGHTHAND_PA", # for testing right handedness of principal_axes "MMTF_NOCRYST", # File with meaningless CRYST1 record (Issue #2679, PR #2685) "FHIAIMS", # to test FHIAIMS coordinate files + "SDF_molecule", # MDL SDFile for rdkit "PDBX", # PDBxfile ]
codereview_python_data_688
        .concat_map(lambda tx: tx['outputs']['public_keys'])
        .reduce(lambda l, r: l + r), multi=True))

-    # secondary index on inputs/transaction links (txid, cid)
    connection.run(
        r.db(dbname)
        .table('bigchain')

Maybe use `output id` instead of `cid`.

        .concat_map(lambda tx: tx['outputs']['public_keys'])
        .reduce(lambda l, r: l + r), multi=True))

+    # secondary index on inputs/transaction links (txid, output)
    connection.run(
        r.db(dbname)
        .table('bigchain')
codereview_python_data_703
    name='recommended_dict', probability=0.10, manually_enable=False)
VALUE_PROFILE_STRATEGY = Strategy(
    name='value_profile', probability=0.33, manually_enable=False)
PEACH_GRAMMAR_MUTATION_STRATEGY = Strategy(
    name='peach_grammar_mutation', probability=0.10, manually_enable=True)

This is fine to start, but we should likely increase it later. Since relatively few jobs will support this, we'll want to run it more often so that it's actually used when it can be used.

    name='recommended_dict', probability=0.10, manually_enable=False)
VALUE_PROFILE_STRATEGY = Strategy(
    name='value_profile', probability=0.33, manually_enable=False)
+# TODO(mpherman): Increase the probability of peach mutation strategy.
PEACH_GRAMMAR_MUTATION_STRATEGY = Strategy(
    name='peach_grammar_mutation', probability=0.10, manually_enable=True)
codereview_python_data_712
""" Replace all variables with facts from the combo to build a single test variant """ - score, rewards, combo_set_id, combo_link_id = 0, [], set(), set() for var in combo: score += (score + var['score']) rewards.append(var['id']) declare the set and list the same way list() or [], set() or {} """ Replace all variables with facts from the combo to build a single test variant """ + score, rewards, combo_set_id, combo_link_id = 0, list(), set(), set() for var in combo: score += (score + var['score']) rewards.append(var['id'])
codereview_python_data_717
        self,
        data,
        print_example=False,
        is_final=False,
-        expected_failure=None,
    ):
        text_repr = [None]
        if self.settings.deadline is None:

Cleaner to just refer to `self.collector` in the next few lines without the assignment.

        self,
        data,
        print_example=False,
        is_final=False,
+        expected_failure=None, collect=False,
    ):
        text_repr = [None]
        if self.settings.deadline is None:
codereview_python_data_720
    # Services

    def debug(self, lvl, msg):
        if self.debug_level >= lvl:
-            if conf.interactive:
-                log_interactive.debug(msg)
-            else:
-                print(msg)

    def send(self, pkt):
        if self.state.state in self.interception_points:

Is there a reason why we don't use logging for this?

    # Services

    def debug(self, lvl, msg):
        if self.debug_level >= lvl:
+            log_runtime.debug(msg)

    def send(self, pkt):
        if self.state.state in self.interception_points:
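What the accepted change relies on: a module-level logger respects the configured level and handlers, unlike a bare `print()` that always writes to stdout. The logger below is a stand-in for Scapy's `log_runtime`; the `basicConfig` setup is for the demo only.

```python
import logging

log_runtime = logging.getLogger("scapy.runtime")
logging.basicConfig(level=logging.DEBUG,
                    format="%(name)s %(levelname)s: %(message)s")


def debug(debug_level, lvl, msg):
    if debug_level >= lvl:
        log_runtime.debug(msg)  # routed through logging, not print()


debug(2, 1, "state transition accepted")
# Handlers, formatting, and verbosity are now controlled centrally by
# whoever configures the "scapy.runtime" logger.
```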
codereview_python_data_723
    new_db_name = db_name + '_new'
    old_path = os.path.join(db_dir, db_name)
    new_path = os.path.join(db_dir, new_db_name)
-    new_seqno_db_name = config.stateTsDbName + '_new'
-    # new_seq_no_path = os.path.join(db_dir, new_seqno_db_name)
    try:
        dest_seq_no_db_storage = initKeyValueStorage(config.reqIdToTxnStorage,
                                                     db_dir,

Why is it not `reqIdToTxnStorage`?

    new_db_name = db_name + '_new'
    old_path = os.path.join(db_dir, db_name)
    new_path = os.path.join(db_dir, new_db_name)
+    new_seqno_db_name = config.seqNoDbName + '_new'
    try:
        dest_seq_no_db_storage = initKeyValueStorage(config.reqIdToTxnStorage,
                                                     db_dir,
codereview_python_data_726
        )

    def get_config(self):
-        super_config = super().get_config()
-        super_config = {k: v for k, v in super_config.items() if k != "scale_fn"}
-        return {**super_config}


@tf.keras.utils.register_keras_serializable(package="Addons")

Would you mind making these serializations explicit for each subclass. It's a bit brittle to depend on that check going forward.

        )

    def get_config(self):
+        return {
+            "initial_learning_rate": self.initial_learning_rate,
+            "maximal_learning_rate": self.maximal_learning_rate,
+            "step_size": self.step_size,
+            "scale_mode": self.scale_mode,
+        }


@tf.keras.utils.register_keras_serializable(package="Addons")
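The shape of the explicit serialization the reviewer asked for, reduced to plain Python so it runs without TensorFlow: each subclass lists exactly the constructor arguments it needs, so `cls(**obj.get_config())` round-trips without fragile key filtering. The class name and defaults are illustrative.

```python
class CyclicalSchedule:
    def __init__(self, initial_learning_rate, maximal_learning_rate,
                 step_size, scale_mode="cycle"):
        self.initial_learning_rate = initial_learning_rate
        self.maximal_learning_rate = maximal_learning_rate
        self.step_size = step_size
        self.scale_mode = scale_mode

    def get_config(self):
        # Explicit field list: adding or removing a parent-class attribute
        # can no longer silently change what gets serialized.
        return {
            "initial_learning_rate": self.initial_learning_rate,
            "maximal_learning_rate": self.maximal_learning_rate,
            "step_size": self.step_size,
            "scale_mode": self.scale_mode,
        }


sched = CyclicalSchedule(1e-4, 1e-2, step_size=2000)
clone = CyclicalSchedule(**sched.get_config())
assert clone.get_config() == sched.get_config()
```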
codereview_python_data_730
        out_size = self.roi_layers[0].output_size
        num_levels = len(feats)
        roi_feats = feats[0].new_zeros(
-            rois.size(0), self.out_channels, *out_size)
        # TODO: remove this when parrots supports
        if torch.__version__ == 'parrots':
            roi_feats.requires_grad = True

Does parrots support:
```python
roi_feats = feats[0].new_zeros(
    rois.size(0), self.out_channels, *out_size, requires_grad=True)
```

        out_size = self.roi_layers[0].output_size
        num_levels = len(feats)
        roi_feats = feats[0].new_zeros(
+            rois.size(0), self.out_channels, *out_size, requires_grad=True)
        # TODO: remove this when parrots supports
        if torch.__version__ == 'parrots':
            roi_feats.requires_grad = True
codereview_python_data_732
-# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.

Are you missing something in the config dictionary, e.g. like `use_bias` or initializers?

+# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
codereview_python_data_733
    apigateway_models_Stage_init_orig(self, name=None, deployment_id=None, variables=None,
                                      description='', cacheClusterEnabled=False, cacheClusterSize=None)

-    if cacheClusterSize or cacheClusterEnabled:
        self['cacheClusterStatus'] = 'AVAILABLE'

apigateway_models.Stage.__init__ = apigateway_models_Stage_init

nit: Perhaps better change this to (to avoid overwriting if status exists):
```
if (cacheClusterSize or cacheClusterEnabled) and not self.get('cacheClusterStatus'):
    ...
```

    apigateway_models_Stage_init_orig(self, name=None, deployment_id=None, variables=None,
                                      description='', cacheClusterEnabled=False, cacheClusterSize=None)

+    if (cacheClusterSize or cacheClusterEnabled) and not self.get('cacheClusterStatus'):
        self['cacheClusterStatus'] = 'AVAILABLE'

apigateway_models.Stage.__init__ = apigateway_models_Stage_init
codereview_python_data_735
        if batch_norm:
            self.bn = nn.BatchNorm(in_channels=out_feat)

-    def message(self, edges):
-        r"""The message computation function
-        """
-        theta_x = self.theta(edges.dst['x'] - edges.src['x'])
-        phi_x = self.phi(edges.dst['x'])
-        return {'e': theta_x + phi_x}
-
    def set_allow_zero_in_degree(self, set_value):
        r"""

Also remove the message function in the MXNet implementation.

        if batch_norm:
            self.bn = nn.BatchNorm(in_channels=out_feat)

    def set_allow_zero_in_degree(self, set_value):
        r"""
codereview_python_data_739
-from networkx.algorithms.applications import *
from networkx.algorithms.assortativity import *
from networkx.algorithms.block import *
from networkx.algorithms.boundary import *

Maybe this would be better in a `networkx.algorithms.tsp` module; all the other algorithms modules are named for the type of problem they solve (clique, independent set, etc.).

from networkx.algorithms.assortativity import *
from networkx.algorithms.block import *
from networkx.algorithms.boundary import *
codereview_python_data_745
            serialization='pickle',
        )

-        market_data = ('^GSPC_benchmark.csv', 'treasury_curves.csv')
        for data in market_data:
            update_modified_time(
                cls.tmpdir.getpath(

Shall we make a global name for the default benchmark symbol, so we don't have to change it in 5 places?

            serialization='pickle',
        )

+        market_data = (
+            '{}_benchmark.csv'.format(BENCHMARK_SYMBOL),
+            'treasury_curves.csv'
+        )
        for data in market_data:
            update_modified_time(
                cls.tmpdir.getpath(
codereview_python_data_762
    with open(QemuProcess.LOG_PATH) as f:
      # Strip non-printable characters at beginning of qemu log
      qemu_log = ''.join(c for c in f.read() if c in string.printable)
-      # Only report the tail of the log; otherwise we would only end up seeing
-      # the beginning of it once the logging library later truncates it to
-      # the STACKDRIVER_LOG_MESSAGE_LIMIT.
-      logs.log_warn(qemu_log[-64 * 1024:])
  else:
    logs.log_error('Qemu log not found in {}'.format(QemuProcess.LOG_PATH))

nit: put `64 * 1024` in a constant in a suitable platforms/fuchsia/ module and use in both places.

    with open(QemuProcess.LOG_PATH) as f:
      # Strip non-printable characters at beginning of qemu log
      qemu_log = ''.join(c for c in f.read() if c in string.printable)
+      logs.log_warn(qemu_log[-undercoat.QEMU_LOG_LIMIT:])
  else:
    logs.log_error('Qemu log not found in {}'.format(QemuProcess.LOG_PATH))
codereview_python_data_766
-def hey(self, stimulus):
    if _is_silence(stimulus):
        return 'Fine. Be that way!'
    elif _is_shouting(stimulus):

You need to remove the `self`.

+def hey(stimulus):
    if _is_silence(stimulus):
        return 'Fine. Be that way!'
    elif _is_shouting(stimulus):
codereview_python_data_769
from abc import abstractmethod
-from pprint import pformat

from bzt.engine import EngineModule
from bzt.utils import BetterDict, iteritems


class FunctionalAggregator(EngineModule):
    def __init__(self):
        super(FunctionalAggregator, self).__init__()
        self.underlings = []

It's the reporter's job to report.

from abc import abstractmethod

from bzt.engine import EngineModule
from bzt.utils import BetterDict, iteritems


class FunctionalAggregator(EngineModule):
+    """
+    :type listeners: list[FunctionalAggregatorListener]
+    :type underlings: list[FunctionalResultsReader]
+    :type cumulative_results: ResultsTree
+    """
+
    def __init__(self):
        super(FunctionalAggregator, self).__init__()
        self.underlings = []
codereview_python_data_770
    class Meta(DashboardTable.Meta):
        model = Category
        fields = ('name', 'description', 'is_public')
-        sequenze = ('name', 'description', '...', 'is_public', 'actions')


class AttributeOptionGroupTable(DashboardTable):

`sequenze` should be `sequence`. Also please check the failing lint errors.

    class Meta(DashboardTable.Meta):
        model = Category
        fields = ('name', 'description', 'is_public')
+        sequence = ('name', 'description', '...', 'is_public', 'actions')


class AttributeOptionGroupTable(DashboardTable):
codereview_python_data_782
            return self._process(el, key)

        if isinstance(self.p.operation, ElementOperation):
            return OperationCallable(dynamic_operation, inputs=[map_obj],
                                     operation=self.p.operation)
        else:
-            return Callable(dynamic_operation, inputs=[map_obj])

    def _make_dynamic(self, hmap, dynamic_fn):

You could inline ``map_obj[key]`` into ``self._process``...

            return self._process(el, key)

        if isinstance(self.p.operation, ElementOperation):
            return OperationCallable(dynamic_operation, inputs=[map_obj],
+                                     link_inputs=self.p.link_inputs,
                                     operation=self.p.operation)
        else:
+            return Callable(dynamic_operation, inputs=[map_obj],
+                            link_inputs=self.p.link_inputs)

    def _make_dynamic(self, hmap, dynamic_fn):
codereview_python_data_783
              'scripts/enable_bls',
              'scripts/create_dirs.sh',
              'scripts/indy_old_cli_export_dids',
-              'scripts/setup_indy_node_iptable']
        )

missing 's' at the end of file name

              'scripts/enable_bls',
              'scripts/create_dirs.sh',
              'scripts/indy_old_cli_export_dids',
+              'scripts/setup_indy_node_iptables']
        )
codereview_python_data_787
try:
    from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
-    from nvidia.dali.pipeline import pipeline
    import nvidia.dali.types as types
    import nvidia.dali.fn as fn
except ImportError:

have we decided that we want to replace examples?

try:
    from nvidia.dali.plugin.pytorch import DALIClassificationIterator, LastBatchPolicy
+    from nvidia.dali.pipeline import pipeline_def
    import nvidia.dali.types as types
    import nvidia.dali.fn as fn
except ImportError:
codereview_python_data_789
            msg_aggregator=self.msg_aggregator,
        )

-    def _initialize_uniswap(self, premium: Optional[Premium]) -> None:
-        self.eth_modules['uniswap'] = Uniswap(
-            ethereum_manager=self.ethereum,
-            database=self.database,
-            premium=premium,
-            msg_aggregator=self.msg_aggregator,
-        )
-
    def get_zerion(self) -> Zerion:
        """Returns the initialized zerion. If it's not ready it waits for 5 seconds
        and then times out. This should really never happen

so this is not needed

            msg_aggregator=self.msg_aggregator,
        )

    def get_zerion(self) -> Zerion:
        """Returns the initialized zerion. If it's not ready it waits for 5 seconds
        and then times out. This should really never happen
codereview_python_data_795
def test_hostname(host):
-    assert host.check_output('hostname -s') == 'instance'


def test_etc_molecule_directory(host):

Why is this flipped? Looks unrelated and our pattern is `expected == returned`.

def test_hostname(host):
+    assert 'instance' == host.check_output('hostname -s')


def test_etc_molecule_directory(host):
codereview_python_data_796
    if check_exception_type:
        assert isinstance(
            md_e.value, type(pd_e)
-        ), "Got Modin Exception type {}, but pandas Exception type {}".format(
            type(md_e.value), type(pd_e)
        )
    if raising_exceptions:

This isn't super clear - perhaps say `Got Modin Exception type {}, but pandas Exception type {} was expected`?

    if check_exception_type:
        assert isinstance(
            md_e.value, type(pd_e)
+        ), "Got Modin Exception type {}, but pandas Exception type {} was expected".format(
            type(md_e.value), type(pd_e)
        )
    if raising_exceptions:
codereview_python_data_799
try:
    from Bio.Align import _aligners
except ImportError as e:
-    new_exc = ImportError("""{}: you should not import directly from the
-                          biopython source directory; please exit the source
-                          tree and re-launch your code from there""".format(e))
    new_exc.__cause__ = None
    raise new_exc

Don't use a triple quote string, which inserts the line breaks as is. Use single quotes (which will auto-concatenate)

try:
    from Bio.Align import _aligners
except ImportError as e:
+    new_exc = ImportError("{}: you should not import directly from the"
+                          "biopython source directory; please exit the source"
+                          "tree and re-launch your code from there".format(e))
    new_exc.__cause__ = None
    raise new_exc
codereview_python_data_807
  # TODO(crbug.com/920355): Reenable this when fork mode works with ChromeOS's
  # MSAN.
-  memory_tool = environment.get_memory_tool_name(
-      environment.get_value('JOB_NAME'))
  if memory_tool == 'MSAN' and environment.is_chromeos_system_job():
    return False

nit: move environment.get_value('JOB_NAME') to its own line for readability.

  # TODO(crbug.com/920355): Reenable this when fork mode works with ChromeOS's
  # MSAN.
+  job_name = environment.get_value('JOB_NAME')
+  memory_tool = environment.get_memory_tool_name(job_name)
  if memory_tool == 'MSAN' and environment.is_chromeos_system_job():
    return False
codereview_python_data_809
    verbose_name = _('Catalogue reviews')
    include_urls_in_parent = True

-    hidable_feature_name = 'reviews'

    def ready(self):
        self.detail_view = get_class('catalogue.reviews.views', 'ProductReviewDetail')
        self.create_view = get_class('catalogue.reviews.views', 'CreateProductReview')

You forgot to remove `oscar.apps.catalogue.reviews.apps.CatalogueReviewsConfig.hidable_feature_name`.

    verbose_name = _('Catalogue reviews')
    include_urls_in_parent = True

    def ready(self):
        self.detail_view = get_class('catalogue.reviews.views', 'ProductReviewDetail')
        self.create_view = get_class('catalogue.reviews.views', 'CreateProductReview')
codereview_python_data_810
        return graph_data

    if readonly:
        gidx = GraphIndex(None, multigraph, readonly)
    else:
        handle = _CAPI_DGLGraphCreateMutable(multigraph)

To avoid this awkward `None`, the solution is to first process all the graph data. There are functions to convert different types of graph data to `src, dst, edge_id, num_nodes` first, so the handler could be created and then the python shim `GraphIndex`. Leave this for now. But would you put a `FIXME` here?

        return graph_data

    if readonly:
+        # FIXME(zhengda): we should construct a C graph index before constructing GraphIndex.
        gidx = GraphIndex(None, multigraph, readonly)
    else:
        handle = _CAPI_DGLGraphCreateMutable(multigraph)
codereview_python_data_819
    Using the unnormalized Laplacion, the layout shows possible clusters of
    nodes which are an approximation of the ratio cut. The positions are the
    entries of the second and third eigenvectors corresponding to the
-    eigenvalues in ascending order.

    Parameters
    ----------

maybe add: starting from the second eigenvalue.

    Using the unnormalized Laplacion, the layout shows possible clusters of
    nodes which are an approximation of the ratio cut. The positions are the
    entries of the second and third eigenvectors corresponding to the
+    ascending eigenvalues starting from the second one.

    Parameters
    ----------
codereview_python_data_822
        if not self.is_sig_count_accepted(request, auth_constraint):
            return False, "Not enough signatures"
        if not self.is_owner_accepted(auth_constraint, auth_action):
-            if auth_action.txn_type == NYM:
-                return False, "{} can not touch verkey field since only the owner can modify it".\
-                    format(self.get_named_role_from_req(request))
            else:
                return False, "{} can not edit {} txn since only owner can modify it".\
                    format(self.get_named_role_from_req(request),

Maybe we should rather check if this is `AuthActionEdit` action and get the field from there (action.field) instead of having hard-coded `NYM` and `verkey`?

        if not self.is_sig_count_accepted(request, auth_constraint):
            return False, "Not enough signatures"
        if not self.is_owner_accepted(auth_constraint, auth_action):
+            if auth_action.field != '*':
+                return False, "{} can not touch {} field since only the owner can modify it".\
+                    format(self.get_named_role_from_req(request),
+                           auth_action.field)
            else:
                return False, "{} can not edit {} txn since only owner can modify it".\
                    format(self.get_named_role_from_req(request),
codereview_python_data_824
from qutebrowser.utils import docutils
from qutebrowser.browser import pdfjs

-from end2end.features.test_scroll_bdd import check_scrolled, check_not_scrolled
-
bdd.scenarios('misc.feature')

Hmm, I'd really expect this to work, and yet it doesn't. I'll investigate later, though it might be Monday before I get the time.

from qutebrowser.utils import docutils
from qutebrowser.browser import pdfjs

bdd.scenarios('misc.feature')
codereview_python_data_825
        self.module = module
        self.params = params
        self.description = description
-        self.stopping_conditions = []
-        if stopping_conditions:
-            self.stopping_conditions = [Fact(trait, value) for sc in stopping_conditions for trait, value in
-                                        sc.items()]

    def store(self, ram):
        existing = self.retrieve(ram['planners'], self.unique)

these 2 lines are weird

        self.module = module
        self.params = params
        self.description = description
+        self.stopping_conditions = self._set_stopping_conditions(stopping_conditions)

    def store(self, ram):
        existing = self.retrieve(ram['planners'], self.unique)
codereview_python_data_828
        Tensor from which to copy
    `arr` : mxnet.nd.NDArray
        Destination of the copy
-    `cuda_stream` : Any value that can be cast to cudaStream_t
        CUDA stream to be used for the copy
        (if not provided, an internal user stream will be selected)
        In most cases, using the default internal user stream or stream 0

As mentioned elsewhere, maybe:
```suggestion
    `cuda_stream` : Any value that can be cast or represents cudaStream_t
```

        Tensor from which to copy
    `arr` : mxnet.nd.NDArray
        Destination of the copy
+    `cuda_stream` : cudaStream_t handle or any value that can be cast to cudaStream_t.
        CUDA stream to be used for the copy
        (if not provided, an internal user stream will be selected)
        In most cases, using the default internal user stream or stream 0
codereview_python_data_831
        for index, reporter in enumerate(reporting):
            reporter = ensure_is_dict(reporting, index, "module")
            cls = reporter.get('module', ValueError())
-            if cls != 'blazemeter':
                new_reporting.append(reporter)

        self.engine.config[Reporter.REP] = new_reporting
        config = self.get_config_for_cloud()

Display warning when removing it

        for index, reporter in enumerate(reporting):
            reporter = ensure_is_dict(reporting, index, "module")
            cls = reporter.get('module', ValueError())
+            if cls == 'blazemeter':
+                self.log.warning("Explicit blazemeter reporting is skipped for cloud")
+            else:
                new_reporting.append(reporter)
+
        self.engine.config[Reporter.REP] = new_reporting
        config = self.get_config_for_cloud()
codereview_python_data_856
]


-def laplacian_spectrum(G, weight="weight",
-                       =False):
    """Returns eigenvalues of the Laplacian of G

    Parameters

looks like a syntax/typo error here at the top of spectrum.py.

]


+def laplacian_spectrum(G, weight="weight", signless=False):
    """Returns eigenvalues of the Laplacian of G

    Parameters
codereview_python_data_859
    upper_saturation: Number = 1,
    lower_value: Number = 1,
    upper_value: Number = 1,
-    seed: Optional[str] = None,
    name: Optional[str] = None,
) -> tf.Tensor:
    """Adjust hue, saturation, value of an RGB image randomly in YIQ color

```suggestion
    seed: Optional[int] = None,
```

    upper_saturation: Number = 1,
    lower_value: Number = 1,
    upper_value: Number = 1,
+    seed: Optional[int] = None,
    name: Optional[str] = None,
) -> tf.Tensor:
    """Adjust hue, saturation, value of an RGB image randomly in YIQ color
codereview_python_data_862
            assert len(parts.dml_ctes) == 1
            cte = next(iter(parts.dml_ctes.values()))[0]
            relctx.add_type_rel_overlay(
-                ir_stmt.subject.typeref, 'unIon', cte,
                dml_stmts=dml_stack,
                path_id=ir_stmt.subject.path_id, ctx=ctx)
        elif isinstance(ir_stmt, irast.DeleteStmt):
            relctx.add_type_rel_overlay(

```suggestion
                ir_stmt.subject.typeref, 'union', cte,
```

            assert len(parts.dml_ctes) == 1
            cte = next(iter(parts.dml_ctes.values()))[0]
            relctx.add_type_rel_overlay(
+                ir_stmt.subject.typeref, 'union', cte,
                dml_stmts=dml_stack,
                path_id=ir_stmt.subject.path_id, ctx=ctx)
        elif isinstance(ir_stmt, irast.DeleteStmt):
            relctx.add_type_rel_overlay(
codereview_python_data_865
            http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)

        if http_error_msg:
-            if isinstance(body_text, basestring):
-                http_error_msg += u' Response Body: %s' % body_text
            raise HTTPError(http_error_msg, response=self)

    def close(self):

The body text may be *enormous*: megabytes in size. We absolutely do not want to build a string that long. If we're adding the body text to the exception message, we should add only a segment of it.

            http_error_msg = u'%s Server Error: %s for url: %s' % (self.status_code, reason, self.url)

        if http_error_msg:
+            if isinstance(self.text, basestring) and include_text:
+                http_error_msg += u' Response Body: %s' % body_text[:max_text_length]
            raise HTTPError(http_error_msg, response=self)

    def close(self):
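The reviewer's concern, in miniature: cap how much of a response body is folded into an exception message before building the string. The 1 KiB limit below is an illustrative choice, not the value the library ended up using.

```python
MAX_TEXT_LENGTH = 1024  # illustrative cap on quoted body text


def build_error_message(status_code, reason, url, body_text):
    msg = '%s Server Error: %s for url: %s' % (status_code, reason, url)
    if body_text:
        # Slicing first keeps the message small even for huge payloads.
        msg += ' Response Body: %s' % body_text[:MAX_TEXT_LENGTH]
    return msg


huge_body = 'x' * 10_000_000  # megabytes of payload stay out of the message
msg = build_error_message(500, 'Internal Server Error',
                          'https://example.com', huge_body)
assert len(msg) < 1200
```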
codereview_python_data_868
        self.rule_name = rule_name
        self.rule_index = rule_index
        self.rule = rule

    def rule_requirements(self):
        """Used to create violation reason.

Break down the code to check if violation is returned or not and then form the violation reason.

        self.rule_name = rule_name
        self.rule_index = rule_index
        self.rule = rule
+        self.blacklist_violation_reason = "rule specified ({}) together is \
+            not allowed"
+        self.whitelist_violation_reason = "rule specified ({}) is required"

    def rule_requirements(self):
        """Used to create violation reason.
codereview_python_data_869
            data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data, feature_name, categorical_feature, self.pandas_categorical)
        label = _label_from_pandas(label)
        self.data_has_header = False
-        """process for args"""
        params = {} if params is None else params
        self.max_bin = max_bin
        self.predictor = predictor

I think `next` may not be clear. Can we give a specific version number?

            data, feature_name, categorical_feature, self.pandas_categorical = _data_from_pandas(data, feature_name, categorical_feature, self.pandas_categorical)
        label = _label_from_pandas(label)
        self.data_has_header = False
+        # process for args
        params = {} if params is None else params
        self.max_bin = max_bin
        self.predictor = predictor
codereview_python_data_872
""" if isinstance(u, groups.UpdatingAtomGroup): - raise TypeError("""UpdatingAtomGroups are not valid for MSD - computation""") self.u = u super(EinsteinMSD, self).__init__(self.u.universe.trajectory, **kwargs) Why have a newline in your error message? Maybe suggest using a static atom group instead. ```suggestion raise TypeError("UpdatingAtomGroups are not valid for MSD " "computation") ``` """ if isinstance(u, groups.UpdatingAtomGroup): + raise TypeError("UpdatingAtomGroups are not valid for MSD " + "computation") self.u = u super(EinsteinMSD, self).__init__(self.u.universe.trajectory, **kwargs)
codereview_python_data_876
    'creating-managing-organization')

MESSAGE_RUN_FREQUENCY = (
-    'Forseti will run once every 8 hours, you can update the run '
    'frequency in the server deployment template field "run-frequency"'
-    ' or edit the cron job scheduled on the server VM directly.')

# Questions templates
QUESTION_ENABLE_WRITE_ACCESS = (

I don't think that we should suggest `OR` here when editing the cron job directly on the VM. Because it will get overwritten on update or reset. What else would be cool? Should we recommend something like update the deployment template, and then do a gcloud update of the server?

    'creating-managing-organization')

MESSAGE_RUN_FREQUENCY = (
+    'Forseti will run once every 12 hours, you can configure the run '
    'frequency in the server deployment template field "run-frequency"'
+    ' and update the deployment using the deployment manager.')

# Questions templates
QUESTION_ENABLE_WRITE_ACCESS = (
codereview_python_data_888
# These need to happen after the other imports.
from . algorithm import TradingAlgorithm
from . import api
-import zipline.extensions as ext

# PERF: Fire a warning if calendars were instantiated during zipline import.
# Having calendars doesn't break anything per-se, but it makes zipline imports

Shall we make this a relative import like the others?

# These need to happen after the other imports.
from . algorithm import TradingAlgorithm
from . import api
+from zipline import extensions as ext

# PERF: Fire a warning if calendars were instantiated during zipline import.
# Having calendars doesn't break anything per-se, but it makes zipline imports
codereview_python_data_894
        def crop_func(image):
            return function(image, layout=self.data_layout, shape=self.data_shape)

-        self.crop = ops.PythonFunction(function = crop_func, output_layouts[data_layout])

    def define_graph(self):
        self.data = self.inputs()

```suggestion
        self.crop = ops.PythonFunction(function=crop_func, output_layouts=[data_layout])
```

        def crop_func(image):
            return function(image, layout=self.data_layout, shape=self.data_shape)

+        self.crop = ops.PythonFunction(function=crop_func, output_layouts=data_layout)

    def define_graph(self):
        self.data = self.inputs()
codereview_python_data_895
    def get_dhcp_pid(self):
        return self._get_dhcp_pid(["pidof", "dhcpcd"])

-    def restart_if(self, ifname, unused_retries=None, unused_wait=None):
        logger.info('restarting {} (sort of, actually SIGHUPing dhcpcd)'.format(ifname))
        pid = self.get_dhcp_pid()
        if pid != None:  # pylint: disable=C0121

we should keep the parameter names consistent, too, in case somebody ever does "restart_if(..., retries=...)"

    def get_dhcp_pid(self):
        return self._get_dhcp_pid(["pidof", "dhcpcd"])

+    def restart_if(self, ifname, retries=None, wait=None):
        logger.info('restarting {} (sort of, actually SIGHUPing dhcpcd)'.format(ifname))
        pid = self.get_dhcp_pid()
        if pid != None:  # pylint: disable=C0121
codereview_python_data_904
        - Component properties
        - Transplant methods
        """
-        self._Group._add_prop(attr)
        try:
            self._classes[attr.level]._add_prop(attr)

The reason for this try-except isn't clear to me. What happens without it?

        - Component properties
        - Transplant methods
        """
+        self._classes['group']._add_prop(attr)
        try:
            self._classes[attr.level]._add_prop(attr)
codereview_python_data_906
    warnings = Column(Text(16777215))

    def __init__(self, *args, **kwargs):
-        """Args:
            *args (list): Arguments.
            **kwargs (dict): Arguments.
        """

Please add a proper title, rather than `Args`
```
"""Initialize

Args:
    *args (list): Arguments.
    **kwargs (dict): Arguments.
```

    warnings = Column(Text(16777215))

    def __init__(self, *args, **kwargs):
+        """Initialize
+
+        Args:
            *args (list): Arguments.
            **kwargs (dict): Arguments.
        """
codereview_python_data_907
        str: GCP project id
        str: GCP Authenticated user
        bool: Whether or not the installer is running in cloudshell
-        bool: Whether or not authenticated user is a service account
    """
    return_code, out, err = utils.run_command(
        ['gcloud', 'info', '--format=json'])

Remove as this is not needed anymore.

        str: GCP project id
        str: GCP Authenticated user
        bool: Whether or not the installer is running in cloudshell
    """
    return_code, out, err = utils.run_command(
        ['gcloud', 'info', '--format=json'])