Columns: id — string (lengths 24–28); content — string (lengths 121–2.08k)
codereview_python_data_4587
_req_types = ["121"] def _rand_data(self): - return ('1', 'ADD', 'role') async def _gen_req(self, submitter_did, req_data): - return await ledger.build_get_auth_rule_request(None, req_data[0], req_data[1], req_data[2], None, None) I think it would be better (more close to production use cases) to do GET_AUTH_RULE for adding a new CLAIM_DEF. _req_types = ["121"] def _rand_data(self): + return ('102', 'ADD', '*', '*') async def _gen_req(self, submitter_did, req_data): + return await ledger.build_get_auth_rule_request(submitter_did, req_data[0], req_data[1], + req_data[2], None, req_data[3])
codereview_python_data_4591
        self._settings.setDefaultTextEncoding(encoding)
        return old_value != encoding

-    def set_unknown_url_scheme_policy(self, policy: int) -> bool:
-        """Set the UnknownUrlSchemePolicy to use.
-
-        Return:
-            True if there was a change, False otherwise.
-        """
-        assert policy is not usertypes.UNSET  # type: ignore
-        old_value = self._settings.unknownUrlSchemePolicy()
-        self._settings.setUnknownUrlSchemePolicy(policy)
-        return old_value != policy
-
    def _update_setting(self, setting: str, value: typing.Any) -> bool:
        """Update the given setting/value.

This should go to `webenginesettings.py` instead, as it's QtWebEngine-specific code.

        self._settings.setDefaultTextEncoding(encoding)
        return old_value != encoding

    def _update_setting(self, setting: str, value: typing.Any) -> bool:
        """Update the given setting/value.
codereview_python_data_4593
        if len(amf_response.messages) != 1 or amf_response.messages[0].target_uri != "/1/onResult":
            raise PluginError("unexpected response from amf gate")

-        stream_source_info = parse_json(json.dumps(amf_response.messages[0].value), schema=amf_msg_schema)
        self.logger.debug("source stream info:\n{}", stream_source_info)

        stream_params = {

FYI, I think you could do `amf_msg_schema.validate(amf_msg_schema)` instead of dumping and parsing again :-)

        if len(amf_response.messages) != 1 or amf_response.messages[0].target_uri != "/1/onResult":
            raise PluginError("unexpected response from amf gate")

+        stream_source_info = amf_msg_schema.validate(amf_response.messages[0].value)
        self.logger.debug("source stream info:\n{}", stream_source_info)

        stream_params = {
codereview_python_data_4599
            return self.set_font_family(setting, value)
        elif setting == 'content.default_encoding':
            return self.set_default_text_encoding(value)
-        elif setting == 'unknown_url.scheme.policy':
-            # QtWebKit and QWebEngine < 5.11 doesn't provide interfaces
-            # for processing UnknownUrlSchemePolicy.
-            #
-            # AttributeError is expected for such cases.
-            try:
-                return self.set_unknown_url_scheme_policy(value)
-            except AttributeError:
-                pass
        return False

    def update_setting(self, setting: str) -> None:

Might be cleaner to override the `_update_setting` method in `WebEngineSettings` - something like this (untested):

```python3
def _update_setting(self, setting: str, value: typing.Any) -> bool:
    if setting == 'unknown_url.scheme.policy':
        return self.set_unknown_url_scheme_policy(value)
    return super()._update_setting(setting, value)
```

You don't need to take care to handle Qt < 5.10 compatibility if you set the `backend` key correctly in `configdata.yml` as mentioned above, at least if I'm not mistaken.

            return self.set_font_family(setting, value)
        elif setting == 'content.default_encoding':
            return self.set_default_text_encoding(value)
        return False

    def update_setting(self, setting: str) -> None:
codereview_python_data_4600
    def destroy(self):
        self._write_vagrant_file()
-        for status in self._vagrant.status():
-            if status[1] == 'running':
-                self._vagrant.destroy(vm_name=status[0])
        os.remove(self.m._config.config['molecule']['vagrantfile_file'])

Would be nice if we can look at the state file's status vs another heavy call to `vagrant status`.

    def destroy(self):
        self._write_vagrant_file()
+        if self.m._state.get('created'):
+            self._vagrant.destroy()
        os.remove(self.m._config.config['molecule']['vagrantfile_file'])
codereview_python_data_4601
import pytest

from tests.common.utils import raises, capture_out
from hypothesis.internal.compat import print_unicode
-from hypothesis.tests.cover.test_stateful import bad_machines

@pytest.mark.parametrize(

This should just be `from tests.cover.test_stateful import bad_machines`

import pytest

from tests.common.utils import raises, capture_out
+from tests.cover.test_stateful import bad_machines
from hypothesis.internal.compat import print_unicode

@pytest.mark.parametrize(
codereview_python_data_4603
    updated_goal = Goal(target='updated target', value='complete')
    objective_data.update(dict(name='an updated test objective',
                               description='a test objective that has been updated',
-                               goals=[updated_goal.schema.dump(test_goal)]))
    return objective_data

Should this be `updated_goal.schema.dump(updated_goal)`?

    updated_goal = Goal(target='updated target', value='complete')
    objective_data.update(dict(name='an updated test objective',
                               description='a test objective that has been updated',
+                               goals=[updated_goal.schema.dump(updated_goal)]))
    return objective_data
codereview_python_data_4604
        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))

    def __str__(self):
-        return '%s' % (dict(self.items()))


class LookupDict(dict):

This could be more simply:

``` python
def __str__(self):
    return str(dict(self.items()))
```

This isn't a merge blocker though.

        return '%s(%r)' % (self.__class__.__name__, dict(self.items()))

    def __str__(self):
+        return str(dict(self.items()))


class LookupDict(dict):
codereview_python_data_4610
    init_db_connection(app)
    messybrainz.db.init_db_engine(app.config['MESSYBRAINZ_SQLALCHEMY_DATABASE_URI'])

-    # Connections to external servers
-    @app.before_request
-    def before_reqeust():
-        g.kafka = kafka_connection.init_kafka_connection(app.config['KAFKA_CONNECT'])
-        g.listenstore = listenstore_connection.init_listenstore(current_app.config['CASSANDRA_SERVER'], current_app.config['CASSANDRA_KEYSPACE'])
-
    # OAuth
    from webserver.login import login_manager, provider
    login_manager.init_app(app)

Does this really make sense? So far only one of our endpoints talks to kafka, but this will require every connection to have a kafka handle. That's kinda kafkaesque, no? Maybe we should make this a decorator that we apply to kafka using endpoints.

    init_db_connection(app)
    messybrainz.db.init_db_engine(app.config['MESSYBRAINZ_SQLALCHEMY_DATABASE_URI'])

    # OAuth
    from webserver.login import login_manager, provider
    login_manager.init_app(app)
codereview_python_data_4619
    name: str
    """Parameter name"""

-    optional: bool
    """Whether parameter is OPTIONAL or REQUIRED"""

    schema_type: s_types.Type

Let's use `required: bool` instead for consistency with pointers and the cardinality inference machinery.

    name: str
    """Parameter name"""

+    required: bool
    """Whether parameter is OPTIONAL or REQUIRED"""

    schema_type: s_types.Type
codereview_python_data_4620
        }

    def to_query(self):
-        if self.txid is None and self.output is None:
-            return None
-        else:
-            return [{'transaction_id': self.txid,
-                     'output_index': self.output},
-                    {'output_index': self.output,
-                     'transaction_id': self.txid}]

    def to_uri(self, path=''):
        if self.txid is None and self.output is None:

Is it possible for just one of these to be `None`?

        }

    def to_query(self):
+        return [{'transaction_id': self.txid,
+                 'output_index': self.output},
+                {'output_index': self.output,
+                 'transaction_id': self.txid}]

    def to_uri(self, path=''):
        if self.txid is None and self.output is None:
codereview_python_data_4621
        raise TypeError('Data must be either Dask array or dataframe. Got %s.' % str(type(data)))


-class _LGBMModel:

    def _fit(self, model_factory, X, y, sample_weight=None, group=None, client=None, **kwargs):
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')

if this check is being moved out of the constructor, then can you please put it in the `_predict()` function as well? If someone tries to load a saved `DaskLGBMClassifier` from a pickle file (for example) and then use its `.predict()` method, I think we also want them to get an informative error about `dask` not being available. They won't get an `ImportError` on `pickle.load()` because of the magic of `.compat`.

        raise TypeError('Data must be either Dask array or dataframe. Got %s.' % str(type(data)))


+class _DaskLGBMModel:

    def _fit(self, model_factory, X, y, sample_weight=None, group=None, client=None, **kwargs):
        if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)):
            raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask')
codereview_python_data_4622
    builds() - that is, a tuple of values and a dict of names: values.
    """
    try:
-        spec = getfullargspec(
-            target.__init__ if inspect.isclass(target) and '__init__' in target.__dict__ else target)
    except TypeError:  # pragma: no cover
        return None
    # self appears in the argspec of __init__ and bound methods, but it's an

This line has become complex enough that I'd split it out into explicit cases:

```python
if inspect.isclass(target) and hasattr(target, '__init__'):
    spec = getfullargspec(target.__init__)
else:
    spec = getfullargspec(target)
```

(this will also fix the lint errors you're seeing)

    builds() - that is, a tuple of values and a dict of names: values.
    """
    try:
+        if inspect.isclass(target) and hasattr(target, '__init__'):
+            spec = getfullargspec(target.__init__)
+        else:
+            spec = getfullargspec(target)
    except TypeError:  # pragma: no cover
        return None
    # self appears in the argspec of __init__ and bound methods, but it's an
codereview_python_data_4623
        response = self.client.get(creation_url)
        self.assertEqual(response.status_code, 200)
-        if sys.version_info[0] >= 3:
-            # Python 3: 'strings'
-            expected_re = b'^<html>.+<title>Events</title>.+Something happened.+<td>\\[&#39;foo&#39;, &#39;bar&#39;\\]</td>.+</html>$'
-        else:
-            # Python 2: u'strings'
-            expected_re = b'^<html>.+<title>Events</title>.+Something happened.+<td>\\[u&#39;foo&#39;, u&#39;bar&#39;\\]</td>.+</html>$'
        self.assertRegexpMatches(response.content, re.compile(expected_re, re.DOTALL))

    def test_tag_as_str(self):

We should make the output consistent between the 2 versions instead, I'd be in favor of dropping the `u` prefix in py2 output

        response = self.client.get(creation_url)
        self.assertEqual(response.status_code, 200)
+        expected_re = b'^<html>.+<title>Events</title>.+Something happened.+<td>\\[&#39;foo&#39;, &#39;bar&#39;\\]</td>.+</html>$'
        self.assertRegexpMatches(response.content, re.compile(expected_re, re.DOTALL))

    def test_tag_as_str(self):
codereview_python_data_4624
        for i in range(num_samples):
            grads0 = tf.constant(db_grad[i])
            cg_opt.apply_gradients(zip([grads0], [var0]))
-            np.allclose(np.array(db_out[i]), var0.numpy())


@pytest.mark.usefixtures("maybe_run_functions_eagerly")

```suggestion
            np.testing.assert_allclose(np.array(db_out[i]), var0.numpy())
```
np.allclose returns a boolean.

        for i in range(num_samples):
            grads0 = tf.constant(db_grad[i])
            cg_opt.apply_gradients(zip([grads0], [var0]))
+            np.testing.assert_allclose(
+                np.array(db_out[i]), var0.numpy(), rtol=1e-06, atol=1e-06
+            )


@pytest.mark.usefixtures("maybe_run_functions_eagerly")
codereview_python_data_4627
@api_bp.route('/2.0/', methods=['POST', 'GET'])
@ratelimit()
-@api_listenstore_needed
def api_methods():
    """ Receives both (GET & POST)-API calls and redirects them to appropriate methods.
    """
    data = request.args if request.method == 'GET' else request.form
    method = data['method'].lower()

    if method in ('track.updatenowplaying', 'track.scrobble'):
        return record_listens(request, data)
    elif method == 'auth.getsession':

future improvement - some methods in this endpoint need ts and some don't. If we plan to support this API into the future then we should allow submissions to work. This endpoint returns XML on failure not json

@api_bp.route('/2.0/', methods=['POST', 'GET'])
@ratelimit()
def api_methods():
    """ Receives both (GET & POST)-API calls and redirects them to appropriate methods.
    """
    data = request.args if request.method == 'GET' else request.form
    method = data['method'].lower()

+    listenstore_required_methods = ['user.getinfo']
+    if method in listenstore_required_methods and timescale_connection._ts is None:
+        raise InvalidAPIUsage(CompatError.SERVICE_UNAVAILABLE, output_format=data.get('format', "xml"))
+
    if method in ('track.updatenowplaying', 'track.scrobble'):
        return record_listens(request, data)
    elif method == 'auth.getsession':
codereview_python_data_4629
            logger = logging.getLogger()
            logger.info('load model from: {}'.format(pretrained))

-    @auto_fp16(apply_to=('img', ))
    def forward_test(self, imgs, img_metas, **kwargs):
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):

This line is unnecessary.

            logger = logging.getLogger()
            logger.info('load model from: {}'.format(pretrained))

    def forward_test(self, imgs, img_metas, **kwargs):
        for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
            if not isinstance(var, list):
codereview_python_data_4636
    @staticmethod
    def test_PDB_atom_repr():
-        u = mda.Universe(PDB)
        # should execute without error
        u.atoms[0].__repr__()

Can we use one of the cheaper PDB universes? This is 47k atoms iirc

    @staticmethod
    def test_PDB_atom_repr():
+        u = mda.Universe(PDB_small)
        # should execute without error
        u.atoms[0].__repr__()
codereview_python_data_4649
_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
-img_norm_cfg = dict(
-    mean=[103.53, 116.28, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w32',
    backbone=dict(

This will not change the real img_norm_cfg used during training and testing because img_norm_cfg here is an intermediate variable, if you modify img_norm_cfg, you should also modify `train_pipeline` and `test_pipeline`. See configs/hrnet/fcos_hrnetv2p_w32_gn-head_mstrain_640-800_4x4_2x_coco.py

_base_ = '../fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w32',
    backbone=dict(
codereview_python_data_4663
                kwargs['proposals'] = kwargs['proposals'][0]
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
-            assert imgs[0].size(0) == 1, 'aug test does not support batch ' \
-                                         'inference'
            # TODO: support test augmentation for predefined proposals
            assert 'proposals' not in kwargs
            return self.aug_test(imgs, img_metas, **kwargs)

Add the batch size info in the AssertionError message.

                kwargs['proposals'] = kwargs['proposals'][0]
            return self.simple_test(imgs[0], img_metas[0], **kwargs)
        else:
+            assert imgs[0].size(0) == 1, 'aug test does not support ' \
+                                         'inference with batch size ' \
+                                         f'{imgs[0].size(0)}'
            # TODO: support test augmentation for predefined proposals
            assert 'proposals' not in kwargs
            return self.aug_test(imgs, img_metas, **kwargs)
codereview_python_data_4665
class UnicodeProcessRunner(UnicodeProcessRunnerMixin, ProcessRunner):
    """ProcessRunner which always returns unicode output."""
-
-    def run_and_wait(self, *args, **kwargs):  # pylint: disable=arguments-differ
-        """Overridden run_and_wait which always decodes the output."""
-        result = ProcessRunner.run_and_wait(self, *args, **kwargs)
-        if result.output is not None:
-            result.output = utils.decode_to_unicode(result.output)
-
-        return result

Why did we create UnicodeProcessRunnerMixin ? it has the same run_and_wait as UnicodeProcessRunner ?

class UnicodeProcessRunner(UnicodeProcessRunnerMixin, ProcessRunner):
    """ProcessRunner which always returns unicode output."""
codereview_python_data_4668
        return {'new_feat': h}


-class GraphOp(nn.Module):
    """
    The Transition Down Module
    """

    def __init__(self, in_channels, out_channels, n_neighbor=64):
-        super(GraphOp, self).__init__()
        self.frnn_graph = KNNGraphBuilder(n_neighbor)
        self.message = KNNMessage(n_neighbor)
        self.conv = KNNConv([in_channels, out_channels, out_channels])

Giving it the name `TransitionDown` is better than this information-less `GraphOp`.

        return {'new_feat': h}


+class TransitionDown(nn.Module):
    """
    The Transition Down Module
    """

    def __init__(self, in_channels, out_channels, n_neighbor=64):
+        super(TransitionDown, self).__init__()
        self.frnn_graph = KNNGraphBuilder(n_neighbor)
        self.message = KNNMessage(n_neighbor)
        self.conv = KNNConv([in_channels, out_channels, out_channels])
codereview_python_data_4669
                    help='The schema of the graph')
parser.add_argument('--num-parts', required=True, type=int,
                    help='The number of partitions')
parser.add_argument('--num-node-weights', required=True, type=int,
                    help='The number of node weights used by METIS.')
parser.add_argument('--workspace', type=str, default='/tmp',

Remember to add package dependencies in the README.

                    help='The schema of the graph')
parser.add_argument('--num-parts', required=True, type=int,
                    help='The number of partitions')
+parser.add_argument('--num-ntypes', type=int, required=True,
+                    help='The number of node types in the graph.')
parser.add_argument('--num-node-weights', required=True, type=int,
                    help='The number of node weights used by METIS.')
parser.add_argument('--workspace', type=str, default='/tmp',
codereview_python_data_4674
logger = logging.getLogger()
logger.setLevel(logging.INFO)

-# Define a list of lambdas that are called by our Lambda.
ACTIONS = {
    'square': lambda x: x * x,
    'square root': lambda x: math.sqrt(x),

...of **Lambda functions** that are....our **Lambda function**

logger = logging.getLogger()
logger.setLevel(logging.INFO)

+# Define a list of Python lambda functions that are called by this AWS Lambda function.
ACTIONS = {
    'square': lambda x: x * x,
    'square root': lambda x: math.sqrt(x),
codereview_python_data_4676
        A dictionary containing the keys 'name' and 'ident' which are mapped to
        the 'name' and 'id' node elements in cyjs format. All other keys are
        ignored. Default is `None` which results in the default mapping
-        ``{name="name", ident="id"}``.

    Returns
    -------

Should this be ``if attrs is None``?

        A dictionary containing the keys 'name' and 'ident' which are mapped to
        the 'name' and 'id' node elements in cyjs format. All other keys are
        ignored. Default is `None` which results in the default mapping
+        ``dict(name="name", ident="id")``.

    Returns
    -------
codereview_python_data_4678
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        # send cat ids to the get img id
        # in case we only need to train on several classes
-        if self.custom_classes:
-            self.img_ids = self.get_imgs_by_cat(catIds=self.cat_ids)
-        else:
-            self.img_ids = self.coco.getImgIds()
        data_infos = []
        for i in self.img_ids:

Is the condition necessary?

        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        # send cat ids to the get img id
        # in case we only need to train on several classes
+        self.img_ids = self.get_subset_by_classes(class_ids=self.cat_ids)
        data_infos = []
        for i in self.img_ids:
codereview_python_data_4682
    mock_issue = self._make_mock_issue()
    issue_filer.update_issue_impact_labels(self.testcase, mock_issue)
-    six.assertCountEqual(self, ['Security_Impact-ExtendedStable'],
                         mock_issue.labels.added)
    six.assertCountEqual(self, [], mock_issue.labels.removed)

Can you update the function name and docstring to say "ExtendedStable" ?

    mock_issue = self._make_mock_issue()
    issue_filer.update_issue_impact_labels(self.testcase, mock_issue)
+    six.assertCountEqual(self, ['Security_Impact-Stable'],
                         mock_issue.labels.added)
    six.assertCountEqual(self, [], mock_issue.labels.removed)
codereview_python_data_4687
class _Action(_Token):
-    code: str = None
-    help: str = None

    @classmethod
    def make(klass, s, loc, toks):

A bit adjacent to this PR, is there a particular use case for this?

class _Action(_Token):
+    code: ClassVar[str]
+    help: ClassVar[str]

    @classmethod
    def make(klass, s, loc, toks):
codereview_python_data_4689
    e_repr = np.zeros((4, 5))
    n_repr[[1, 3]] = 1
    e_repr[[1, 3]] = 1
-    n_repr = F.zerocopy_from_numpy(n_repr)
-    e_repr = F.zerocopy_from_numpy(e_repr)

    g.ndata['a'] = n_repr
    g.edata['a'] = e_repr

Why change this? F.zeros should enable allocating the tensors on GPU for the corresponding unit tests.

    e_repr = np.zeros((4, 5))
    n_repr[[1, 3]] = 1
    e_repr[[1, 3]] = 1
+    n_repr = F.copy_to(F.zerocopy_from_numpy(n_repr), F.ctx())
+    e_repr = F.copy_to(F.zerocopy_from_numpy(e_repr), F.ctx())

    g.ndata['a'] = n_repr
    g.edata['a'] = e_repr
codereview_python_data_4692
# limitations under the License.
"""Wrapper for Admin Directory API client."""
-from httplib2 import HttpLib2Error

from googleapiclient import errors

from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers

Why is this under httplib2? Should be above when alpha sorted.

# limitations under the License.
"""Wrapper for Admin Directory API client."""
from googleapiclient import errors
+from httplib2 import HttpLib2Error

from google.cloud.forseti.common.gcp_api import _base_repository
from google.cloud.forseti.common.gcp_api import api_helpers
codereview_python_data_4696
            cloudsql_acl.ssl_enabled)

        should_raise_violation = (
-            (is_instance_name_violated is not None and\
-             is_instance_name_violated) and\
-            (is_authorized_networks_violated is not None and\
             is_authorized_networks_violated) and
            (is_ssl_enabled_violated is not None and is_ssl_enabled_violated))

nit: I think the backslashes are not needed here?

            cloudsql_acl.ssl_enabled)

        should_raise_violation = (
+            (is_instance_name_violated is not None and
+             is_instance_name_violated) and
+            (is_authorized_networks_violated is not None and
             is_authorized_networks_violated) and
            (is_ssl_enabled_violated is not None and is_ssl_enabled_violated))
codereview_python_data_4697
        overwrite_path_rvar: bool=False,
        pull_namespace: bool=True,
        flavor: str='normal',
-        aspects: Optional[Collection[str]]=None,
        ctx: context.CompilerContextLevel) -> pgast.PathRangeVar:
    """Ensure that *rvar* is visible in *stmt* as a value/source aspect.

Alas, `Collection[str]` includes `str` itself, and I'm always uneasy when there's a signature like this. Perhaps `Tuple[str, ...] | AbstractSet[str] | None`?

        overwrite_path_rvar: bool=False,
        pull_namespace: bool=True,
        flavor: str='normal',
+        aspects: Optional[Tuple[str, ...] | AbstractSet[str]]=None,
        ctx: context.CompilerContextLevel) -> pgast.PathRangeVar:
    """Ensure that *rvar* is visible in *stmt* as a value/source aspect.
codereview_python_data_4701
        create_node_and_not_start):
    node = create_node_and_not_start
    req_entry = build_txn_for_revoc_def_entry_by_default
-    req_handler = node.init_domain_req_handler()
    req_handler.apply(Request(**req_entry), int(time.time()))
    with pytest.raises(InvalidClientRequest, match="must be equal to the last accumulator value"):
        req_handler.validate(Request(**req_entry))

Why do we re-init it here? Should we just get already existent req handler?

        create_node_and_not_start):
    node = create_node_and_not_start
    req_entry = build_txn_for_revoc_def_entry_by_default
+    req_handler = node.get_req_handler(DOMAIN_LEDGER_ID)
    req_handler.apply(Request(**req_entry), int(time.time()))
    with pytest.raises(InvalidClientRequest, match="must be equal to the last accumulator value"):
        req_handler.validate(Request(**req_entry))
codereview_python_data_4704
        # TODO: Reactor LIMITED_API struct decl closer to the static decl
        code.putln("#if CYTHON_COMPILING_IN_LIMITED_API")
        code.putln('typedef struct {')
-        code.putln('PyObject *__pyx_CyFunctionType;')
-        code.putln('PyObject *__pyx_FusedFunctionType;')
        code.putln('PyObject *%s;' % Naming.builtins_cname)
        code.putln('PyObject *%s;' % Naming.cython_runtime_cname)
        code.putln('PyObject *%s;' % Naming.empty_tuple)

These look like good candidates for the `Naming` module now, which defines name constants that get reused across the code base. `Naming.cyfunction_type_cname` and `Naming.fusedfunction_type_cname` ? Also, their declaration should depend on `#ifdef __Pyx_CyFunction_USED` and `__Pyx_FusedFunction_USED`, i.e. actual usage of these types in the module.

        # TODO: Reactor LIMITED_API struct decl closer to the static decl
        code.putln("#if CYTHON_COMPILING_IN_LIMITED_API")
        code.putln('typedef struct {')
        code.putln('PyObject *%s;' % Naming.builtins_cname)
        code.putln('PyObject *%s;' % Naming.cython_runtime_cname)
        code.putln('PyObject *%s;' % Naming.empty_tuple)
codereview_python_data_4705
        worker_count: int = 4,
        head_node_type: str = None,
        worker_node_type: str = None,
-        conda_environment: list = None,
    ):
        """
        Prepare the cluster manager. It needs to know a few things:

I would suggest renaming to `add_conda_packages` or smth similar

        worker_count: int = 4,
        head_node_type: str = None,
        worker_node_type: str = None,
+        add_conda_packages: list = None,
    ):
        """
        Prepare the cluster manager. It needs to know a few things:
codereview_python_data_4712
        super(CloudProvisioning, self).startup()
        self.client.start_taurus(self.test_id)
        self.log.info("Started cloud test: %s", self.client.results_url)
-        if not self.detach and self.client.results_url:
            if self.browser_open in ('start', 'both'):
                open_browser(self.client.results_url)

Detaching Taurus from started test...

        super(CloudProvisioning, self).startup()
        self.client.start_taurus(self.test_id)
        self.log.info("Started cloud test: %s", self.client.results_url)
+        if self.client.results_url:
            if self.browser_open in ('start', 'both'):
                open_browser(self.client.results_url)
codereview_python_data_4713
(r"/settings", Settings), (r"/clear", ClearAll), (r"/options", Options), - (r"/options/dump", DumpOptions) ] settings = dict( template_path=os.path.join(os.path.dirname(__file__), "templates"), Really no strong opinion, just curious - why "dump" and not "save"? (r"/settings", Settings), (r"/clear", ClearAll), (r"/options", Options), + (r"/options/save", SaveOptions) ] settings = dict( template_path=os.path.join(os.path.dirname(__file__), "templates"),
codereview_python_data_4714
    def test_requests_history_is_saved(self):
        r = requests.get('https://httpbin.org/redirect/5')
-        count = 0
        for item in r.history:
-            assert len(item.history) == count
-            count = count + 1


class TestContentEncodingDetection(unittest.TestCase):

Can we make this test a bit more rigorous? We should be able to assert that each item has the correct slice of the history as well.

    def test_requests_history_is_saved(self):
        r = requests.get('https://httpbin.org/redirect/5')
+        total = r.history[-1].history
+        i = 0
        for item in r.history:
+            assert item.history == total[0:i]
+            i=i+1


class TestContentEncodingDetection(unittest.TestCase):
codereview_python_data_4722
def random_id(stream_arn, kinesis_shard_id):
-    if six.PY2:
-        kinesis_shard_id = kinesis_shard_id.encode('utf-8')
    namespace = uuid.UUID(bytes=hashlib.sha1(stream_arn.encode('utf-8')).digest()[:16])
-    return uuid.uuid5(namespace, kinesis_shard_id).hex


def shard_id(stream_arn, kinesis_shard_id):

Can we simply use the following (and import `to_bytes` from `commons` at the top of the file`):
```
return uuid.uuid5(namespace, to_bytes(kinesis_shard_id)).hex
```

def random_id(stream_arn, kinesis_shard_id):
    namespace = uuid.UUID(bytes=hashlib.sha1(stream_arn.encode('utf-8')).digest()[:16])
+    return uuid.uuid5(namespace, to_bytes(kinesis_shard_id)).hex


def shard_id(stream_arn, kinesis_shard_id):
codereview_python_data_4723
        self.cloudsql_instance = '{}-{}'.format(
            'forseti-security',
            self.datetimestamp)
-        self.cloudsql_region = kwargs.get('cloudsql_region') or 'us-central1'

        # forseti_conf_server.yaml.in properties
        self.sendgrid_api_key = kwargs.get('sendgrid_api_key')

After discussion with Joe, we should set these defaults in the flags. It will be easier to reference and centralized, rather than scattered throughout out the code.

        self.cloudsql_instance = '{}-{}'.format(
            'forseti-security',
            self.datetimestamp)
+        self.cloudsql_region = kwargs.get('cloudsql_region')

        # forseti_conf_server.yaml.in properties
        self.sendgrid_api_key = kwargs.get('sendgrid_api_key')
codereview_python_data_4731
            An HDF5 daily pricing file.
        """
        return cls({
-            country: HDF5DailyBarReader(h5_file[country])
            for country in h5_file.keys()
        })

Should we use `HDF5DailyBarReader.from_file()`?

            An HDF5 daily pricing file.
        """
        return cls({
+            country: HDF5DailyBarReader.from_file(h5_file, country)
            for country in h5_file.keys()
        })
codereview_python_data_4732
        total = self.squared_sum - self.sum * mean
        raw_scores = 1 - (self.res / total)
        raw_scores = tf.where(
-            tf.equal(raw_scores, float("-inf")), tf.zeros_like(raw_scores), raw_scores
        )

        if self.multioutput == "raw_values":

Probably you could use
```
raw_scores = tf.where(tf.math.is_inf(raw_scores), tf.zeros_like(raw_scores), raw_scores)
```

        total = self.squared_sum - self.sum * mean
        raw_scores = 1 - (self.res / total)
        raw_scores = tf.where(
+            tf.math.is_inf(raw_scores), tf.zeros_like(raw_scores), raw_scores
        )

        if self.multioutput == "raw_values":
codereview_python_data_4733
        for name, info in executors.items():
            for e in name.split(','):
                encoded_test = b64encode(info['command'].strip().encode('utf-8'))
-                creates_fact_relationship = dict(property1=ab['relationships'].split(',')[0].strip(),
-                                                 relationship=ab['relationships'].split(',')[1].strip(),
-                                                 property2=ab['relationships'].split(',')[2].strip(),
-                                                 relationship_type='creates') if ab.get('relationships') else None
-                requires_fact_relationship = dict(property1=ab['requires'].split(',')[0].strip(),
-                                                  relationship=ab['requires'].split(',')[1].strip(),
-                                                  property2=ab['requires'].split(',')[2].strip(),
-                                                  relationship_type='requires') if ab.get('requires') else None
                await self.create_ability(ability_id=ab.get('id'), tactic=ab['tactic'].lower(),
                                          technique_name=ab['technique']['name'],
                                          technique_id=ab['technique']['attack_id'],

we should move this to a separate function

        for name, info in executors.items():
            for e in name.split(','):
                encoded_test = b64encode(info['command'].strip().encode('utf-8'))
+                fact_relationships = await self._get_fact_relationships(ab)
                await self.create_ability(ability_id=ab.get('id'), tactic=ab['tactic'].lower(),
                                          technique_name=ab['technique']['name'],
                                          technique_id=ab['technique']['attack_id'],
codereview_python_data_4735
class Impact(object):
    """Represents impact on a build type."""

-    def __init__(self,
-                 version='',
-                 likely=False,
-                 extra_trace='',
-                 milestone_only=False):
        self.version = str(version)
        self.likely = likely
        self.extra_trace = extra_trace
-        self.milestone_only = milestone_only

    def is_empty(self):
        """Return True if empty."""

Would prefer omitting this to keep the code simpler, since this is test only (and it doesn't provide much value their either, because you can infer that from the "version").

class Impact(object):
    """Represents impact on a build type."""

+    def __init__(self, version='', likely=False, extra_trace=''):
        self.version = str(version)
        self.likely = likely
        self.extra_trace = extra_trace

    def is_empty(self):
        """Return True if empty."""
codereview_python_data_4736
passed value. """ for feature in self.features.values(): - try: # If the feature has the attribute, set it to the passed value setattr(feature, attr, value) - except AttributeError: - pass # For backwards compatibility, we support both colour and color. # As a quick hack, make "colour" set both "colour" and "color". I can see no obvious reason for the original code here checking if the value was different, versus just setting it as you do. Should be fine. passed value. """ for feature in self.features.values(): + if hasattr(track, attr): # If the feature has the attribute, set it to the passed value setattr(feature, attr, value) # For backwards compatibility, we support both colour and color. # As a quick hack, make "colour" set both "colour" and "color".
codereview_python_data_4749
    def __init__(self, filename, **kwargs):
        super(XYZReader, self).__init__(filename, **kwargs)
-
-        # the filename has been parsed to be either be foo.xyz or foo.xyz.bz2 by
-        # coordinates::core.py so the last file extension will tell us if it is
-        # bzipped or not
-        if util.isstream(self.filename):
-            root = str(self.filename)
-            ext = 'xyz'
-        else:
-            root, ext = os.path.splitext(self.filename)
        self.xyzfile = util.anyopen(self.filename)
-        self.compression = ext[1:] if ext[1:] != "xyz" else None
        self._cache = dict()
        self.ts = self._Timestep(self.n_atoms, **self._ts_kwargs)

`root` in the old and new code are not the same. It might not matter here. However, a cleaner way would be to have our own `splitext` that works on streams and use it here. The best solution would be to think about `NamedStream` and see what makes sense with Python 3.

    def __init__(self, filename, **kwargs):
        super(XYZReader, self).__init__(filename, **kwargs)
        self.xyzfile = util.anyopen(self.filename)
        self._cache = dict()
        self.ts = self._Timestep(self.n_atoms, **self._ts_kwargs)
codereview_python_data_4772
        return [qresult]

    def _unique_hit_id(self, hit_id, existing_ids, separator='_'):
-        """Return a unique hit id. (PRIVATE).

        Always append a numeric id to each hit as there may be multiple
        with the same id.
        """

Minor, but you can remove the full stop after "id" here.

        return [qresult]

    def _unique_hit_id(self, hit_id, existing_ids, separator='_'):
+        """Return a unique hit id (PRIVATE).

        Always append a numeric id to each hit as there may be multiple
        with the same id.
        """
codereview_python_data_4773
        return st.just(param.default)
    # If there's no annotation and no default value, we check against a table
    # of guesses of simple strategies for common argument names.
-    if "string" in param.name:
        return st.text()
    for strategy, names in _GUESS_STRATEGIES_BY_NAME:
        if param.name in names:

I'm not so sure this is a reliable guess. I feel like `as_string` (bool) might be a common failcase. And it might be substantially more confusing for ghostwriter to guess a bad strategy than not hazard a guess. This is your call, ultimately

        return st.just(param.default)
    # If there's no annotation and no default value, we check against a table
    # of guesses of simple strategies for common argument names.
+    if "string" in param.name and "as" not in param.name:
        return st.text()
    for strategy, names in _GUESS_STRATEGIES_BY_NAME:
        if param.name in names:
codereview_python_data_4792
#
# Skeleton file for the Python "Bob" exercise, to get you coding more quickly.
#
-def hey(stimulus)

    return

Could we have a less interesting argument name than `stimulus`? I'm worried that if we provide a name that is already good, people won't spend any time thinking about it themselves. Maybe something as terrible as `s` or `arg`?

#
# Skeleton file for the Python "Bob" exercise, to get you coding more quickly.
#
+def hey(what)

    return
codereview_python_data_4793
    return res

class CircularLR_beta(LR_Updater):
-    '''
-    ??? highly unsure ???
-    CLR learning rate updater, but not using pct percentage of data.
-    '''
    def __init__(self, layer_opt, nb, div=10, pct=10, on_cycle_end=None, momentums=None):
        self.nb,self.div,self.pct,self.on_cycle_end = nb,div,pct,on_cycle_end
        self.cycle_nb = int(nb * (1-pct/100) / 2)

Probably best to remove any docs you're not sure of

    return res

class CircularLR_beta(LR_Updater):
    def __init__(self, layer_opt, nb, div=10, pct=10, on_cycle_end=None, momentums=None):
        self.nb,self.div,self.pct,self.on_cycle_end = nb,div,pct,on_cycle_end
        self.cycle_nb = int(nb * (1-pct/100) / 2)
codereview_python_data_4796
    },
    {
        'source': 'gs://bucket-dbg/projects.json',
-        'suffix': '_dbg',
        'build_type': 'FUZZ_TARGET_BUILD_BUCKET_PATH',
        'build_buckets': {
            'afl': 'clusterfuzz-builds-afl-dbg',

suffix feels too generic ? maybe name_suffix or config_suffix or something else ?

    },
    {
        'source': 'gs://bucket-dbg/projects.json',
+        'job_suffix': '_dbg',
        'build_type': 'FUZZ_TARGET_BUILD_BUCKET_PATH',
        'build_buckets': {
            'afl': 'clusterfuzz-builds-afl-dbg',
codereview_python_data_4812
    @hook
    def receive_menu(self, menu, addrs, wallet):
-        keystore = wallet.get_keystore()
        if len(addrs) != 1:
            return
        for keystore in wallet.get_keystores():

This line is no longer needed as the loop below overwrites the value of `keystore`.

    @hook
    def receive_menu(self, menu, addrs, wallet):
        if len(addrs) != 1:
            return
        for keystore in wallet.get_keystores():
codereview_python_data_4817
                i.split(split_by_first)[-1].split(split_by_second)[-1],
            ]
        )
-        for i in content.split("+ python3 -m pytest -n=48 ")
    )
    if len(full_comment) > 65_000:
        full_comment = full_comment[-65_000:] + "\n\n<b>Remaining output truncated<b>\n\n"

Is it possible not to hardcode `-n=48` here? It may make it impossible to change number of workers which we had to do just recently because of ray race condition bug. Also servers with different number of CPUs may require different number of workers.

                i.split(split_by_first)[-1].split(split_by_second)[-1],
            ]
        )
+        for i in content.split("+ python3 -m pytest ")
    )
    if len(full_comment) > 65_000:
        full_comment = full_comment[-65_000:] + "\n\n<b>Remaining output truncated<b>\n\n"
codereview_python_data_4826
                        formatted='Server is throttling, reconnecting in {:d} seconds'.format(wait_time)
                    )
                    time.sleep(wait_time)
-#                    sys.exit()
                except PermaBannedException:
                    bot.event_manager.emit(
                        'api_error',

should be removed

                        formatted='Server is throttling, reconnecting in {:d} seconds'.format(wait_time)
                    )
                    time.sleep(wait_time)
                except PermaBannedException:
                    bot.event_manager.emit(
                        'api_error',
codereview_python_data_4834
                                'atime': atime,
                                'redirect': redirect})

-        if any(pattern.matches(url)
-               for pattern in config.val.history.exclude):
-            return
        if redirect:
            return

        self.completion.insert({
            'url': self._format_completion_url(url),

nit: the redirect check could go first to avoid attempting pattern matches on redirects.

                                'atime': atime,
                                'redirect': redirect})

        if redirect:
            return
+        if any(pattern.matches(url)
+               for pattern in config.val.completion.web_history.exclude):
+            return

        self.completion.insert({
            'url': self._format_completion_url(url),
codereview_python_data_4848
        props.merge(self._get_load_props())
        props.merge(self._get_scenario_props())
        for key in sorted(props.keys()):
-            if isinstance(props[key], string_types):
-                self.env.add_java_param({"JAVA_OPTS": "-D%s='%s'" % (key, props[key])})
-            else:
-                self.env.add_java_param({"JAVA_OPTS": "-D%s=%s" % (key, props[key])})

        self.env.set({"NO_PAUSE": "TRUE"})
        self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})

maybe use %r here, since string with quote inside will break it again

        props.merge(self._get_load_props())
        props.merge(self._get_scenario_props())
        for key in sorted(props.keys()):
+            self.env.add_java_param({"JAVA_OPTS": "-D%s=%r" % (key, props[key])})

        self.env.set({"NO_PAUSE": "TRUE"})
        self.env.add_java_param({"JAVA_OPTS": self.settings.get("java-opts", None)})
codereview_python_data_4851
        assert contents == ref_contents(fnames[index])


-alias_batch_size=64
-@pipeline_def(batch_size=alias_batch_size, device_id=0, num_threads=4)
def file_pipe(file_op, file_list):
    files, labels = file_op(file_list=file_list)
    return files, labels

I think that in general a smaller batch_size would do

        assert contents == ref_contents(fnames[index])


+batch_size_alias_test=64
+@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4)
def file_pipe(file_op, file_list):
    files, labels = file_op(file_list=file_list)
    return files, labels
codereview_python_data_4858
    access_target = None
    target_id = None
    user_can_grant_roles = True
-    firewall_rules_to_be_deleted = ["default-allow-icmp", "default-allow-internal", "default-allow-rdp", "default-allow-ssh"]

    def __init__(self, config, previous_installer=None):
        """Init.

This exceeds the 80-char limiit. You can do:
```
firewall_rules_to_be_deleted = [
    'default-allow-icmp',
    'default-allow-internal',
    'default-allow-rdp',
    'default-allow-ssh',
]
```

    access_target = None
    target_id = None
    user_can_grant_roles = True

    def __init__(self, config, previous_installer=None):
        """Init.
codereview_python_data_4860
        self.app_svc.application.router.add_route('POST', '/file/upload', self.upload_exfil_http)

    async def get_endpoint_by_access(self, request, endpoint):
-        allowed = [p for p in await self.auth_svc.get_permissions(request) if p in self.modules]
-        for a in allowed:
            try:
-                return await self.modules[a][endpoint](self, request)
            except Exception as e:
                self.log.debug(e)
        return await self.login(request)

can you use better named variables (allowed and a) in this function?

        self.app_svc.application.router.add_route('POST', '/file/upload', self.upload_exfil_http)

    async def get_endpoint_by_access(self, request, endpoint):
+        access = [p for p in await self.auth_svc.get_permissions(request) if p in self.modules]
+        for module in access:
            try:
+                return await self.modules[module][endpoint](self, request)
            except Exception as e:
                self.log.debug(e)
        return await self.login(request)
codereview_python_data_4862
        cmdutils.check_overflow(new_idx, 'int')
        self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)

-    @cmdutils.register(instance='command-dispatcher', scope='window',
-                       debug=True)
    @cmdutils.argument('choice', completion=miscmodels.suggest)
    def suggest(self, command: str, suggestions: str, choice: str):

I don't think the `debug=True` makes sense here. I guess it'll just show up in the completion, no biggie.

        cmdutils.check_overflow(new_idx, 'int')
        self._tabbed_browser.tabBar().moveTab(cur_idx, new_idx)

+    @cmdutils.register(instance='command-dispatcher', scope='window')
    @cmdutils.argument('choice', completion=miscmodels.suggest)
    def suggest(self, command: str, suggestions: str, choice: str):
codereview_python_data_4870
            response = self.client.datasets().get(projectId=dataset.project_id, datasetId=dataset.dataset_id).execute()
            if dataset.location is not None:
-                fetched_location = response.get('location', '')
-                if not fetched_location:
-                    fetched_location = 'undefined'
                if dataset.location != fetched_location:
-                    raise Exception('''Dataset already exists with regional location {}. Can't use {}.'''.format(fetched_location, dataset.location))

        except http.HttpError as ex:
            if ex.resp.status == 404:

If we want `'undefined'` as the default value then we should specify that instead of `''`...

            response = self.client.datasets().get(projectId=dataset.project_id, datasetId=dataset.dataset_id).execute()
            if dataset.location is not None:
+                fetched_location = response.get('location')
                if dataset.location != fetched_location:
+                    raise Exception('''Dataset already exists with regional location {}. Can't use {}.'''.format(
+                        fetched_location if fetched_location is not None else 'unspecified',
+                        dataset.location))

        except http.HttpError as ex:
            if ex.resp.status == 404:
codereview_python_data_4871
        return getattr(self._handle, attr)

    def __enter__(self):
-        """Open File handle with WITH statement."""
        return self

    def __exit__(self, type, value, traceback):
-        """Close File handle with WITH statement."""
        self._handle.close()

Repetitive use of the word 'with' here and in ``__exit__`` (same in ``bgzf.py`` but I didn't notice till now). How about:

```python
"""Special method called when opening the file using a with-statement."""
```

and

```python
"""Special method called when closing the file using a with-statement."""
```

        return getattr(self._handle, attr)

    def __enter__(self):
+        """Call special method when opening the file using a with-statement."""
        return self

    def __exit__(self, type, value, traceback):
+        """Call special method when closing the file using a with-statement."""
        self._handle.close()
codereview_python_data_4877
            max_clique = max(max_clique,len(clique))
    return max_clique - 1


def chordal_simplicial_vertex(G):
    """Returns the simplicial vertex of the chordal graph G.
-
    Parameters
    ----------
    G : graph

Use the decorator `@not_implemnted_for('directed','multigraph')` here. This will raise `NetworkXNotImplemented`, which seems much more relevant.

            max_clique = max(max_clique,len(clique))
    return max_clique - 1


+@not_implemented_for('directed','multigraph')
def chordal_simplicial_vertex(G):
    """Returns the simplicial vertex of the chordal graph G.
+
    Parameters
    ----------
    G : graph
codereview_python_data_4885
    GTTGCTTCTGGCGTGGGTGGGGGGG
    <BLANKLINE>

    """
-    in_mode = "rb" if in_format in _BinaryFormats else "rU"
    out_mode = "wb" if out_format in _BinaryFormats else "w"

I believe `"rU"` should be `"r"`.

    GTTGCTTCTGGCGTGGGTGGGGGGG
    <BLANKLINE>

    """
+    in_mode = "rb" if in_format in _BinaryFormats else "r"
    out_mode = "wb" if out_format in _BinaryFormats else "w"
codereview_python_data_4889
        self._on_msg(self.decode(msg))


-class SimpleJupyterComm(Comm):
    """
-    SimpleJupyterComm provides a Comm for simple unidirectional
    communication from the python process to a frontend.
    The Comm is opened before the first event is sent to the frontend.
    """

How about calling it `JupyterPushComm`? From the sound of it, we may not need it in future one bokeh works with bi-directional comms.

        self._on_msg(self.decode(msg))


+class JupyterPushComm(Comm):
    """
+    JupyterPushComm provides a Comm for simple unidirectional
    communication from the python process to a frontend.
    The Comm is opened before the first event is sent to the frontend.
    """
codereview_python_data_4892
            f.write('value = 1\n')

-        # The number of dependencies should be 2: "a.pxd" and "a.pyx"
-        self.assertEqual(2, len(dep_tree.all_dependencies(a_pyx)))

        # Cythonize to create a.c
        fresh_cythonize(a_pyx)

So, why not check for the two file names instead of just their count?

            f.write('value = 1\n')

+        # The dependencies for "a.pyx" are "a.pxd" and "a.pyx".
+        self.assertEqual({a_pxd, a_pyx}, dep_tree.all_dependencies(a_pyx))

        # Cythonize to create a.c
        fresh_cythonize(a_pyx)
codereview_python_data_4903
    try:
        avg_size_of_message //= num_of_messages
    except ZeroDivisionError:
        current_app.logger.warn("No messages calculated", exc_info=True)

    current_app.logger.info("Done!")

If you get to this line avg_size_of_message is an undefined value, yet you use it below. You you should set this value to something in the exception block.

    try:
        avg_size_of_message //= num_of_messages
    except ZeroDivisionError:
+        avg_size_of_message = 0
        current_app.logger.warn("No messages calculated", exc_info=True)

    current_app.logger.info("Done!")
codereview_python_data_4905
        no_cmd_split: If true, ';;' to split sub-commands is ignored.
        backend: Which backend the command works with (or None if it works with
                 both)
-        no_replace_variables: Whether or not to replace variables like {url}
        _qute_args: The saved data from @cmdutils.argument
        _needs_js: Whether the command needs javascript enabled
        _modes: The modes the command can be executed in.

I think "Don't replace variables ..." would be cleaner.

        no_cmd_split: If true, ';;' to split sub-commands is ignored.
        backend: Which backend the command works with (or None if it works with
                 both)
+        no_replace_variables: Don't replace variables like {url}
        _qute_args: The saved data from @cmdutils.argument
        _needs_js: Whether the command needs javascript enabled
        _modes: The modes the command can be executed in.
codereview_python_data_4906
    default value of ``False``.
    """

    def __init__(self, *args, **kwargs):
        super(BoolParameter, self).__init__(*args, **kwargs)
        if self._default == _no_value:
            self._default = False

What is the importance of `nargs` and `const`?

    default value of ``False``.
    """

+    improved_parsing = False
+
    def __init__(self, *args, **kwargs):
+        self.improved_parsing = kwargs.pop("improved_parsing", self.__class__.improved_parsing)
        super(BoolParameter, self).__init__(*args, **kwargs)
        if self._default == _no_value:
            self._default = False
codereview_python_data_4911
def main():
    """ Main function """
-    pub = '9RaWxppkP9UyYWA7NJb5FcgkzfJNPfvPX3FCNw2T5Pwb'
    asset = Asset(None, 'e6969f87-4fc9-4467-b62a-f0dfa1c85002')
-    tx = Transaction.create([pub], [([pub], 1)], asset=asset)
    tx_json = json.dumps(tx.to_dict(), indent=2, sort_keys=True)

    base_path = os.path.join(os.path.dirname(__file__),

Maybe use a more descriptive name for this variable (currently `pub`). Elsewhere in BigchainDB, I often see `pubkey` or `public_key`.

def main():
    """ Main function """
+    pubkey = '9RaWxppkP9UyYWA7NJb5FcgkzfJNPfvPX3FCNw2T5Pwb'
    asset = Asset(None, 'e6969f87-4fc9-4467-b62a-f0dfa1c85002')
+    tx = Transaction.create([pubkey], [([pubkey], 1)], asset=asset)
    tx_json = json.dumps(tx.to_dict(), indent=2, sort_keys=True)

    base_path = os.path.join(os.path.dirname(__file__),
codereview_python_data_4916
    @staticmethod
    def get_file_handler(file_name):
        file_fmt = Formatter("[%(asctime)s %(levelname)s %(name)s] %(message)s")
-        file_handler = logging.FileHandler(file_name)
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(file_fmt)
        return file_handler

Why change root log level?

    @staticmethod
    def get_file_handler(file_name):
        file_fmt = Formatter("[%(asctime)s %(levelname)s %(name)s] %(message)s")
+        file_handler = logging.FileHandler(filename=file_name, encoding="utf-8")
        file_handler.setLevel(logging.DEBUG)
        file_handler.setFormatter(file_fmt)
        return file_handler
codereview_python_data_4925
def authenticate_presign_url_signv4(method, path, headers, data, url, query_params, request_dict):

    # Calculating Signature
-    LOGGER.debug('Calculating the version 4 signature.')
    aws_request = create_request_object(request_dict)
    ReadOnlyCredentials = namedtuple('ReadOnlyCredentials', ['access_key', 'secret_key', 'token'])

I'd say let's remove this log output (to avoid being too verbose).

def authenticate_presign_url_signv4(method, path, headers, data, url, query_params, request_dict):

    # Calculating Signature
    aws_request = create_request_object(request_dict)
    ReadOnlyCredentials = namedtuple('ReadOnlyCredentials', ['access_key', 'secret_key', 'token'])
codereview_python_data_4926
# IO of PDB files (including flexible selective output)
from .PDBIO import PDBIO, Select
-from .MMCIFIO import MMCIFIO
-
# Some methods to eg. get a list of Residues
# from a list of Atoms.
from . import Selection

Is this needed? The ``Bio.PDB`` module already does too many automatic imports (and name shadowing) so I'd prefer to avoid it.

# IO of PDB files (including flexible selective output)
from .PDBIO import PDBIO, Select

# Some methods to eg. get a list of Residues
# from a list of Atoms.
from . import Selection
codereview_python_data_4935
    def test_percentchange(self, seed_value, window_length):
        pct_change = PercentChange(
-            inputs=(),
            window_length=window_length,
        )

I think we still probably want a length-1 list of inputs here rather than an empty tuple. Ideally, we'd use `SingleInputMixin` to require exactly one input.

    def test_percentchange(self, seed_value, window_length):
        pct_change = PercentChange(
+            inputs=[EquityPricing.close, ],
            window_length=window_length,
        )
codereview_python_data_4943
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys

import sphinx
from sphinx.errors import VersionRequirementError
-import datetime

curr_path = os.path.dirname(os.path.realpath(__file__))
libpath = os.path.join(curr_path, '../python-package/')

Please move this import upper to other similar imports

# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
+import datetime
import os
import sys

import sphinx
+
from sphinx.errors import VersionRequirementError

curr_path = os.path.dirname(os.path.realpath(__file__))
libpath = os.path.join(curr_path, '../python-package/')
codereview_python_data_4953
    def __init__(self, mode):
        super(CornerPool, self).__init__()
        assert mode in self.pool_functions
-        if torch.__version__ >= '1.5.0':
-            self.corner_pool = self.cummax_dim_flip[mode]
-        else:
-            self.corner_pool = self.pool_functions[mode]

    def forward(self, x):
        if torch.__version__ >= '1.5.0':
-            dim, flip = self.corner_pool
            if flip:
                x = x.flip(dim)
            pool_tensor, _ = torch.cummax(x, dim=dim)

This is quite weird. I suggest saving `mode` and get dim and flip in `forward()`.

    def __init__(self, mode):
        super(CornerPool, self).__init__()
        assert mode in self.pool_functions
+        self.mode = mode
+        self.corner_pool = self.pool_functions[mode]

    def forward(self, x):
        if torch.__version__ >= '1.5.0':
+            dim, flip = self.cummax_dim_flip[self.mode]
            if flip:
                x = x.flip(dim)
            pool_tensor, _ = torch.cummax(x, dim=dim)
codereview_python_data_4960
        self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)

        if self.data_init and self.is_rnn:
-            print(
-                "WeightNormalization: Using `data_init=True` with RNNs is not advised"
-            )

    def build(self, input_shape):
        """Build `Layer`"""

Might be better to use logging here and emit a WARN. Also could you expand on the rationale for the user (maybe just note that its advised from the paper).

        self.is_rnn = isinstance(self.layer, tf.keras.layers.RNN)

        if self.data_init and self.is_rnn:
+            logging.warn(
+                "WeightNormalization: Using `data_init=True` with RNNs "
+                "is advised against by the paper. Use `data_init=False`.")

    def build(self, input_shape):
        """Build `Layer`"""
codereview_python_data_4966
def handle_put_rule(data):
    schedule = data.get('ScheduleExpression')
-    enabled = True
-    if data.get('State') and data.get('State') == 'DISABLED':
-        enabled = False

    if schedule:
        job_func = get_scheduled_rule_func(data)

nitpick: lines 85-87 could be simplified to:
```
enabled = data.get('State') != 'DISABLED'
```

def handle_put_rule(data):
    schedule = data.get('ScheduleExpression')
+    enabled = data.get('State') != 'DISABLED'

    if schedule:
        job_func = get_scheduled_rule_func(data)
codereview_python_data_4967
__all__ = ['gsddmm', 'copy_u', 'copy_v', 'copy_e']


def reshape_lhs_rhs(lhs_data, rhs_data):
-    r""" Reshape the dimension of lhs and rhs data

    Parameters
    ----------

can you give an example of the output shape? it seems you add a new dimension in the tensor, but the value of the added dimension isn't 1.

__all__ = ['gsddmm', 'copy_u', 'copy_v', 'copy_e']


def reshape_lhs_rhs(lhs_data, rhs_data):
+    r""" Expand dims so that there will be no broadcasting issues with different
+    number of dimensions. For example, given two shapes (N, 3, 1), (E, 5, 3, 4)
+    that are valid broadcastable shapes, change them to (N, 1, 3, 1) and
+    (E, 5, 3, 4)

    Parameters
    ----------
codereview_python_data_4972
""" proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme if proxy: proxy_scheme = urlparse(proxy).scheme You conditionally create `proxy_scheme` but unconditionally reference it here unless my coffee hasn't kicked in. Shouldn't this block look like: ``` py proxy_scheme = '' if proxy: proxy_scheme = urlparse(proxy).scheme if proxy and proxy_scheme.lower().startswith('socks'): url = request.path_url # ... ``` """ proxy = select_proxy(request.url, proxies) scheme = urlparse(request.url).scheme + proxy_scheme = '' if proxy: proxy_scheme = urlparse(proxy).scheme
codereview_python_data_4978
            # Initalizing THttpClient may raise an exception if proxy settings
            # are used but the port number is not a valid integer.
            pass
-        finally:
-            # Thrift do not handle the use case when invalid proxy format is
-            # used (e.g.: no schema is specified). For this reason we need to
-            # verify the proxy format in our side.
-            self._validate_proxy_format()

        self.protocol = TJSONProtocol.TJSONProtocol(self.transport)
        self.client = None

If THttpClient throws TTransportException for some reason but not because something bad with proxy settings then this finally branch try to validate proxy settings. Please call self._validate_proxy_format() in the ValueError branch not in the finally branch.

            # Initalizing THttpClient may raise an exception if proxy settings
            # are used but the port number is not a valid integer.
            pass
+
+        # Thrift do not handle the use case when invalid proxy format is
+        # used (e.g.: no schema is specified). For this reason we need to
+        # verify the proxy format in our side.
+        self._validate_proxy_format()

        self.protocol = TJSONProtocol.TJSONProtocol(self.transport)
        self.client = None
codereview_python_data_4981
    G = nx.cycle_graph(7)
    C = [[0, 1, 6], [0, 1, 5]]
    b = nx.group_betweenness_centrality(
-        G, C, weight=None, endpoints=False, normalized=False
    )
    b_answer = [0.0, 6.0]
    assert b == b_answer

Do you need to put `endpoints=False` on these tests? It is the default. And can you why the answer is not the same value (2.0 instead of 1.0) as the old function?

    G = nx.cycle_graph(7)
    C = [[0, 1, 6], [0, 1, 5]]
    b = nx.group_betweenness_centrality(
+        G, C, weight=None, normalized=False
    )
    b_answer = [0.0, 6.0]
    assert b == b_answer
codereview_python_data_4983
"""Get machine ID""" return self._machine_id - @property - def num_clients(self): - """Get total number of clients""" - return self._num_clients - def barrier(self): """Barrier for all client nodes. is this necessary? we can call rpc.get_num_client to get the number of clients. """Get machine ID""" return self._machine_id def barrier(self): """Barrier for all client nodes.
codereview_python_data_4986
    with tf.compat.v1.Session() as sess:
        sess.run(initializers)
-        try:
            for _ in range(iterations):
                dataset_results.append(sess.run(ops_to_run))
-        except tf.errors.OutOfRangeError:
-            if to_stop_iter:
-                return dataset_results
-            else:
-                raise
    return dataset_results

This try/except could be a helper function, and be reused.

    with tf.compat.v1.Session() as sess:
        sess.run(initializers)
+        with expect_iter_end(not to_stop_iter, tf.errors.OutOfRangeError):
            for _ in range(iterations):
                dataset_results.append(sess.run(ops_to_run))
    return dataset_results
codereview_python_data_4994
-# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. this is already in consensus/poet/core/sawtooth_poet/journal/block_wrapper.py isn't it? +# Copyright 2018 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
codereview_python_data_4995
[code snippet unavailable: the fetched content was a Varnish 503 "Backend is unhealthy" error page] Use `Timer` instead? Though I understand the time measured this way is correct in an end-to-end setting. [code snippet unavailable: the fetched content was a Varnish 503 "Backend is unhealthy" error page]
codereview_python_data_5000
# filter and crop the masks if 'gt_masks' in results: valid_gt_masks = [] - for i, is_valid in enumerate(valid_inds): - if is_valid: - gt_mask = results['gt_masks'][i] - valid_gt_masks.append( - gt_mask[crop_y1:crop_y2, crop_x1:crop_x2]) results['gt_masks'] = valid_gt_masks return results This may look simpler. ```python for i in np.where(valid_inds)[0]: gt_mask = results['gt_masks'][i][crop_y1:crop_y2, crop_x1: crop_x2] valid_gt_masks.append(gt_mask) ``` # filter and crop the masks if 'gt_masks' in results: valid_gt_masks = [] + for i in np.where(valid_inds)[0]: + gt_mask = results['gt_masks'][i][crop_y1:crop_y2, crop_x1: + crop_x2] + valid_gt_masks.append(gt_mask) results['gt_masks'] = valid_gt_masks return results
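For reference, `np.where(mask)[0]` on a boolean array returns the indices of the `True` entries, which is why the suggested loop matches the original `enumerate` plus `if is_valid` version; a small standalone illustration with toy masks:

```python
import numpy as np

valid_inds = np.array([True, False, True, True, False])
gt_masks = [np.full((4, 4), i) for i in range(5)]  # toy stand-ins for instance masks
crop_y1, crop_y2, crop_x1, crop_x2 = 1, 3, 0, 2

# np.where on a boolean array yields the indices of the True entries,
# so only the masks of kept boxes are cropped.
valid_gt_masks = [gt_masks[i][crop_y1:crop_y2, crop_x1:crop_x2]
                  for i in np.where(valid_inds)[0]]
print(np.where(valid_inds)[0], [m.shape for m in valid_gt_masks])
# [0 2 3] [(2, 2), (2, 2), (2, 2)]
```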
codereview_python_data_5009
'stream parameter of the same name' % v) return mapping - def _listener(self, *events): self._memoize = not any(e.type == 'triggered' for e in events) self.trigger([self]) self._memoize = True I think it might be clearer if this method were called `watcher`. 'stream parameter of the same name' % v) return mapping + def _watcher(self, *events): self._memoize = not any(e.type == 'triggered' for e in events) self.trigger([self]) self._memoize = True
codereview_python_data_5018
else: raise TypeError('eval_sample_weight, eval_class_weight, eval_init_score, and eval_group should be dict or list') valid_weight = get_meta_data(eval_sample_weight, i) - if self.class_weight is not None: valid_class_sample_weight = _LGBMComputeSampleWeight(get_meta_data(eval_class_weight, i), valid_data[1]) if valid_weight is None or len(valid_weight) == 0: valid_weight = valid_class_sample_weight @guolinke I suppose you mean here: `if get_meta_data(eval_class_weight, i) is not None: ` else: raise TypeError('eval_sample_weight, eval_class_weight, eval_init_score, and eval_group should be dict or list') valid_weight = get_meta_data(eval_sample_weight, i) + if get_meta_data(eval_class_weight, i) is not None: valid_class_sample_weight = _LGBMComputeSampleWeight(get_meta_data(eval_class_weight, i), valid_data[1]) if valid_weight is None or len(valid_weight) == 0: valid_weight = valid_class_sample_weight
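The snippet leans on a `get_meta_data` helper that is not shown here; a hypothetical sketch of how such a dict-or-list accessor could behave (an assumption for illustration, not the actual LightGBM implementation):

```python
def get_meta_data(collection, i):
    """Hypothetical per-validation-set accessor: dict keyed by index, list, or None."""
    if collection is None:
        return None
    if isinstance(collection, dict):
        return collection.get(i, None)
    if isinstance(collection, list):
        return collection[i] if i < len(collection) else None
    raise TypeError('eval meta data should be dict or list')

print(get_meta_data({1: [0.5, 2.0]}, 1))     # [0.5, 2.0]
print(get_meta_data([None, [0.5, 2.0]], 0))  # None
```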
codereview_python_data_5032
"hyperframe>=5.0, <6", "jsbeautifier>=1.6.3, <1.7", "kaitaistruct>=0.7, <0.8", - "ldap3>=2.2.0, <2.2.1", "passlib>=1.6.5, <1.8", "pyasn1>=0.1.9, <0.3", "pyOpenSSL>=16.0, <17.1", Is there any issue with 2.2.3? If not this should be `<2.3`. "hyperframe>=5.0, <6", "jsbeautifier>=1.6.3, <1.7", "kaitaistruct>=0.7, <0.8", + "ldap3>=2.2.0, <2.2.3", "passlib>=1.6.5, <1.8", "pyasn1>=0.1.9, <0.3", "pyOpenSSL>=16.0, <17.1",
codereview_python_data_5039
'%s%s' % (constants.COLLECT_DATA_FLOW_FLAG, dataflow_binary_path)) fuzzing_strategies.append(strategy.DATAFLOW_TRACING_STRATEGY.name) else: - logs.log_warn('Fuzz target is not found in dataflow build.') # DataFlow Tracing requires fork mode, always use it with DFT strategy. if use_dataflow_tracing or strategy_pool.do_strategy(strategy.FORK_STRATEGY): log_error as otherwise we will never see it on our dashboards. also, can add ", skip strategy" in message. '%s%s' % (constants.COLLECT_DATA_FLOW_FLAG, dataflow_binary_path)) fuzzing_strategies.append(strategy.DATAFLOW_TRACING_STRATEGY.name) else: + logs.log_error(
+ 'Fuzz target is not found in dataflow build, skipping strategy.') # DataFlow Tracing requires fork mode, always use it with DFT strategy. if use_dataflow_tracing or strategy_pool.do_strategy(strategy.FORK_STRATEGY):
codereview_python_data_5040
params = dict(get_param_values(element), kdims=[x, y], datatype=['xarray'], bounds=bounds) - if self.vdim_prefix is not None: kdim_list = '_'.join(str(kd) for kd in params['kdims']) vdim_prefix = self.vdim_prefix.format(kdims=kdim_list) else: Not real different but no point doing the kdims join if there is no prefix string: ```suggestion if self.vdim_prefix: ``` params = dict(get_param_values(element), kdims=[x, y], datatype=['xarray'], bounds=bounds) + if self.vdim_prefix: kdim_list = '_'.join(str(kd) for kd in params['kdims']) vdim_prefix = self.vdim_prefix.format(kdims=kdim_list) else:
codereview_python_data_5054
[code snippet unavailable: the fetched content was a Varnish 503 "Backend is unhealthy" error page] Instead of `success`, this should be more clear as `is_success`, so that we can avoid something where `success = False` [code snippet unavailable: the fetched content was a Varnish 503 "Backend is unhealthy" error page]
codereview_python_data_5059
Inquirer()._ethereum = ethereum @staticmethod - def is_cache_valid(cache: CachedPriceEntry) -> bool: - return ts_now() - cache.time <= CURRENT_PRICE_CACHE_SECS @staticmethod def set_oracles_order(oracles: List[CurrentPriceOracle]) -> None: Since you went ahead and made a function then why not just copy all logic inside the function? Pass the `cache_key` here and add both the cache retrieval and the check for `None` in the function itself. Inquirer()._ethereum = ethereum @staticmethod + def get_cached_price_entry(cache_key: Tuple[Asset, Asset]) -> Optional[CachedPriceEntry]: + cache = Inquirer()._cached_current_price.get(cache_key, None) + if cache is None or ts_now() - cache.time > CURRENT_PRICE_CACHE_SECS: + return None + + return cache @staticmethod def set_oracles_order(oracles: List[CurrentPriceOracle]) -> None:
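A self-contained mock of how the consolidated helper above could be used at a call site (the `find_price` wrapper, the module-level cache dict, and the fixed price value are stand-ins for illustration):

```python
from collections import namedtuple
from time import time as ts_now

CachedPriceEntry = namedtuple('CachedPriceEntry', ['price', 'time'])
CURRENT_PRICE_CACHE_SECS = 60
_cached_current_price = {}

def get_cached_price_entry(cache_key):
    """Mirrors the consolidated helper: return a hit only if it is still fresh."""
    cache = _cached_current_price.get(cache_key, None)
    if cache is None or ts_now() - cache.time > CURRENT_PRICE_CACHE_SECS:
        return None
    return cache

def find_price(from_asset, to_asset):
    cache_key = (from_asset, to_asset)
    cache = get_cached_price_entry(cache_key)
    if cache is not None:
        return cache.price
    price = 42.0  # stand-in for querying the configured oracles
    _cached_current_price[cache_key] = CachedPriceEntry(price=price, time=ts_now())
    return price

print(find_price('BTC', 'EUR'))  # queries the "oracle" and caches the result
print(find_price('BTC', 'EUR'))  # served from the fresh cache entry
```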
codereview_python_data_5066
self.logger.info("Not a valid room url.") return - if room["status"] != 4: - self.logger.info("Stream current unavailable.") return hls_url = "http://dlhls.cdn.zhanqi.tv/zqlive/{room[videoId]}_1024/index.m3u8?Dnion_vsnae={room[videoId]}".format(room=room) Why remove the constant? Seems to me that having the code read `room["status"] != STATUS_ONLINE` is more understandable than `room["status"] != 4`. self.logger.info("Not a valid room url.") return + if room["status"] != STATUS_ONLINE: + self.logger.info("Stream currently unavailable.") return hls_url = "http://dlhls.cdn.zhanqi.tv/zqlive/{room[videoId]}_1024/index.m3u8?Dnion_vsnae={room[videoId]}".format(room=room)
codereview_python_data_5083
from future import standard_library standard_library.install_aliases() -from urllib import parse from config import db_config from datastore import data_handler we usually do "import urllib.parse" in other places so that a generic parse function can't conflict. from future import standard_library standard_library.install_aliases() +import urllib.parse from config import db_config from datastore import data_handler
codereview_python_data_5094
## Load our local_settings try: SETTINGS_MODULE = os.environ['GRAPHITE_SETTINGS_MODULE'] -except: SETTINGS_MODULE = 'graphite.local_settings' try: Agreed with Codacy here - please specify exception type here ## Load our local_settings try: SETTINGS_MODULE = os.environ['GRAPHITE_SETTINGS_MODULE'] +except KeyError: SETTINGS_MODULE = 'graphite.local_settings' try:
codereview_python_data_5099
name = models.CharField(_('Name'), max_length=255, db_index=True) description = models.TextField(_('Description'), blank=True) - meta_title = models.CharField(_('Meta Title'), max_length=255, db_index=True) meta_description = models.TextField(_('Meta Description'), blank=True) image = models.ImageField(_('Image'), upload_to='categories', blank=True, null=True, max_length=255) I don't think we need a `db_index` here - this is not a field we will be querying/filtering on. We also need to allow this to be blank. name = models.CharField(_('Name'), max_length=255, db_index=True) description = models.TextField(_('Description'), blank=True) + meta_title = models.CharField(_('Meta Title'), max_length=255, blank=True, db_index=True) meta_description = models.TextField(_('Meta Description'), blank=True) image = models.ImageField(_('Image'), upload_to='categories', blank=True, null=True, max_length=255)
codereview_python_data_5104
else: msg = f'Unexpected {token.text()!r}' elif rule == 'for iterator': - msg = ("Complex expressions in a FOR query must be " - "parenthesized") if i > 0: context = pctx.merge_context([ ```suggestion msg = ("Complex expressions in a FOR iterator clause must be " ``` else: msg = f'Unexpected {token.text()!r}' elif rule == 'for iterator': + msg = ("Complex expressions in a FOR iterator clause must " + "be parenthesized") if i > 0: context = pctx.merge_context([
codereview_python_data_5105
def _format_msg(msg: str) -> str: """Convert message to HTML suitable for rendering.""" - ret = msg - ret = html.escape(ret) - ret = ret.strip() - ret = ret.replace('\n', '<br />') - return ret def javascript_confirm(url, js_msg, abort_on): Why not use `msg` directly below? def _format_msg(msg: str) -> str: """Convert message to HTML suitable for rendering.""" + return html.escape(msg).strip().replace('\n', '<br />') def javascript_confirm(url, js_msg, abort_on):
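A quick usage check of the condensed one-liner (same escape, strip, replace order as the original multi-step version):

```python
import html

def _format_msg(msg: str) -> str:
    """Convert message to HTML suitable for rendering."""
    return html.escape(msg).strip().replace('\n', '<br />')

print(_format_msg('  <b>hello</b>\nworld  '))
# &lt;b&gt;hello&lt;/b&gt;<br />world
```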