codereview_python_data_6437
for ace_contig in Ace.parse(source):
    # Convert the ACE contig record into a SeqRecord...
    consensus_seq_str = ace_contig.sequence
-    # Assume its DNA unless there is a U in it,
-    molecule_type = "DNA"
-    if "U" in consensus_seq_str and "T" not in consensus_seq_str:
-        molecule_type = "RNA"
    if "*" in consensus_seq_str:
        # For consistency with most other file formats, map

If it's not written in the file, don't set the `molecule_type`.

for ace_contig in Ace.parse(source):
    # Convert the ACE contig record into a SeqRecord...
    consensus_seq_str = ace_contig.sequence
    if "*" in consensus_seq_str:
        # For consistency with most other file formats, map
codereview_python_data_6442
    assert not bias
    assert in_channels % groups == 0, \
-        f'in_channels {in_channels} cannot be divisible by groups {groups}'
    assert out_channels % groups == 0, \
-        f'out_channels {out_channels} cannot be divisible ' \
        f'by groups {groups}'
    self.in_channels = in_channels

`cannot be` -> `is not`

    assert not bias
    assert in_channels % groups == 0, \
+        f'in_channels {in_channels} is not divisible by groups {groups}'
    assert out_channels % groups == 0, \
+        f'out_channels {out_channels} is not divisible ' \
        f'by groups {groups}'
    self.in_channels = in_channels
codereview_python_data_6446
The value of each BigchainDB Server configuration setting is determined according to the following rules:
-* If it’s set by an environment variable, then use that value
-* Otherwise, if it’s set in a local config file, then use that value
* Otherwise, use the default value (contained in ``bigchaindb.__init__``)

Might sound OCD-ish, but can we use `'` instead of `’`? I say that because we never used `’` in docstrings.

The value of each BigchainDB Server configuration setting is determined according to the following rules:
+* If it's set by an environment variable, then use that value
+* Otherwise, if it's set in a local config file, then use that value
* Otherwise, use the default value (contained in ``bigchaindb.__init__``)
codereview_python_data_6452
        self.barrier()
        shape = list(shape)
        # One of the clients in each machine will issue requests to the local server.
-        if self._client_id % part_policy.partition_book.num_partitions() == 0:
            part_shape = shape.copy()
            part_shape[0] = part_policy.get_data_size()
            request = InitDataRequest(name,

we can use: `if self._client_id % self._machine_count == 0`

        self.barrier()
        shape = list(shape)
        # One of the clients in each machine will issue requests to the local server.
+        num_clients_per_part = rpc.get_num_client() / part_policy.partition_book.num_partitions()
+        if self._client_id % num_clients_per_part == 0:
            part_shape = shape.copy()
            part_shape[0] = part_policy.get_data_size()
            request = InitDataRequest(name,
codereview_python_data_6453
# See the License for the specific language governing permissions and
# limitations under the License.
#
-from .getter import get_config
from .cfg_parser import LuigiConfigParser
from .toml_parser import LuigiTomlParser
-__all__ = ['get_config', 'LuigiConfigParser', 'LuigiTomlParser']

Shouldn't these dates include 2018. Probably not that important

# See the License for the specific language governing permissions and
# limitations under the License.
#
+from .getter import get_config, add_config_path
from .cfg_parser import LuigiConfigParser
from .toml_parser import LuigiTomlParser
+__all__ = [
+    'add_config_path',
+    'get_config',
+    'LuigiConfigParser',
+    'LuigiTomlParser',
+]
codereview_python_data_6461
             'B' * config.FILE_TRANSFER_CHUNK_SIZE +
             'C' * (config.FILE_TRANSFER_CHUNK_SIZE / 2))
-TEST_BUCKET_BUNDLE = 'cf_test_bundle'
def _dirs_equal(dircmp):

nit: maybe just bundle-bucket to match others ?

             'B' * config.FILE_TRANSFER_CHUNK_SIZE +
             'C' * (config.FILE_TRANSFER_CHUNK_SIZE / 2))
+TEST_BUNDLE_BUCKET = 'clusterfuzz-test-bundle'
def _dirs_equal(dircmp):
codereview_python_data_6463
    def calculate(self, basket):
        base_charge = self.method.calculate(basket)
-        discount = self.offer.shipping_discount(base_charge.excl_tax, basket.currency)
        excl_tax = base_charge.excl_tax - discount
        return prices.Price(
            currency=base_charge.currency,

I think passing `base_charge.currency` may be better for consistency, in all these methods.

    def calculate(self, basket):
        base_charge = self.method.calculate(basket)
+        discount = self.offer.shipping_discount(base_charge.excl_tax, base_charge.currency)
        excl_tax = base_charge.excl_tax - discount
        return prices.Price(
            currency=base_charge.currency,
codereview_python_data_6467
-import os, sys, math, time, uuid, json, warnings
from unittest import SkipTest
-import numpy as np
-
-from matplotlib import pyplot as plt
-from matplotlib.backends.backend_nbagg import CommSocket, new_figure_manager_given_figure
try:
    import IPython

Have you tested this with IPython 3? As we sure this will be stable?

+import os, sys, math, time, uuid, json
from unittest import SkipTest
+try:
+    from matplotlib.backends.backend_nbagg import CommSocket, new_figure_manager_given_figure
+except:
+    CommSocket = object
try:
    import IPython
codereview_python_data_6470
def abbreviate(words):
-    regex = '[A-Z]+[\'a-z]*|[\'a-z]+'
    return ''.join(word[0].upper() for word in re.findall(regex, words))

Again, switching to double quotes for this string literal removes the need for escaping apostrophes.

def abbreviate(words):
+    regex = "[A-Z]+['a-z]*|['a-z]+"
    return ''.join(word[0].upper() for word in re.findall(regex, words))
codereview_python_data_6475
          'Unable to read artifact definitions from: {0:s} with error: '
          '{1!s}').format(custom_artifacts_path, exception))
-    setattr(configuration_object, '_artifacts_registry', registry)
    setattr(configuration_object, '_artifact_definitions_path', artifacts_path)
    setattr(
        configuration_object, '_custom_artifacts_path', custom_artifacts_path)

This is not a configuration value I opt we solve this differently.

          'Unable to read artifact definitions from: {0:s} with error: '
          '{1!s}').format(custom_artifacts_path, exception))
    setattr(configuration_object, '_artifact_definitions_path', artifacts_path)
    setattr(
        configuration_object, '_custom_artifacts_path', custom_artifacts_path)
codereview_python_data_6477
        self.log = logging.getLogger('')
        self.prev_errors = BetterDict()
        self.cur_errors = BetterDict()
-        self.treat_errors = True
    def _get_err_diff(self):
        # find diff of self.prev_errors and self.cur_errors

can we refactor out this if-else? looks too nested...

        self.log = logging.getLogger('')
        self.prev_errors = BetterDict()
        self.cur_errors = BetterDict()
+        self.handle_errors = True
    def _get_err_diff(self):
        # find diff of self.prev_errors and self.cur_errors
codereview_python_data_6481
def get_container_network_for_lambda():
    global LAMBDA_CONTAINER_NETWORK
-    if not config.LAMBDA_DOCKER_NETWORK and LAMBDA_CONTAINER_NETWORK is None:
        try:
            if config.is_in_docker:
                networks = DOCKER_CLIENT.get_networks(bootstrap.get_main_container_name())

tiny nit: We could use the "Return Early" pattern here to slightly untangle the combined `if` checks:
```
if config.LAMBDA_DOCKER_NETWORK:
    return config.LAMBDA_DOCKER_NETWORK
if LAMBDA_CONTAINER_NETWORK is None:
    ...
return LAMBDA_CONTAINER_NETWORK
```

def get_container_network_for_lambda():
    global LAMBDA_CONTAINER_NETWORK
+    if config.LAMBDA_DOCKER_NETWORK:
+        return config.LAMBDA_DOCKER_NETWORK
+    if LAMBDA_CONTAINER_NETWORK is None:
        try:
            if config.is_in_docker:
                networks = DOCKER_CLIENT.get_networks(bootstrap.get_main_container_name())
codereview_python_data_6482
                    # - deck_cost is a computable based on sum
                    # - count also has cardinality 1 of the return set
                    <int64>(F.deck_cost / count(F.deck))
            );
        ''', [
            [

Missing `LIMIT 1`

                    # - deck_cost is a computable based on sum
                    # - count also has cardinality 1 of the return set
                    <int64>(F.deck_cost / count(F.deck))
+                LIMIT 1
            );
        ''', [
            [
codereview_python_data_6488
        Does validations common to vmSettings and ExtensionsConfig
        """
        if self._status_upload_blob_type not in ["BlockBlob", "PageBlob"]:
-            logger.info("Status Blob type '{0}' is ot valid, assuming BlockBlob", self._status_upload_blob)
            self._status_upload_blob_type = "BlockBlob"

Small typo , 'not'

        Does validations common to vmSettings and ExtensionsConfig
        """
        if self._status_upload_blob_type not in ["BlockBlob", "PageBlob"]:
+            logger.info("Status Blob type '{0}' is not valid, assuming BlockBlob", self._status_upload_blob)
            self._status_upload_blob_type = "BlockBlob"
codereview_python_data_6497
from __future__ import print_function
import tensorflow as tf
-from tensorflow.python.framework import tensor_shape  # TODO: better import?
EPSILON = 0.0000000001

I'd suppose we could access the static shape directly, and thus, there is no need to import `tensor_shape`. Like
```python
if train.shape[-1] is None:
```

from __future__ import print_function
import tensorflow as tf
EPSILON = 0.0000000001
codereview_python_data_6498
class SeqFileRandomAccess(_IndexedSeqFileProxy):
-    """Random Access File generic."""
    def __init__(self, filename, format, alphabet):
        """Initialize the class."""

How about something more like this?: ``Base class for defining random access to sequence files.``

class SeqFileRandomAccess(_IndexedSeqFileProxy):
+    """Base class for defining random access to sequence files."""
    def __init__(self, filename, format, alphabet):
        """Initialize the class."""
codereview_python_data_6499
        for i in range(len(other)):
-            # TODO: If we don't need to reindex, don't. It is expensive.
-            # The challenge with avoiding reindexing is that we need to make sure that
-            # the internal indices line up (i.e. if a drop or a select was just
-            # performed, the internal indices may not match).
            if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
                reindex_left = None
            else:

```suggestion
            if (i != 0 or (left_old_idx.equals(joined_index)) and not force_repartition):
```
parenthesis for readability

        for i in range(len(other)):
+            # If the indices are equal we can skip partitioning so long as we are not
+            # forced to repartition. See note above about `force_repartition`.
            if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
                reindex_left = None
            else:
codereview_python_data_6504
from .. import utils
__all__ = [
-    'random_walk']
-def random_walk(g, nodes, *, metapath=None, length=None, prob=None):
    """Generate random walk traces from an array of seed nodes (or starting nodes),
    based on the given metapath.

Give a simple code example in the docstring.

from .. import utils
__all__ = [
+    'random_walk',
+    'pack_traces']
+def random_walk(g, nodes, *, metapath=None, length=None, prob=None, restart_prob=None):
    """Generate random walk traces from an array of seed nodes (or starting nodes),
    based on the given metapath.
codereview_python_data_6507
verbose = 0
-standard_include_path = os.path.abspath(os.path.normpath(
-    os.path.join(os.path.dirname(__file__), os.path.pardir, 'Includes')))
class Context(object):
    #  This class encapsulates the context needed for compiling

I don't think you need `os.path.normpath` here.

verbose = 0
+standard_include_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                                     os.path.pardir, 'Includes'))
class Context(object):
    #  This class encapsulates the context needed for compiling
codereview_python_data_6512
    assert(np.array_equal(outputs, reference))
def test_one_hot_operator():
    for i in range(10):
        premade_batch = [np.array([np.random.randint(0, sample_size)], dtype=np.int32)
                         for x in range(sample_size)]
        yield check_one_hot_operator, premade_batch

I think it would be good to set `np.random.seed` or `RandomState` before using np.random to make sure we have the same test every time.

    assert(np.array_equal(outputs, reference))
def test_one_hot_operator():
+    np.random.seed(42);
    for i in range(10):
        premade_batch = [np.array([np.random.randint(0, sample_size)], dtype=np.int32)
                         for x in range(sample_size)]
        yield check_one_hot_operator, premade_batch
codereview_python_data_6513
            slow_step_size: A floating point value. The ratio for updating the slow weights.
            name: Optional name for the operations created when applying
-                gradients. Defaults to "RectifiedAdam".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}.
                `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is

Rename `"RectifiedAdam"` -> `"Lookahead"`

            slow_step_size: A floating point value. The ratio for updating the slow weights.
            name: Optional name for the operations created when applying
+                gradients. Defaults to "Lookahead".
            **kwargs: keyword arguments. Allowed to be {`clipnorm`, `clipvalue`, `lr`, `decay`}.
                `clipnorm` is clip gradients by norm; `clipvalue` is clip gradients by value, `decay` is
codereview_python_data_6517
                self.scanner_configs.get('output_path')):
            os.makedirs(output_path)
        output_path = os.path.abspath(output_path)
-        base_scanner.upload_csv(output_path, now_utc, output_csv_name)
        # Send summary email.
        # TODO: Untangle this email by looking for the csv content

A base object is meant to be inherited, rather than have public utility methods used like this.

                self.scanner_configs.get('output_path')):
            os.makedirs(output_path)
        output_path = os.path.abspath(output_path)
+        self._upload_csv(output_path, now_utc, output_csv_name)
        # Send summary email.
        # TODO: Untangle this email by looking for the csv content
codereview_python_data_6520
    def loss_single(self, cls_score, bbox_pred, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples, cfg):
        # classification loss
-        if self.use_sigmoid_cls:
-            labels = labels.reshape(-1, 1)
-            label_weights = label_weights.reshape(-1, 1)
-        else:
-            labels = labels.reshape(-1)
-            label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(
            -1, self.cls_out_channels)
        if self.use_sigmoid_cls:

You may confirm whether different shapes for sigmoid and softmax are necessary.

    def loss_single(self, cls_score, bbox_pred, labels, label_weights,
                    bbox_targets, bbox_weights, num_total_samples, cfg):
        # classification loss
+        labels = labels.reshape(-1)
+        label_weights = label_weights.reshape(-1)
        cls_score = cls_score.permute(0, 2, 3, 1).reshape(
            -1, self.cls_out_channels)
        if self.use_sigmoid_cls:
codereview_python_data_6530
            n = len(line)
            idx = int(line[0]) - 1
            if n in (7, 10):
-                x, y, z = map(float, line[4:7])
            elif n in (6, 9):
-                x, y, z = map(float, line[3:6])
-            pos[idx] = x, y, z
    def _parse_vel(self, datalines, vel):
        """Strip velocity info into np array in place"""

It's quicker just to do `pos[idx] = line[3:6]/line[4:7]` and let numpy figure it out than cast to float ourselves

            n = len(line)
            idx = int(line[0]) - 1
            if n in (7, 10):
+                pos[idx] = line[4:7]
            elif n in (6, 9):
+                pos[idx] = line[3:6]
    def _parse_vel(self, datalines, vel):
        """Strip velocity info into np array in place"""
codereview_python_data_6535
            network = network_and_project.group(2)
            if not network_interface.access_configs:
-                LOGGER.warn('Network interface: %s, doesn\'t '
                            'have access_configs.',
                            network_interface.full_name)
                continue

I would like this message to show that we are unable to find the violation here. So, something like:
```
Unable to determine blacklist violation for network interface: %s, because it doesn't have access_configs.
```

            network = network_and_project.group(2)
            if not network_interface.access_configs:
+                LOGGER.warn('Unable to determine blacklist violation for '
+                            'network interface: %s, because it doesn\'t '
                            'have access_configs.',
                            network_interface.full_name)
                continue
codereview_python_data_6541
    def framewise(self):
        """
        Property to determine whether the current frame should have
-        framewise normalization enabled.
        """
        current_frames = [el for f in self.traverse(lambda x: x.current_frame)
                          for el in (f.traverse(lambda x: x, [Element])

Docstring should say why this is a bokeh specific property - that it is about generating events to update the bokeh plots when it makes sense. Otherwise, having this property makes sense.

    def framewise(self):
        """
        Property to determine whether the current frame should have
+        framewise normalization enabled. Required for bokeh plotting
+        classes to determine whether to send updated ranges for each
+        frame.
        """
        current_frames = [el for f in self.traverse(lambda x: x.current_frame)
                          for el in (f.traverse(lambda x: x, [Element])
codereview_python_data_6545
                decl = attr_type.cpp_optional_declaration_code(attr.cname)
            else:
                decl = attr_type.declaration_code(attr.cname)
-            if attr.utility_code_definition:
-                type.scope.use_utility_code(attr.utility_code_definition)
            code.putln("%s;" % decl)
    code.putln(footer)
    if type.objtypedef_cname is not None:

Scopes have a method `use_entry_utility_code()` that "does the right thing".

                decl = attr_type.cpp_optional_declaration_code(attr.cname)
            else:
                decl = attr_type.declaration_code(attr.cname)
+            type.scope.use_entry_utility_code(attr)
            code.putln("%s;" % decl)
    code.putln(footer)
    if type.objtypedef_cname is not None:
codereview_python_data_6548
        >>> ag.groupby('resnames', 'masses')['ALA'][15.999]
        <AtomGroup with 19 atoms>
-        .. versionadded::
        """
        res = {}

The version number disappeared here. Also, you should add a line indicating that the function changed in version 0.18.0:
```
.. versionchanged:: 0.18.0
   The function accepts multiple attributes.
```

        >>> ag.groupby('resnames', 'masses')['ALA'][15.999]
        <AtomGroup with 19 atoms>
+        .. versionadded:: 0.18.0
        """
        res = {}
codereview_python_data_6557
# perform the analysis iteratively?
#
# To address the evolution of the graphs, you generate a variety of graph samples. In other words, you need
-# **generative models** of graphs. Instead of and/or, in-addition to learning
# node and edge features, you would need to model the distribution of arbitrary graphs.
# While general generative models can model the density function explicitly and
# implicitly and generate samples at once or sequentially, you only focus

I believe we should just remove the "and/or, in-addition to" thing. That would just be "Instead of learning nodes and edge features, you would need to ...". I think you also mentioned the wrong Li; should be this one @mufeili .

# perform the analysis iteratively?
#
# To address the evolution of the graphs, you generate a variety of graph samples. In other words, you need
+# **generative models** of graphs. In-addition to learning
# node and edge features, you would need to model the distribution of arbitrary graphs.
# While general generative models can model the density function explicitly and
# implicitly and generate samples at once or sequentially, you only focus
codereview_python_data_6558
# This prevents a race condition between two threads deserializing functions
# and trying to import pandas at the same time.
def import_pandas(*args):
-    import pandas
ray.worker.global_worker.run_function_on_all_workers(import_pandas)

Can you please change the line like this to tell flake8 lint to ignore the error?
```python
import pandas  # noqa F401
```
two spaces between code and `#` symbol are required for this to work.

# This prevents a race condition between two threads deserializing functions
# and trying to import pandas at the same time.
def import_pandas(*args):
+    import pandas  # noqa F401
ray.worker.global_worker.run_function_on_all_workers(import_pandas)
codereview_python_data_6561
        return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
    def build_tracking_url(self, logs_output):
        return logs_output
    def run(self):

Sorry for going back and forth. Adding docstring here would be very helpful for others to understand the need of this method.

        return ''.join(map(lambda s: s.decode('utf-8'), file_object.readlines()))
    def build_tracking_url(self, logs_output):
+        """
+        This method is intended for transforming pattern match in logs to an URL
+        :param logs_output: Found match of `self.tracking_url_pattern`
+        :return: a tracking URL for the task
+        """
        return logs_output
    def run(self):
codereview_python_data_6562
            f_elems = str(elems)
        else:
            f_elems = f"[{elems[0]}, {elems[1]}, ..., {elems[-2]}, {elems[-1]}]"
-        types = tuple({type(e) for e in elems})
        f_types = f"type {types[0]}" if len(types) == 1 else f"types {types}"
        raise InvalidArgument(
            f"Generated elements {f_elems} from strategy "

```suggestion
        types = tuple(sorted({type(e) for e in elems}, key=lambda t: t.__name__))
```
Pretty pedantic, but determinism in error messages is useful for anyone grepping through logs.

            f_elems = str(elems)
        else:
            f_elems = f"[{elems[0]}, {elems[1]}, ..., {elems[-2]}, {elems[-1]}]"
+        types = tuple(
+            sorted({type(e) for e in elems}, key=lambda t: t.__name__)
+        )
        f_types = f"type {types[0]}" if len(types) == 1 else f"types {types}"
        raise InvalidArgument(
            f"Generated elements {f_elems} from strategy "
codereview_python_data_6565
                content_type='text/html')
            LOGGER.debug('Inventory summary sent successfully by email.')
        except util_errors.EmailSendError:
-            LOGGER.exception('Unable to send Violations email')
    @staticmethod
    def transform_to_template(data):

This is the inventory summary.

                content_type='text/html')
            LOGGER.debug('Inventory summary sent successfully by email.')
        except util_errors.EmailSendError:
+            LOGGER.exception('Unable to send Inventory summary email')
    @staticmethod
    def transform_to_template(data):
codereview_python_data_6571
        self.dialect.load(file_to_convert)
        base_script = {"scenarios": {}, EXEC: []}
        self.log.debug("Processing thread groups...")
        tg_etree_elements = self.dialect.tree.findall(".//ThreadGroup")
-        tg_etree_elements.extend(self.dialect.tree.findall(".//com.blazemeter.jmeter.threads.concurrency.ConcurrencyThreadGroup"))
        if not tg_etree_elements:
            raise TaurusInternalException("No thread groups found!")

please make the line shorter

        self.dialect.load(file_to_convert)
        base_script = {"scenarios": {}, EXEC: []}
        self.log.debug("Processing thread groups...")
+        concurrency_tg = ".//com.blazemeter.jmeter.threads.concurrency.ConcurrencyThreadGroup"
        tg_etree_elements = self.dialect.tree.findall(".//ThreadGroup")
+        tg_etree_elements.extend(self.dialect.tree.findall(concurrency_tg))
        if not tg_etree_elements:
            raise TaurusInternalException("No thread groups found!")
codereview_python_data_6572
            return float(tokens.number.scientific[0])
        elif tokens.string:
-            return unicode(tokens.string)[1:-1]
        elif tokens.boolean:
            return tokens.boolean[0] == 'true'

Just `return tokens.string[1:-1]` is enough

            return float(tokens.number.scientific[0])
        elif tokens.string:
+            return tokens.string[1:-1]
        elif tokens.boolean:
            return tokens.boolean[0] == 'true'
codereview_python_data_6576
            relevant_facts.append(variable_facts)
        return relevant_facts
    @staticmethod
    async def _build_single_test_variant(copy_test, clean_test, combo):
        """

really good PR idea here & nice clean approach to your code. two things i think it could use: 1) at line 81, since we're starting to pull in most of the properties of a fact here, do you think we should just include the full fact dict() instead of pulling out specific properties? I think that's probably better, as we won't have to modify this again if we decide we want something else that's on the fact. 2) at line 98; is there a special meaning behind needing the combo_set/link_id variables to equal 1 or are we just doing a check to ensure it is not empty? if the latter, can we just do
```
if combo_set_id and combo_link_id:
    ...
```

            relevant_facts.append(variable_facts)
        return relevant_facts
+    @staticmethod
+    def _reward_fact_relationship(combo_set, combo_link, score):
+        if len(combo_set) == 1 and len(combo_link) == 1:
+            score *= 2
+        return score
    @staticmethod
    async def _build_single_test_variant(copy_test, clean_test, combo):
        """
codereview_python_data_6578
"""Redis result store backend.""" from __future__ import absolute_import, unicode_literals -import threading from functools import partial from ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED from kombu.utils.functional import retry_over_time from kombu.utils.objects import cached_property Please ensure import order is correct according to isort. """Redis result store backend.""" from __future__ import absolute_import, unicode_literals from functools import partial from ssl import CERT_NONE, CERT_OPTIONAL, CERT_REQUIRED +import threading from kombu.utils.functional import retry_over_time from kombu.utils.objects import cached_property
codereview_python_data_6587
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
-                 plugin=None,
-                 stage_with_plugin=(False, False, False, False),
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNet, self).__init__()

In this way, all plugins share the same setting, which is not flexible if we have multiple plugins at the same time. It is more flexible (but also complicated) with this type:
```python
plugin = dict(
    conv1=[
        dict(
            cfg=dict(type='xxx', arg1='xxx'),
            stages=(False, True, True, True))
    ],
    conv2=[
        dict(cfg=dict(type='yyy')),
        dict(cfg=dict(type='yy2'), stages=(False, False, True, True))
    ])
```

                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
+                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True):
        super(ResNet, self).__init__()
codereview_python_data_6611
    @property
    def is_reactant(self):
-        """Return true if this Entry participate in any reaction in its parent pathway."""
        for rxn in self._pathway.reactions:
            if self._id in rxn.reactant_ids:
                return True

Can you change this to:
```python
"""Return true if this Entry participates in any reaction in its parent pathway."""
```
(Already existing typo)

    @property
    def is_reactant(self):
+        """Return true if this Entry participates in any reaction in its parent pathway."""
        for rxn in self._pathway.reactions:
            if self._id in rxn.reactant_ids:
                return True
codereview_python_data_6613
    default_profile = QWebEngineProfile.defaultProfile()
    init_user_agent()
-    manager = notification.DBusNotificationManager()
-    manager.set_as_presenter_for(default_profile)
    default_profile.setter = ProfileSetter(  # type: ignore[attr-defined]
        default_profile)

This would probably fit better in `webenginetab.init` (even though it's not per-tab, but I haven't found a better place for such QtWebEngine-specific initialization yet), where various similar stuff is done.

    default_profile = QWebEngineProfile.defaultProfile()
    init_user_agent()
+    presenter = notification.DBusNotificationPresenter()
+    presenter.install(default_profile)
    default_profile.setter = ProfileSetter(  # type: ignore[attr-defined]
        default_profile)
codereview_python_data_6621
        'create',
        help='Start a new inventory')
    create_inventory_parser.add_argument(
-        'import_as', metavar=('MODEL_NAME',),
-        nargs='?',
        help='Import the inventory when complete, requires a model name')
    create_inventory_parser.add_argument(
        '--background',

Why are we dropping the dash dash usage here `'--import_as'`?

        'create',
        help='Start a new inventory')
    create_inventory_parser.add_argument(
+        '--import_as', metavar=('MODEL_NAME',),
        help='Import the inventory when complete, requires a model name')
    create_inventory_parser.add_argument(
        '--background',
codereview_python_data_6627
-from __future__ import absolute_import
-
import pytest
from pontoon.tags.models import Tag

Why do you need this?

import pytest
from pontoon.tags.models import Tag
codereview_python_data_6628
        logGroupName='/aws/lambda/{}'.format(function_name)
    )
-    try:
-        return rs['logStreams'][0]['logStreamName']
-    except Exception:
-        raise
def get_event_message(events):

nit: I think we can remove this `try-except` check here.

        logGroupName='/aws/lambda/{}'.format(function_name)
    )
+    return rs['logStreams'][0]['logStreamName']
def get_event_message(events):
codereview_python_data_6629
def compactHash(string):
    hash = md5()
-    hash.update(string.encode('unicode_escape'))
    return hash.hexdigest()

`string.encode('utf-8')` is more common but I guess this is mostly cosmetic :)

def compactHash(string):
    hash = md5()
+    hash.update(string.encode('utf-8'))
    return hash.hexdigest()
codereview_python_data_6630
#
# from stp_core.loop.eventually import eventually
# from plenum.common.util import randomString
# from plenum.test.test_node import checkNodesConnected
# from plenum.test.node_catchup.helper import checkNodeLedgersForEquality
#

Please remove the test if it isn't needed anymore

#
# from stp_core.loop.eventually import eventually
# from plenum.common.util import randomString
+# from plenum.test import waits as plenumWaits
# from plenum.test.test_node import checkNodesConnected
# from plenum.test.node_catchup.helper import checkNodeLedgersForEquality
#
codereview_python_data_6639
                                        recommendation_top_artist_limit,
                                        recommendation_similar_artist_limit)
-    ti = time.monotonic()
-    messages = get_recommendations_for_all(params, users, ti)
    # persisted data must be cleared from memory after usage to avoid OOM
    recordings_df.unpersist()

Do we actually need to pass the ti from here? Can't we just calculate it in the get_recommendations function? If we can, we should do that, because the extra parameter makes the function harder to use.

                                        recommendation_top_artist_limit,
                                        recommendation_similar_artist_limit)
+    messages = get_recommendations_for_all(params, users)
    # persisted data must be cleared from memory after usage to avoid OOM
    recordings_df.unpersist()
codereview_python_data_6643
def update_symbolizer_options(tool_options, symbolize_inline_frames=False):
  """Checks and updates the necessary symbolizer options such as
-  # `external_symbolizer_path` and `symbolize_inline_frames`."""
  if 'external_symbolizer_path' not in tool_options:
    llvm_symbolizer_path_arg = _quote_value_if_needed(
        get_llvm_symbolizer_path())

Remove the #

def update_symbolizer_options(tool_options, symbolize_inline_frames=False):
  """Checks and updates the necessary symbolizer options such as
+  `external_symbolizer_path` and `symbolize_inline_frames`."""
  if 'external_symbolizer_path' not in tool_options:
    llvm_symbolizer_path_arg = _quote_value_if_needed(
        get_llvm_symbolizer_path())
codereview_python_data_6649
        return RateLimiter(FLAGS.max_admin_api_calls_per_day,
                           self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
-    def get_group_members(self, group):
        """Get all the members for specified groups.
        Args:
-            groups: A group key, e.g. it's email address.
        Returns:
            A list of member objects from the API.

nit: can we make the name "group" into "group_key", such as:
```
request = members_stub.list(groupKey=group_key)
```

        return RateLimiter(FLAGS.max_admin_api_calls_per_day,
                           self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
+    def get_group_members(self, group_key):
        """Get all the members for specified groups.
        Args:
+            group_key: Its unique id assigned by the Admin API.
        Returns:
            A list of member objects from the API.
codereview_python_data_6651
    in the array :attr:`RMSD.rmsd`.
    .. versionchanged:: 1.0.0
-       ``save()`` method was removed, use ``np.savetxt()`` on :attr:`rmsd`
-       instead.
    """
    def __init__(self, atomgroup, reference=None, select='all',

Should the references point explicitly to the class (e.g. :attr:`RMSD.rmsd` rather than :attr:`rmsd`) to distinguish `RMSD.rmsd` from `rms.rmsd` (or at least for consistency with elsewhere in the docs)? Would apply to the versionchange message in the other modules, too

    in the array :attr:`RMSD.rmsd`.
    .. versionchanged:: 1.0.0
+       ``save()`` method was removed, use ``np.savetxt()`` on
+       :attr:`RMSD.rmsd` instead.
    """
    def __init__(self, atomgroup, reference=None, select='all',
codereview_python_data_6652
    def _email_error(self, task, formatted_traceback, subject, headline):
        formatted_subject = subject.format(task=task, host=self.host)
-        command = ' '.join("'{}'".format(arg) for arg in sys.argv)
        message = notifications.format_task_error(headline, task, command, formatted_traceback)
        notifications.send_error_email(formatted_subject, message, task.owner_email)

This makes the command look weird and hard to read I think. What about simply
```
command = subprocess.list2cmdline(sys.argv)
```

    def _email_error(self, task, formatted_traceback, subject, headline):
        formatted_subject = subject.format(task=task, host=self.host)
+        command = subprocess.list2cmdline(sys.argv)
        message = notifications.format_task_error(headline, task, command, formatted_traceback)
        notifications.send_error_email(formatted_subject, message, task.owner_email)
codereview_python_data_6657
import time
def SoftRelationPartition(edges, n, threshold=0.05):
-    """This partitions a list of edges based in to n partitions that
-    SMALL relations (which has only small number of edges) will be put
-    into a single LARGE partition (relations with large number of edges)
-    will be evenly divided into all partitions.
-
    Algo:
    For r in relations:
      if r.size() > threadold

this statement is confusing. This splits a list of edges into n partitions. Small relations will be placed into a single partition while a large relation will be evenly divided into all partitions.

import time
def SoftRelationPartition(edges, n, threshold=0.05):
+    """This partitions a list of edges to n partitions according to their
+    relation time. For any relation with number of edges larger than the
+    threshold, its edges will be evenly distributed into all partitions.
+    For any relation with number of edges smaller than the threshold, its
+    edges will be put into one single partition.
+
    Algo:
    For r in relations:
      if r.size() > threadold
codereview_python_data_6662
class InsertStmt(MutatingStmt):
-    on_conflict: typing.Optional[typing.List[PointerRef]] = None
class UpdateStmt(MutatingStmt, FilteredStmt):

SQL `ON CONFLICT` accepts constraint names as conflict targets, let's use a constraint object directly. I suggest adding a `ConstraintRef` to IR:
```python
class ConstraintRef(ImmutableBase):
    # The id of the constraint
    id: uuid.UUID
    # The module id of the constraint
    module_id: uuid.UUID
```
You can obtain the name of the SQL constraint with `common.get_constraint_backend_name()`

class InsertStmt(MutatingStmt):
+    on_conflict: typing.Optional[typing.List[ConstraintRef]] = None
class UpdateStmt(MutatingStmt, FilteredStmt):
codereview_python_data_6663
        self.get_organization()
        self.check_billing_enabled()
        self.has_permissions()
-        self.get_host_project()
        self.enable_apis()

This method name might be better as ```check_network_host_project_id()```. I know that other methods here also uses ```get``` naming, but I opened issue #842 to address

        self.get_organization()
        self.check_billing_enabled()
        self.has_permissions()
+        self.check_network_host_project_id()
        self.enable_apis()
codereview_python_data_6670
        step_time = int(load.ramp_up / load.steps)
        thread_groups = jmx.tree.findall(".//ThreadGroup")
        for thread_group in thread_groups:
-            thread_cnc = int(thread_group.find(".//stringProp[@name='ThreadGroup.num_threads']").text)
            tg_name = thread_group.attrib["testname"]
            thread_step = int(ceil(float(thread_cnc) / load.steps))
            step_group = JMX.get_stepping_thread_group(thread_cnc, thread_step, step_time, load.hold + step_time,

String prop not guaranteed, better take anything

        step_time = int(load.ramp_up / load.steps)
        thread_groups = jmx.tree.findall(".//ThreadGroup")
        for thread_group in thread_groups:
+            thread_cnc = int(thread_group.find(".//*[@name='ThreadGroup.num_threads']").text)
            tg_name = thread_group.attrib["testname"]
            thread_step = int(ceil(float(thread_cnc) / load.steps))
            step_group = JMX.get_stepping_thread_group(thread_cnc, thread_step, step_time, load.hold + step_time,
codereview_python_data_6674
            rule_bigquery_acl.role: bigquery_acl.role,
        }
-        return all([
-            re.match(rule_regex, acl_val)
-            for (rule_regex, acl_val) in rule_regex_to_val.iteritems()
-        ])
    # TODO: The naming is confusing and needs to be fixed in all scanners.
    def find_policy_violations(self, bigquery_acl):

It was already an issue, but the list comprehension will undo the laziness of all, so this will always evaluate the second re.match. For here and all_matched below, it might be better to use a list of tuples instead of a dict, and a helper function
```
def all_re_match(patterns_and_strings):
    for pattern, string in patterns_and_strings):
        if not re.match(pattern, string):
            return False
    return True
```
...
```
return all_re_match([
    (rule_bigquery_acl.dataset_id, bigquery_acl.dataset_id),
    (rule_bigquery_acl.role, bigquery_acl.role)
])
```
It may even make sense to put the helper function in common/util/regular_exp.py

            rule_bigquery_acl.role: bigquery_acl.role,
        }
+        return regular_exp.all_match(rule_regex_to_val)
    # TODO: The naming is confusing and needs to be fixed in all scanners.
    def find_policy_violations(self, bigquery_acl):
codereview_python_data_6681
        else:
            self.bot.sniper_disabled_global_warning = False
        targets = []
-        target = {}
        # Retrieve the targets
        if self.mode == SniperMode.SOCIAL:

Seems nowhere to use 'target' in the following code.

        else:
            self.bot.sniper_disabled_global_warning = False
        targets = []
        # Retrieve the targets
        if self.mode == SniperMode.SOCIAL:
codereview_python_data_6682
        sockdir, dir_stat.st_uid, dir_stat.st_mode))
    print('sockfile: {} / owner {} / mode {:o}'.format(
        sockfile, file_stat.st_uid, file_stat.st_mode))
-    # pylint: enable=no-member,useless-suppression
    assert file_owner_ok or dir_owner_ok
    assert file_mode_ok or dir_mode_ok

I think this disable/enable is only needed around the `os.getuid` lines.

        sockdir, dir_stat.st_uid, dir_stat.st_mode))
    print('sockfile: {} / owner {} / mode {:o}'.format(
        sockfile, file_stat.st_uid, file_stat.st_mode))
    assert file_owner_ok or dir_owner_ok
    assert file_mode_ok or dir_mode_ok
codereview_python_data_6692
    Raises:
        ApiExecutionError: ApiExecutionError is raised if the call to the
            GCP API fails
    """
    try:

nit: purely a matter of personal taste, it would be nicer if the `location` condition is first to make its precedence more clear; the condition also seems simpler to boot.
```
if location:
    do this
elif zone:
    do that
```

    Raises:
        ApiExecutionError: ApiExecutionError is raised if the call to the
            GCP API fails
+        ValueError: Raised if neither zone nor location are passed in.
    """
    try:
codereview_python_data_6704
       .. Note:: This option does not perform a true mass weighting but
                 weighting by the number of atoms in each residue; the name of
                 the parameter exists for historical reasons and will
-                 be removed in 0.17.0.
    .. SeeAlso:: :class:`GNMAnalysis`

should be on the same line as weights.

       .. Note:: This option does not perform a true mass weighting but
                 weighting by the number of atoms in each residue; the name of
                 the parameter exists for historical reasons and will
+                 be removed in 0.17.0. Until then, setting `MassWeight` to
+                 anything but ``None`` will override `weights`.
    .. SeeAlso:: :class:`GNMAnalysis`
codereview_python_data_6708
import subprocess
import posixpath
import functools
-from xml.etree import ElementTree
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QApplication, QTabBar

I'd prefer just `import xml.etree` here and then using `xml.etree.ElementTree` etc. so it's clear which module this is coming from.

import subprocess
import posixpath
import functools
+import xml.etree.ElementTree
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWidgets import QApplication, QTabBar
codereview_python_data_6712
        Returns:
            bool: True if using a composite root, else False.
        """
-        return bool(not self.root_resource_id)
    def get_root_resource_id(self):
        """Return the configured root resource id.

nit: not self.root_resource_id is a boolean so the casting is not be needed.

        Returns:
            bool: True if using a composite root, else False.
        """
+        return not self.root_resource_id
    def get_root_resource_id(self):
        """Return the configured root resource id.
codereview_python_data_6721
POSTGRES_ADMIN_URI="postgresql://postgres@db/template1"
-# Other postgres configuration options
-# Oldest listens which can be stored in the database, in days.
-MAX_POSTGRES_LISTEN_HISTORY = "-1"
-# Log Postgres queries if they execeed this time, in milliseconds.
-PG_QUERY_TIMEOUT = "3000"
-# Set to True to enable 'synchronous_commit' for Postgres. Default: False
-PG_ASYNC_LISTEN_COMMIT = False
-
# MusicBrainz OAuth
MUSICBRAINZ_CLIENT_ID = ""
MUSICBRAINZ_CLIENT_SECRET = ""

no longer used

POSTGRES_ADMIN_URI="postgresql://postgres@db/template1"
# MusicBrainz OAuth
MUSICBRAINZ_CLIENT_ID = ""
MUSICBRAINZ_CLIENT_SECRET = ""
codereview_python_data_6723
            'FORSETI_BUCKET': bucket_name[len('gs://'):],
            'BUCKET_LOCATION': self.config.bucket_location,
            'GCP_CLIENT_SERVICE_ACCOUNT': self.gcp_service_acct_email,
-            'FORSETI_TARGET': 'forseti-target: "{}"'.format(self.branch),
            'FORSETI_SERVER_ZONE': self.server_zone
        }

Should we change `self.branch` to something more generic? like `self.version`?

            'FORSETI_BUCKET': bucket_name[len('gs://'):],
            'BUCKET_LOCATION': self.config.bucket_location,
            'GCP_CLIENT_SERVICE_ACCOUNT': self.gcp_service_acct_email,
+            'FORSETI_VERSION': self.version,
            'FORSETI_SERVER_ZONE': self.server_zone
        }
codereview_python_data_6726
    if labels.numel() == 0:
        return bboxes, labels
-    out_bboxes, keep = batched_nms(bboxes[:, :4], bboxes[:, -1].contiguous(),
                                   labels, cfg.nms_cfg)
    out_labels = labels[keep]

To be on the safe side, it is best to add contiguous to bboxes.

    if labels.numel() == 0:
        return bboxes, labels
+    out_bboxes, keep = batched_nms(bboxes[:, :4].contiguous(),
+                                   bboxes[:, -1].contiguous(),
+                                   labels, cfg.nms_cfg)
    out_labels = labels[keep]
codereview_python_data_6727
        TODO: Memory tracking
        """
        try:
-            extension_slice_name = SystemdCgroupsApi.get_extension_cgroup_name(extension_name) + ".slice"
            cgroup_relative_path = os.path.join('azure.slice/azure-vmextensions.slice',
-                                                extension_slice_name + ".slice")
            cpu_cgroup_mountpoint, _ = self._cgroups_api.get_cgroup_mount_points()
            cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path)

There seems to be a typo, an extra `.slice` here - `SystemdCgroupsApi.get_extension_cgroup_name(extension_name) + ".slice" + ".slice"` If this is used in multiple other places, maybe make this a function too (something like `SystemdCgroupsApi.get_extension_slice_name(extension_name)`) or something like that

        TODO: Memory tracking
        """
        try:
+            extension_slice_name = SystemdCgroupsApi.get_extension_slice_name(extension_name) + ".slice"
            cgroup_relative_path = os.path.join('azure.slice/azure-vmextensions.slice',
+                                                extension_slice_name)
            cpu_cgroup_mountpoint, _ = self._cgroups_api.get_cgroup_mount_points()
            cpu_cgroup_path = os.path.join(cpu_cgroup_mountpoint, cgroup_relative_path)
codereview_python_data_6729
class BigQueryTarget(luigi.target.Target):
    def __init__(self, project_id, dataset_id, table_id, client=None, location=None,
                 enable_chunking=False, chunk_size_gb=1000):
        self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id,
                             location=location)
        self.client = client or BigQueryClient()

These should be `BigQueryLoadTask` parameters, not `BigQueryTarget` constructor's parameters. The purpose of a `Target` is identifying and locating an output, and help in achieving some properties like idempotency. Better not couple the aspects of the output's _production_ in it, as producing output is what a _`Task`_ does.

class BigQueryTarget(luigi.target.Target):
+    # TODO: Pull enable_chunking and chunk_size_gb into
    def __init__(self, project_id, dataset_id, table_id, client=None, location=None,
                 enable_chunking=False, chunk_size_gb=1000):
        self.table = BQTable(project_id=project_id, dataset_id=dataset_id, table_id=table_id,
                             location=location)
        self.client = client or BigQueryClient()
codereview_python_data_6735
                                 self._default_cuda_stream_priority)
        self._pipe.SetExecutionTypes(self._exec_pipelined, self._exec_separated, self._exec_async)
        self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)
-        self._pipe.EnableOperatorOutputMemoryStatistics(self._get_memory_stats)
        self._prepared = True
        self._pipe.Build()
        self._built = True

```suggestion
        self._pipe.EnableOperatorOutputMemoryStatistics(self._get_memory_stats)
```
Consider shortening name here: `EnableMemoryStatistics` `EnableOpMemoryStats` or something like that

                                 self._default_cuda_stream_priority)
        self._pipe.SetExecutionTypes(self._exec_pipelined, self._exec_separated, self._exec_async)
        self._pipe.SetQueueSizes(self._cpu_queue_size, self._gpu_queue_size)
+        self._pipe.EnableExecutorMemoryStats(self._get_memory_stats)
        self._prepared = True
        self._pipe.Build()
        self._built = True
codereview_python_data_6736
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
-  <p>Details: cache-sea4421-SEA 1645542567 1515473453</p>
  <hr>
  <p>Varnish cache server</p>
</body>

if you're going to do this inline you may as well use kwargs

  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
+  <p>Details: cache-sea4446-SEA 1645542568 788953493</p>
  <hr>
  <p>Varnish cache server</p>
</body>
codereview_python_data_6737
class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training"""
-    pass

We may raise a warning.

class DistOptimizerHook(OptimizerHook):
    """Deprecated optimizer hook for distributed training"""
+
+    def __init__(self, *args, **kwargs):
+        warnings.warn(
+            '"DistOptimizerHook" is deprecated, please switch to'
+            '"mmcv.runner.OptimizerHook".', DeprecationWarning)
+        super().__init__(*args, **kwargs)
codereview_python_data_6741
import bigchaindb
from bigchaindb.consensus import AbstractConsensusRules
-# logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
CONFIG_DEFAULT_PATH = os.environ.setdefault(

This should be removed if not used

import bigchaindb
from bigchaindb.consensus import AbstractConsensusRules
logger = logging.getLogger(__name__)
CONFIG_DEFAULT_PATH = os.environ.setdefault(
codereview_python_data_6743
        self.s = Seq.UnknownSeq(6)
        self.u = Seq.Seq(None, length=6)
    def test_unknownseq_construction(self):
        self.assertEqual("??????", Seq.UnknownSeq(6))
        self.assertEqual("NNNNNN", Seq.UnknownSeq(6, character="N"))

What scope does this have? i.e. side effects after this class has been tested?

        self.s = Seq.UnknownSeq(6)
        self.u = Seq.Seq(None, length=6)
+    def tearDown(self):
+        warnings.simplefilter("default", BiopythonDeprecationWarning)
+
    def test_unknownseq_construction(self):
        self.assertEqual("??????", Seq.UnknownSeq(6))
        self.assertEqual("NNNNNN", Seq.UnknownSeq(6, character="N"))
codereview_python_data_6747
-_base_ = ['deformable_detr_r50_8x2_50e_coco.py']
model = dict(bbox_head=dict(with_box_refine=True))

directly use string rather than list[str]

+_base_ = 'deformable_detr_r50_8x2_50e_coco.py'
model = dict(bbox_head=dict(with_box_refine=True))
codereview_python_data_6748
        Input tensor dictionaries
    reducer : str or callable function
        One of "sum", "max", "min", "mean", "stack" or a callable function.
-        If a callable function is provided, the input arguments is a list of tensors
-        from cross types, and the output of function must be a single tensor
    order : list[Int], optional
        Merge order hint. Useful for "stack" reducer.
        If provided, each integer indicates the relative order

```suggestion
        If a callable function is provided, the input arguments must be a single list
        of tensors containing aggregation results from each edge type, and the
        output of function must be a single tensor.
```

        Input tensor dictionaries
    reducer : str or callable function
        One of "sum", "max", "min", "mean", "stack" or a callable function.
+        If a callable function is provided, the input arguments must be a single list
+        of tensors containing aggregation results from each edge type, and the
+        output of function must be a single tensor.
    order : list[Int], optional
        Merge order hint. Useful for "stack" reducer.
        If provided, each integer indicates the relative order
codereview_python_data_6756
      row['run_count']
  }
  bigquery_data.append(big_query.Insert(row=bigquery_row, insert_id=None))
  if bigquery_data:
    client = big_query.Client(
        dataset_id='main', table_id='fuzz_strategy_experiments')
    client.insert(bigquery_data)
def _query_and_upload_strategy_probabilities():

Do you want to log an error or a warning in `else` branch?

      row['run_count']
  }
  bigquery_data.append(big_query.Insert(row=bigquery_row, insert_id=None))
+  if bigquery_data:
    client = big_query.Client(
        dataset_id='main', table_id='fuzz_strategy_experiments')
    client.insert(bigquery_data)
+  else:
+    logs.log("No fuzz strategy distribution data was found to upload to "
+             "BigQuery.")
def _query_and_upload_strategy_probabilities():
codereview_python_data_6763
            ]
        ])
-    @unittest.expectedFailure
-    async def test_edgeql_scope_tuple_08(self):
        await self.assert_query_result(r'''
            # compare to test_edgeql_scope_filter_03 to see how it
            # works out without tuples

`FILTER` should refer to `_.1.name` and this will pass.

            ]
        ])
+    async def test_edgeql_scope_tuple_09(self):
        await self.assert_query_result(r'''
            # compare to test_edgeql_scope_filter_03 to see how it
            # works out without tuples
codereview_python_data_6767
        try:
            os.makedirs(os.path.dirname(self._filename))
        except OSError as e:
-            # Unlikely, but could be created before
-            # we get a chance to create it.
-            if e.errno != errno.EEXIST:
-                raise
        self.basename = os.path.basename(self._filename)
        if remember_directory:

Since we're using Python 3, you should be able to do:
```python
except FileExistsError:
    pass
```
However, we should really handle any `OSError` here - if e.g. there's no permission to create the directory, you'd get a crash dialog at the moment, right?

        try:
            os.makedirs(os.path.dirname(self._filename))
+        except FileExistsError:
+            pass
        except OSError as e:
+            self._die(e.strerror)
        self.basename = os.path.basename(self._filename)
        if remember_directory:
codereview_python_data_6769
capping_ace = resource_filename(__name__, "data/capping/ace.pdb")
capping_nma = resource_filename(__name__, "data/capping/nma.pdb")
-contacts_villin_folded = resource_filename(__name__, "data/contacts/villin_folded.gro")
-contacts_villin_unfolded = resource_filename(__name__, "data/contacts/villin_unfolded.gro")
contacts_file = resource_filename(__name__, "data/contacts/2F4K_qlist5_remap.dat")
trz4data = resource_filename(__name__, "data/lammps/datatest.trz")

possibly bzip the gro files?

capping_ace = resource_filename(__name__, "data/capping/ace.pdb")
capping_nma = resource_filename(__name__, "data/capping/nma.pdb")
+contacts_villin_folded = resource_filename(__name__, "data/contacts/villin_folded.gro.bz2")
+contacts_villin_unfolded = resource_filename(__name__, "data/contacts/villin_unfolded.gro.bz2")
contacts_file = resource_filename(__name__, "data/contacts/2F4K_qlist5_remap.dat")
trz4data = resource_filename(__name__, "data/lammps/datatest.trz")
codereview_python_data_6777
       ``p_components``, ``cumulated_variance`` will not sum to 1.
       ``align=True`` now correctly aligns the trajectory and computes the
       correct means and covariance matrix.
-
-    .. versionchanged:: 0.19.0
-       The start frame is used when performing selections and calculating
-       mean positions.  Previously the 0th frame was always used.
    """
    def __init__(self, universe, select='all', align=False, mean=None,

we'll probably want a `versionchanged` entry here just to say that we are now storing results using `MDAnalysis.analysis.base.Results`.

       ``p_components``, ``cumulated_variance`` will not sum to 1.
       ``align=True`` now correctly aligns the trajectory and computes the
       correct means and covariance matrix.
+    .. versionchanged:: 2.0.0
+       :attr:`p_components`, :attr:`variance`, :attr:`cumulated_variance`
+       and :attr:`mean_atoms` are now stored in a
+       :class:`MDAnalysis.analysis.base.Results` instance.
    """
    def __init__(self, universe, select='all', align=False, mean=None,
codereview_python_data_6792
    value = so.SchemaField(
        str, compcoef=0.909)
-    # Overloaded to mark it as not inheritable, since I found it
-    # basically impossible to get the inherited behavior to work right
-    # otherwise.
-    final = so.SchemaField(
-        bool,
-        inheritable=False,
-        compcoef=0.909,
-    )
-
    def __str__(self) -> str:
        return '<{}: at 0x{:x}>'.format(self.__class__.__name__, id(self))

Hm, why is it `inheritable` in the base class in the first place?

    value = so.SchemaField(
        str, compcoef=0.909)
    def __str__(self) -> str:
        return '<{}: at 0x{:x}>'.format(self.__class__.__name__, id(self))
codereview_python_data_6800
@pytest.mark.parametrize('number_of_eth_accounts', [0])
def test_data_import_shapeshift_trades(rotkehlchen_api_server):
-    """Test that the data import endpoint works successfully for blockfi trades"""
    rotki = rotkehlchen_api_server.rest_api.rotkehlchen
    dir_path = Path(__file__).resolve().parent.parent
    filepath = dir_path / 'data' / 'shapeshift-trade-history.csv'

```suggestion
    """Test that the data import endpoint works successfully for shapeshift trades"""
```

@pytest.mark.parametrize('number_of_eth_accounts', [0])
def test_data_import_shapeshift_trades(rotkehlchen_api_server):
+    """Test that the data import endpoint works successfully for shapeshift trades"""
    rotki = rotkehlchen_api_server.rest_api.rotkehlchen
    dir_path = Path(__file__).resolve().parent.parent
    filepath = dir_path / 'data' / 'shapeshift-trade-history.csv'
codereview_python_data_6801
                level with shape (N, num_anchors * 4, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
-            imgs (list[torch.Tensor]): List of multiple images
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space.

If you only want to use input image shape, better to get the shape as tensor at preprocessing step and feed it to img_metas. Then, there's no change to the interface of `get_bboxes` method. For example :
```
input_shape_t = one_metas[img_id]['input_shape_t']
```

                level with shape (N, num_anchors * 4, H, W)
            img_metas (list[dict]): Meta information of each image, e.g.,
                image size, scaling factor, etc.
            cfg (mmcv.Config | None): Test / postprocessing configuration,
                if None, test_cfg would be used
            rescale (bool): If True, return boxes in original image space.
codereview_python_data_6809
if __name__ == "__main__":
    from Bio._utils import run_doctest
    run_doctest(verbose=0)

You'd need to expand the existing example in order to make it a self-contained doctest, this alone will fail:
```python
>>> model = structure[0]
>>> fm = FragmentMapper(model, lsize=10, flength=5, dir="fragment_data")
>>> fragment = fm[residue]
```

if __name__ == "__main__":
    from Bio._utils import run_doctest
+    model = structure[0]
+    fm = FragmentMapper(model, lsize=10, flength=5, dir="fragment_data")
+    fragment = fm[residue]
    run_doctest(verbose=0)
codereview_python_data_6817
        schema: s_schema.Schema
    ) -> s_types.Type:
        from . import types as s_types
-        source = self.get_source(schema)
-        if not isinstance(source, s_types.Type):
-            raise TypeError('Source is expected to be a Type')
        return source
    def compare(

Let's use an `assert` statement here like we do in other places.

        schema: s_schema.Schema
    ) -> s_types.Type:
        from . import types as s_types
        source = self.get_source(schema)
+        assert isinstance(source, s_types.Type)
        return source
    def compare(
codereview_python_data_6818
@qutescheme.add_handler('testdata')
def handler(url):  # pylint: disable=unused-variable
    file_abs = os.path.abspath(os.path.dirname(__file__))
-    filename = os.path.join(file_abs, '..', 'end2end', url.path().lstrip('/'))
    with open(filename, 'rb') as f:
        data = f.read()

nitpick: `..` can be `os.pardir`

@qutescheme.add_handler('testdata')
def handler(url):  # pylint: disable=unused-variable
    file_abs = os.path.abspath(os.path.dirname(__file__))
+    filename = os.path.join(file_abs, os.pardir, 'end2end', url.path().lstrip('/'))
    with open(filename, 'rb') as f:
        data = f.read()
codereview_python_data_6821
def test_tf_dataset_mismatched_input_type():
    input_dataset = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
-    for wrong_input_dataset in ["str", [input_dataset], input_dataset]:
-        for wrong_input_name in [42, ["a"]]:
-            yield check_tf_dataset_mismatched_input_type, wrong_input_dataset, wrong_input_name
    yield check_tf_dataset_mismatched_input_type, (input_dataset, input_dataset), ("a", "b"), "HWC"
    yield check_tf_dataset_mismatched_input_type, (input_dataset), ("a", "b")
    yield check_tf_dataset_mismatched_input_type, (input_dataset, input_dataset), ("b")

Here you are checking that there's an error when you provide two wrong arguments at the same time. I think we should check one at a time.

def test_tf_dataset_mismatched_input_type():
    input_dataset = tf.data.Dataset.from_tensors(np.full((2, 2), 42)).repeat()
+    for wrong_input_dataset in ["str", [input_dataset]]:
+        yield check_tf_dataset_mismatched_input_type, wrong_input_dataset, "a"
+    for wrong_input_name in [42, ["a"]]:
+        yield check_tf_dataset_mismatched_input_type, input_dataset, wrong_input_name
    yield check_tf_dataset_mismatched_input_type, (input_dataset, input_dataset), ("a", "b"), "HWC"
    yield check_tf_dataset_mismatched_input_type, (input_dataset), ("a", "b")
    yield check_tf_dataset_mismatched_input_type, (input_dataset, input_dataset), ("b")
codereview_python_data_6823
        '''
        G = nx.Graph()
        G.add_edge('a', 'b')
-        assert_equal(list(bridges(G)), [('a', 'b')])
    def test_twoDisconnectedComponents(self):
        '''

I would delete this line, so that the tests work on Python 3.x as well.

        '''
        G = nx.Graph()
        G.add_edge('a', 'b')
+        results = list(bridges(G))
+        assert_true(results == [('a', 'b')] or
+                    results == [('b', 'a')])
    def test_twoDisconnectedComponents(self):
        '''
codereview_python_data_6826
                 "%(status_filter)s")
    paginate_by = 25
    description = ''
-    actions = ('download_selected_orders',)
    current_view = 'dashboard:order-list'
-    order_actions = ('save_note', 'delete_note', 'change_order_statuses', 'create_order_payment_event')
    def dispatch(self, request, *args, **kwargs):
`change_order_statuses` should probably be just added to `actions` and use the logic of BulkEditMixin. The other actions aren't supported as far as I can tell?
                 "%(status_filter)s")
    paginate_by = 25
    description = ''
+    actions = ('download_selected_orders','change_order_statuses')
    current_view = 'dashboard:order-list'
+    order_actions = ('save_note', 'delete_note', 'create_order_payment_event')
    def dispatch(self, request, *args, **kwargs):
codereview_python_data_6828
        kms_master_key_id = long_uid()
        sse_specification = {"Enabled": True, "SSEType": "KMS", "KMSMasterKeyId": kms_master_key_id}
-        kms_master_key_arn = "arn:aws:kms:%s:%s:key/%s" % (
-            aws_stack.get_local_region(),
-            TEST_AWS_ACCOUNT_ID,
-            kms_master_key_id,
-        )
        result = dynamodb.create_table(
            TableName=table_name,
Same as above - we can use `aws_stack.kms_key_arn(..)` here.
        kms_master_key_id = long_uid()
        sse_specification = {"Enabled": True, "SSEType": "KMS", "KMSMasterKeyId": kms_master_key_id}
+        kms_master_key_arn = aws_stack.kms_key_arn(kms_master_key_id)
        result = dynamodb.create_table(
            TableName=table_name,
codereview_python_data_6832
        ExpandInplaceOperators(context),
        IterationTransform(context),
        SwitchTransform(context),
-        OptimizeBuiltinCalls(context),
        CreateClosureClasses(context),  ## After all lookups and type inference
        CalculateQualifiedNamesTransform(context),
        ConsolidateOverflowCheck(context),
```suggestion
        OptimizeBuiltinCalls(context),  ## Necessary?
```
        ExpandInplaceOperators(context),
        IterationTransform(context),
        SwitchTransform(context),
+        OptimizeBuiltinCalls(context),  ## Necessary?
        CreateClosureClasses(context),  ## After all lookups and type inference
        CalculateQualifiedNamesTransform(context),
        ConsolidateOverflowCheck(context),
codereview_python_data_6833
        self._edges = edges
        self._arange = arange
        self._bins = bins
-        self.results = Results()
        self.results.density = None
    def _single_frame(self):
This line can be removed since `AnalysisBase` already initializes the results attribute.
        self._edges = edges
        self._arange = arange
        self._bins = bins
        self.results.density = None
    def _single_frame(self):
codereview_python_data_6836
        :return: Tuple (activity_id, correlation_id, gs_created_timestamp) or "NA" for any property that's not available
        """
-        def parse_value(parse_fn, value):
            try:
                if value not in (None, ""):
Rather than logging "NA" for None, empty string or unparsable time we should just leave the value unchanged so that we know what we are getting from CRP. Displaying it as a date/time was just meant to make reading the log easier. If we get "0", we should just log "0".
        :return: Tuple (activity_id, correlation_id, gs_created_timestamp) or "NA" for any property that's not available
        """
+        def format_value(parse_fn, value):
            try:
                if value not in (None, ""):
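A hedged sketch of the behaviour the reviewer asks for: when a value cannot be parsed, log it exactly as received rather than substituting "NA". The helper name mirrors the diff, but the body here is an illustration, not the project's implementation.

```python
def format_value(parse_fn, value):
    # Return the parsed representation when possible, otherwise the raw
    # value unchanged so the log shows exactly what was received.
    if value in (None, ""):
        return value
    try:
        return parse_fn(value)
    except Exception:
        return value

# format_value(int, "0") -> 0; format_value(int, "not-a-number") -> "not-a-number"
```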
codereview_python_data_6838
    no_visible_projects = locale.project_set.visible().count() == 0
-    has_projects_to_request = projects.visible().exclude(locales=locale).count() > 0
    if not projects:
        raise Http404
Why use `.visible()` here? Above you'll see that `projects` already includes `Project.objects.visible()`.
    no_visible_projects = locale.project_set.visible().count() == 0
+    has_projects_to_request = projects.exclude(locales=locale).count() > 0
    if not projects:
        raise Http404
codereview_python_data_6848
  # If we are not able to query properly, draw randomly according to
  # probability parameters.
  if not distribution:
-    logs.log('Cannot use weighted strategy pool. Generating default strategy pool.')
    return generate_default_strategy_pool()
  # Change the distribution to a list of named tuples rather than a list of
Seems better to remove this before landing. Ditto for some of the other logs below.
  # If we are not able to query properly, draw randomly according to
  # probability parameters.
  if not distribution:
    return generate_default_strategy_pool()
  # Change the distribution to a list of named tuples rather than a list of
codereview_python_data_6850
        # REALLY Needs to use columns!
        print(fmt % (fill_exact_width(_("ID"), 6, 6),
                     fill_exact_width(_("Action(s)"), 14, 14),
-                     fill_exact_width(P_("Package", "Package", 1), 53, 53)))
        print("-" * 79)
        fmt = "%6u | %s | %-50s"
        num = 0
shouldn't it be `Packages`?
        # REALLY Needs to use columns!
        print(fmt % (fill_exact_width(_("ID"), 6, 6),
                     fill_exact_width(_("Action(s)"), 14, 14),
+                     fill_exact_width(P_("Package", "Packages", 1), 53, 53)))
        print("-" * 79)
        fmt = "%6u | %s | %-50s"
        num = 0
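For context, `P_` here appears to wrap gettext's plural-aware lookup (an assumption on my part). A standalone sketch of the same idea with the standard library; the translation domain is made up.

```python
import gettext

translation = gettext.translation("dnf", fallback=True)
count = 1
# ngettext picks "Package" or "Packages" based on count and the locale's plural rules
label = translation.ngettext("Package", "Packages", count)
print(label)
```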
codereview_python_data_6852
    Returns:
      A FuzzOptions object.
    """
-    executable_path = environment.get_value('FUZZER_EXECUTABLE_PATH')
-    arguments = [executable_path]
-    return engine.FuzzOptions(corpus_dir, arguments, {})
  # TODO(mbarbella): As implemented, this will not work for untrusted workers.
  # We would need to copy fuzzer binaries to workers.
Rather than use an environment variable to hack around the interface, can we just pass the full path to the fuzzer as target_path instead? Right now target_path is fuzzer_name, but there's really no reason we need to care about what the name of the fuzzer is here. It's only used to get the full path to the fuzzer anyway, so let's just compute the full path beforehand and pass it here.
    Returns:
      A FuzzOptions object.
    """
+    return engine.FuzzOptions(corpus_dir, [], {})
  # TODO(mbarbella): As implemented, this will not work for untrusted workers.
  # We would need to copy fuzzer binaries to workers.
codereview_python_data_6867
    command line by setting the AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
    environment variables.
-    The project ARN is determined by the PROJECT_ARN environment variable.
    """
    devicefarm_client = boto3.client("devicefarm")
    project_arn = os.environ.get("PROJECT_ARN", None)
ARN -> Amazon Resource Number (ARN)
    command line by setting the AWS_ACCESS_KEY_ID and AWS_SECRET_KEY
    environment variables.
+    The project Amazon Resource Name (ARN) is determined by the PROJECT_ARN
+    environment variable.
    """
    devicefarm_client = boto3.client("devicefarm")
    project_arn = os.environ.get("PROJECT_ARN", None)
codereview_python_data_6869
    def event_report(self):
        for event, (color, parameters) in self._registered_events.iteritems():
-            print '-'*80
-            print 'Event: {}'.format(event)
            if parameters:
                print 'Parameters:'
                for parameter in parameters:
There is still this print and the print above.
    def event_report(self):
        for event, (color, parameters) in self._registered_events.iteritems():
            if parameters:
                print 'Parameters:'
                for parameter in parameters:
codereview_python_data_6870
                break
        # pylint: enable=compare-to-zero
-        print 'XXX: Returning violations: %r' % violations
        return violations
I realize this might be for debugging purposes, but would you like to turn this into a LOGGER.debug at some point?
                break
        # pylint: enable=compare-to-zero
+        LOGGER.debug('Returning violations: %r', violations)
        return violations
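The reviewer's suggestion in isolation: standard-library logging with lazy `%r` formatting, so the string is only built when the DEBUG level is enabled. The logger name and function are arbitrary stand-ins.

```python
import logging

LOGGER = logging.getLogger(__name__)

def report(violations):
    # Arguments are passed separately; formatting only happens if DEBUG
    # logging is actually enabled for this logger.
    LOGGER.debug('Returning violations: %r', violations)
    return violations
```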
codereview_python_data_6871
    def _cmp_key(self):
        """Unique key for the object to be used to generate the object hash"""
        # This key must be equal for two object considered as equal by __eq__
-        return '_'.join(map(str, sorted(self.indices)))
    def __hash__(self):
        """Makes the object hashable"""
I don't think a hash needs to be a string, so you could just do `hash(self.indices)` or maybe `hash((self.__class__.__name__, self.indices))`. I think this hash will happen a lot, so I'd rather it was fast (not constructing strings each time)
    def _cmp_key(self):
        """Unique key for the object to be used to generate the object hash"""
        # This key must be equal for two object considered as equal by __eq__
+        return self.__class__, tuple(sorted(self.indices))
    def __hash__(self):
        """Makes the object hashable"""
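A self-contained sketch of the tuple-based key the reviewer prefers; hashing a tuple avoids building a throwaway string on every call. The class here is hypothetical.

```python
class IndexGroup:
    def __init__(self, indices):
        self.indices = tuple(indices)

    def _cmp_key(self):
        # Tuples are hashable and cheap; no string construction needed.
        return self.__class__, tuple(sorted(self.indices))

    def __hash__(self):
        return hash(self._cmp_key())

    def __eq__(self, other):
        return self._cmp_key() == other._cmp_key()

# Groups with the same indices (in any order) hash and compare equal.
assert hash(IndexGroup([3, 1, 2])) == hash(IndexGroup([1, 2, 3]))
```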
codereview_python_data_6876
from MDAnalysisTests.datafiles import (COORDINATES_XTC,
                                       COORDINATES_TOPOLOGY)
-@pytest.mark.raises(exception=ValueError)
def test_get_bad_auxreader_format_raises_ValueError():
    # should raise a ValueError when no AuxReaders with match the specified format
-    mda.auxiliary.core.get_auxreader_for(format='bad-format')
class BaseAuxReference(object):
please use the `pytest.raises` context manager
from MDAnalysisTests.datafiles import (COORDINATES_XTC,
                                       COORDINATES_TOPOLOGY)
def test_get_bad_auxreader_format_raises_ValueError():
    # should raise a ValueError when no AuxReaders with match the specified format
+    with pytest.raises(ValueError):
+        mda.auxiliary.core.get_auxreader_for(format='bad-format')
class BaseAuxReference(object):
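The `pytest.raises` context-manager pattern the reviewer asks for, shown on a trivial stand-in instead of the MDAnalysis call.

```python
import pytest

def test_bad_format_raises_value_error():
    # The context manager asserts that the enclosed block raises ValueError.
    with pytest.raises(ValueError):
        int("bad-format")
```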