codereview_python_data_10980
        # if det_bboxes is rescaled to the original image size, we need to
        # rescale it back to the testing scale to obtain RoIs.
        if rescale and not isinstance(scale_factor, float):
-            scale_factor = torch.from_numpy(scale_factor).to(
-                det_bboxes.device)
        _bboxes = (
            det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
        mask_rois = bbox2roi([_bboxes])

`scale_factor = det_bboxes.new_tensor(scale_factor)`

        # if det_bboxes is rescaled to the original image size, we need to
        # rescale it back to the testing scale to obtain RoIs.
        if rescale and not isinstance(scale_factor, float):
+            scale_factor = det_bboxes.new_tensor(scale_factor)
        _bboxes = (
            det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
        mask_rois = bbox2roi([_bboxes])
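A runnable sketch of the `new_tensor` idiom adopted in this fix; the tensor shapes and values below are made up:

```python
import torch

det_bboxes = torch.zeros((3, 5))
scale_factor = [0.5, 0.5, 0.5, 0.5]
# new_tensor copies the data onto det_bboxes' device with its dtype,
# replacing the two-step torch.from_numpy(...).to(det_bboxes.device).
sf = det_bboxes.new_tensor(scale_factor)
assert sf.device == det_bboxes.device and sf.dtype == det_bboxes.dtype
```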
codereview_python_data_10983
%prog [options] GRAPHITE_ROOT
''')
option_parser.add_option('--port', default=8080, action='store', type=int, help='Port to listen on')
-option_parser.add_option('--interface', default="0.0.0.0", action='store', help='Interface to listen on')
option_parser.add_option('--libs', default=None, help='Path to the directory containing the graphite python package')
option_parser.add_option('--noreload', action='store_true', help='Disable monitoring for changes')

It's a small detail, but would you mind changing those to single-quotes?

%prog [options] GRAPHITE_ROOT
''')
option_parser.add_option('--port', default=8080, action='store', type=int, help='Port to listen on')
+option_parser.add_option('--interface', default='0.0.0.0', action='store', help='Interface to listen on')
option_parser.add_option('--libs', default=None, help='Path to the directory containing the graphite python package')
option_parser.add_option('--noreload', action='store_true', help='Disable monitoring for changes')
codereview_python_data_10984
                  ['templates/*', 'local_settings.py.example']},
  scripts=glob('bin/*'),
  data_files=webapp_content.items() + storage_dirs + conf_files + examples,
-  install_requires=['Django>=1.9,<1.9.99', 'django-tagging==0.4.3', 'pytz', 'pyparsing<2.1', 'cairocffi'],
  classifiers=[
      'Intended Audience :: Developers',
      'Natural Language :: English',

We already removed pyparsing version binding, could you please remove it too?

                  ['templates/*', 'local_settings.py.example']},
  scripts=glob('bin/*'),
  data_files=webapp_content.items() + storage_dirs + conf_files + examples,
+  install_requires=['Django>=1.9,<1.9.99', 'django-tagging==0.4.3', 'pytz', 'pyparsing', 'cairocffi'],
  classifiers=[
      'Intended Audience :: Developers',
      'Natural Language :: English',
codereview_python_data_10985
        assert meta['status'] == states.PENDING

    def test_as_uri(self):
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()

What is being asserted in this test?

        assert meta['status'] == states.PENDING

    def test_as_uri(self):
+        # Just ensure as_uri works properly
        from celery.backends import cassandra as mod
        mod.cassandra = Mock()
codereview_python_data_10990
                     'index id: %s.', self.inventory_index_id)
        raise util_errors.NoDataError

-        summary_data = []
-        for key, value in summary.iteritems():
-            summary_data.append(dict(resource_type=key, count=value))
-        summary_data = (
-            sorted(summary_data, key=lambda k: k['resource_type']))
        return summary_data

    def _get_details_data(self):

There is a small amount of code duplication here (to generate `[{resource_type, count}, {}, {}, ...]`) and what's in `_get_summary_data()`. It would be nice to refactor it into a private helper method.

```
details_data = transform_for_template(details)
return details_data

or

return transform_for_template(details)
```

And then transform_for_template(data) would be something like this:

```
def transform_for_template(data):
    template_data = []
    for key, value in data.iteritems():
        template_data.append(dict(resource_type=key, count=value))
    return sorted(template_data, key=lambda k: k['resource_type'])
```

                     'index id: %s.', self.inventory_index_id)
        raise util_errors.NoDataError

+        summary_data = self._transform_for_template(summary)
        return summary_data

    def _get_details_data(self):
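A minimal Python 3 sketch of the suggested helper (the reviewer's snippet uses Python 2's `iteritems()`):

```python
def _transform_for_template(data):
    # Shared helper for both _get_summary_data and _get_details_data:
    # turn {resource_type: count} into a sorted list of row dicts.
    template_data = [dict(resource_type=k, count=v) for k, v in data.items()]
    return sorted(template_data, key=lambda k: k['resource_type'])

print(_transform_for_template({'project': 3, 'bucket': 7}))
# [{'resource_type': 'bucket', 'count': 7}, {'resource_type': 'project', 'count': 3}]
```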
codereview_python_data_10998
        if isinstance(self.engine.aggregator, ResultsProvider):
            self.engine.aggregator.add_listener(self)

-        if (not self.is_tty) or (self.settings.get("disable", False)):
            self.disabled = True
            return

There is a case when user wants to still enable console on non-tty. Better have "true/false/auto" for "disable"

        if isinstance(self.engine.aggregator, ResultsProvider):
            self.engine.aggregator.add_listener(self)

+        disable = str(self.settings.get('disable', 'auto')).lower()
+        if (disable == 'true') or ((disable == 'auto') and (not is_tty())):
            self.disabled = True
            return
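A runnable sketch of the true/false/auto pattern adopted in the fix; `is_tty()` belongs to the project, so `sys.stdout.isatty()` stands in here:

```python
import sys

def console_disabled(settings):
    # "disable" accepts true/false/auto; "auto" defers to a TTY check.
    disable = str(settings.get('disable', 'auto')).lower()
    return disable == 'true' or (disable == 'auto' and not sys.stdout.isatty())

print(console_disabled({'disable': 'true'}))  # True regardless of TTY
print(console_disabled({}))                   # depends on the terminal
```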
codereview_python_data_11008
    group.add_argument(
        "--keep-host-header", action="store_true", dest="keep_host_header",
-        help="Keep the host header as proxy addres"
    )

We should be a bit more descriptive here, something along this line: `Keep the original host header in reverse-mode. The default is to modify the host header to reflect the upstream server address during reverse proxying request.` @mhils ?

    group.add_argument(
        "--keep-host-header", action="store_true", dest="keep_host_header",
+        help="Reverse Proxy: Keep the original host header instead of rewriting it to the reverse proxy target."
    )
codereview_python_data_11012
        number of frames to skip between each analysed frame
    verbose : bool, optional
        Turn on verbosity
    """
    logger.info("Choosing frames to analyze")
    # if verbose unchanged, use class default

May I ask why you didn't simply `verbose=verbose, **kwargs`? That way you don't have to modify the kwargs dictionary.

        number of frames to skip between each analysed frame
    verbose : bool, optional
        Turn on verbosity
+    kwargs : keyword arguments, which will be used to access the
+        underlying functionality in tqdm class, via ProgressBar class,
+        specifically, to adjust the location of the bar on the screen
    """
    logger.info("Choosing frames to analyze")
    # if verbose unchanged, use class default
codereview_python_data_11014
        else:
            logger.error("[GetWireserverEndpoint] Missing file {0}", file_path)

-        self.endpoint = DEFAULT_PROTOCOL_ENDPOINT
        logger.info("Using hardcoded Wireserver endpoint {0}", self.endpoint)
        return self.endpoint

Do you need to release the lock before returning?

        else:
            logger.error("[GetWireserverEndpoint] Missing file {0}", file_path)

+        self.endpoint = KNOWN_WIRESERVER_IP
        logger.info("Using hardcoded Wireserver endpoint {0}", self.endpoint)
        return self.endpoint
codereview_python_data_11029
    Parameters
    ----------
-    input_datasets : dict[str, tf.data.Dataset] or dict[str, nvidia.dali.plugin.tf.experimental.input]
        input datasets to the DALI Pipeline. It must be provided as a dictionary mapping from
-        the names of the ``External Source`` nodes to the datasets objects intended as inputs
-        for those nodes (or to the :meth:`~nvidia.dali.plugin.tf.experimental.input` wrapper).
        For example::

nitpick: "intended as inputs for those nodes" seems a bit redundant

    Parameters
    ----------
+    input_datasets : dict[str, tf.data.Dataset] or dict[str, nvidia.dali.plugin.tf.experimental.Input]
        input datasets to the DALI Pipeline. It must be provided as a dictionary mapping from
+        the names of the ``External Source`` nodes to the datasets objects or to the
+        :meth:`~nvidia.dali.plugin.tf.experimental.Input` wrapper.
        For example::
codereview_python_data_11031
            and self.eq(other).all().all()
        )

-    def explode(
-        self, column: Union[str, Tuple], ignore_index: bool = False
-    ):  # noqa: PR01, RT01, D200
-        """
-        Transform each element of a list-like to a row, replicating index values.
-        """
-        return super(DataFrame, self).explode(column, ignore_index)
-
    def _update_var_dicts_in_kwargs(self, expr, kwargs):
        """
        Copy variables with "@" prefix in `local_dict` and `global_dict` keys of kwargs.

We can actually delete this implementation and only override in the `Series` class.

            and self.eq(other).all().all()
        )

    def _update_var_dicts_in_kwargs(self, expr, kwargs):
        """
        Copy variables with "@" prefix in `local_dict` and `global_dict` keys of kwargs.
codereview_python_data_11037
    def _apply(self, group):
        attr = getattr(group, self.field)
-        mask = (attr == self.values[0])
-        for v in self.values[1:]:
-            mask |= (attr == v)
        return group[mask]

Any reason not to use np.isin ?
```suggestion
        mask = np.isin(attr, self.values)
```

    def _apply(self, group):
        attr = getattr(group, self.field)
+        mask = np.isin(attr, self.values)
        return group[mask]
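A quick demonstration of the suggested `np.isin` replacement:

```python
import numpy as np

attr = np.array(['CA', 'N', 'O', 'CB', 'CA'])
# One vectorized membership test instead of OR-ing an equality
# comparison per value.
mask = np.isin(attr, ['CA', 'CB'])
print(mask)  # [ True False False  True  True]
```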
codereview_python_data_11038
    Parameters
    ----------
-    G : NetworkX Graph
        A graph
    source : node in `G`
    distance : the distance of the wanted nodes from `source`

```suggestion
    G : NetworkX graph
```
lower case on "graph" removes confusion with the `Graph` object type.

    Parameters
    ----------
+    G : NetworkX graph
        A graph
    source : node in `G`
    distance : the distance of the wanted nodes from `source`
codereview_python_data_11045
          vals: typing.Union[
              typing.List[typing.List[typing.Any]],
              typing.List[typing.Any],
-              str,
          ]) -> None:
    if vals:
        # Whatever vals is, make it a list of rows containing lists of column values.

Let's also adjust the somewhat weird type signature here as well. This probably should be `typing.Any` instead of `str` in the last line if we intend to support ints.

          vals: typing.Union[
              typing.List[typing.List[typing.Any]],
              typing.List[typing.Any],
+              typing.Any,
          ]) -> None:
    if vals:
        # Whatever vals is, make it a list of rows containing lists of column values.
codereview_python_data_11052
    'admin_project': {
        'source_filenames': (
            'js/lib/jquery-ui.js',
-            'js/dobule_list_selector.js',
            'js/admin_project.js',
        ),
        'output_filename': 'js/admin_project.min.js',

This looks like a typo. Good thing is, you did that in the file name as well. :P

    'admin_project': {
        'source_filenames': (
            'js/lib/jquery-ui.js',
+            'js/double_list_selector.js',
            'js/admin_project.js',
        ),
        'output_filename': 'js/admin_project.min.js',
codereview_python_data_11053
    def query(self):
        return None

-    @abc.abstractproperty
    def autocommit(self):
        return False

I don't think this needs to be an `abstractproperty`. You shouldn't require the user to override it. Just a plain `@property` should suffice.

    def query(self):
        return None

+    @property
    def autocommit(self):
        return False
codereview_python_data_11054
    u = mda.Universe.empty(0)

    assert len(u.atoms) == 0
    assert len(u.residues) == 0
-    assert len(u.segments) == 0
\ No newline at end of file

add newline at end of file - I am pretty sure that PEP8 or pylint will yell.

    u = mda.Universe.empty(0)

    assert len(u.atoms) == 0
    assert len(u.residues) == 0
\ No newline at end of file
+    assert len(u.segments) == 0
codereview_python_data_11059
  @handler.check_cron()
  def get(self):
    """Process all fuzz targets and update FuzzStrategy weights."""
-    for engine_name, strategy_list in ENGINE_LIST:
-      _query_and_upload_strategy_probabilities(engine_name, strategy_list)

nit: would need to update this once `ENGINE_LIST` is changed to a namedtuple

  @handler.check_cron()
  def get(self):
    """Process all fuzz targets and update FuzzStrategy weights."""
+    for engine in ENGINE_LIST:
+      _query_and_upload_strategy_probabilities(engine.name,
+                                               engine.query_strategy_list)
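A hedged sketch of what the namedtuple-based `ENGINE_LIST` could look like; the field names and engine entries below are assumptions, not the project's actual definitions:

```python
import collections

# Hypothetical shape of ENGINE_LIST after the namedtuple change.
Engine = collections.namedtuple('Engine', ['name', 'query_strategy_list'])
ENGINE_LIST = [
    Engine('libFuzzer', ['fuzz_strategy']),
    Engine('afl', ['fuzz_strategy_afl']),
]

for engine in ENGINE_LIST:
    print(engine.name, engine.query_strategy_list)
```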
codereview_python_data_11062
        self.train_cfg = train_cfg
        self.test_cfg = test_cfg
-        self.fp16_enabled = False

        self.init_weights(pretrained=pretrained)

This can be added in the parent class.

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        self.init_weights(pretrained=pretrained)
codereview_python_data_11063
        'items': [{
            'key': 'startup-script',
            'value': """#!/bin/bash
-sudo apt-get install -y unzip git
sudo apt-get install -y libmysqlclient-dev
sudo apt-get install -y python-pip python-dev

Should we break this up into a separate line?

        'items': [{
            'key': 'startup-script',
            'value': """#!/bin/bash
+sudo apt-get install -y git
+sudo apt-get install -y unzip
sudo apt-get install -y libmysqlclient-dev
sudo apt-get install -y python-pip python-dev
codereview_python_data_11066
parser.add_argument('--test_conn', action='store_true', dest='test_conn',
                    help='Check pool connection with provided genesis file')
-parser.add_argument('--taa_text', default="", type=str, required=False, help='Transaction author agreement text')
-parser.add_argument('--taa_version', default="", type=str, required=False, help='Transaction author agreement version')

Maybe it would be more convenient for our use cases to have a default TAA?

parser.add_argument('--test_conn', action='store_true', dest='test_conn',
                    help='Check pool connection with provided genesis file')
+parser.add_argument('--taa_text', default="test transaction author agreement text", type=str, required=False, help='Transaction author agreement text')
+parser.add_argument('--taa_version', default="test_taa", type=str, required=False, help='Transaction author agreement version')
codereview_python_data_11068
if __name__ == '__main__':
    sys.path.append('')
    setup_logger()
-    if not validate_environment():
-        logging.debug('Environment is not properly configured for running Caldera, Exiting')
-        exit()
    parser = argparse.ArgumentParser('Welcome to the system')
    parser.add_argument('-E', '--environment', required=False, default='local', help='Select an env. file to use')
    parser.add_argument('--fresh', action='store_true', required=False, default=False,

this should be an error log. exit with sys code = 1

if __name__ == '__main__':
    sys.path.append('')
    setup_logger()
+    validate_environment()
    parser = argparse.ArgumentParser('Welcome to the system')
    parser.add_argument('-E', '--environment', required=False, default='local', help='Select an env. file to use')
    parser.add_argument('--fresh', action='store_true', required=False, default=False,
codereview_python_data_11070
        # type is a 7-bit bitfield spanning bits 1..7 -> div 2
        try:
            lldpdu_tlv_type = orb(payload[0]) // 2
-            return LLDPDU_CLASS_TYPES[lldpdu_tlv_type]
-        except (KeyError, IndexError):
-            pass
-
-        return Raw

    @staticmethod
    def _dot1q_headers_size(layer):

Use this instead: `return LLDPDU_CLASS_TYPES.get(lldpdu_tlv_type, Raw)`

        # type is a 7-bit bitfield spanning bits 1..7 -> div 2
        try:
            lldpdu_tlv_type = orb(payload[0]) // 2
+            return LLDPDU_CLASS_TYPES.get(lldpdu_tlv_type, Raw)
+        except IndexError:
+            return Raw

    @staticmethod
    def _dot1q_headers_size(layer):
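A small demonstration of the `dict.get` default suggested above; the string values stand in for the real LLDPDU classes:

```python
LLDPDU_CLASS_TYPES = {0: 'LLDPDUEndOfLLDPDU', 1: 'LLDPDUChassisID'}

# dict.get with a default collapses the try/except KeyError branch;
# only IndexError (empty payload) still needs explicit handling.
print(LLDPDU_CLASS_TYPES.get(1, 'Raw'))   # LLDPDUChassisID
print(LLDPDU_CLASS_TYPES.get(99, 'Raw'))  # Raw
```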
codereview_python_data_11072
        self.assertEqual(s.read_bytes(3), b'foo')
        self.assertEqual(s.read_bytes(2), b'ba')
        with self.assertRaises(transaction.SerializationError):
-            self.assertEqual(s.read_bytes(4), b'r')
        self.assertEqual(s.read_bytes(0), b'')
        self.assertEqual(s.read_bytes(1), b'r')
        self.assertEqual(s.read_bytes(0), b'')

The `assertEqual` does not really make sense here.
```suggestion
            s.read_bytes(4)
```

        self.assertEqual(s.read_bytes(3), b'foo')
        self.assertEqual(s.read_bytes(2), b'ba')
        with self.assertRaises(transaction.SerializationError):
+            s.read_bytes(4)
        self.assertEqual(s.read_bytes(0), b'')
        self.assertEqual(s.read_bytes(1), b'r')
        self.assertEqual(s.read_bytes(0), b'')
codereview_python_data_11076
    Args:
        n_holes (int | tuple[int, int]): Number of regions to be dropped.
-            If it is given as a list, number of n_holes will be randomly
-            selected from `n_holes[0]` to `n_holes[1]`.
        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
            shape of dropped regions. It can be `tuple[int, int]` to use a
            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose

Indicate whether the interval is closed or open.

    Args:
        n_holes (int | tuple[int, int]): Number of regions to be dropped.
+            If it is given as a list, number of holes will be randomly
+            selected from the closed interval [`n_holes[0]`, `n_holes[1]`].
        cutout_shape (tuple[int, int] | list[tuple[int, int]]): The candidate
            shape of dropped regions. It can be `tuple[int, int]` to use a
            fixed cutout shape, or `list[tuple[int, int]]` to randomly choose
codereview_python_data_11079
    Returns
    -------
    pred: iterator
-        (node, predecessor) iterator where `predecessor` is a predecessor of
-        `node` in a shortest path from `source` to `node`.

    Examples
    --------

```suggestion
        (node, predecessor) iterator where `predecessor` is the predecessor of
        `node` in a breadth first search starting from `source`.
```

    Returns
    -------
    pred: iterator
+        (node, predecessor) iterator where `predecessor` is the predecessor of
+        `node` in a breadth first search starting from `source`.

    Examples
    --------
codereview_python_data_11080
        if policy.fallback_component:
            issue.components.clear()
            issue.components.add(policy.fallback_component)
            issue.save()
-
    # Update the testcase with this newly created issue.
    testcase.bug_information = str(issue.id)
    testcase.put()

Awesome start! Let's add one more thing here. Let's append this string to the issue body as well:

```
'\n\n**NOTE**: This bug was filed into this component due to permission or configuration issues with the specified component(s) <components>'
```

Put this in a global _CONSTANT and format in the components from before we cleared.

        if policy.fallback_component:
            issue.components.clear()
            issue.components.add(policy.fallback_component)
+            if policy.fallback_policy_message:
+                issue.body += '\n\n' + policy.fallback_policy_message
            issue.save()
+
    # Update the testcase with this newly created issue.
    testcase.bug_information = str(issue.id)
    testcase.put()
codereview_python_data_11081
        self._sample_file = None
        self._start_time = None

-    def _Write(self, content):
        """Writes a string to the sample file.

        Args:

Unclear from the method name what is written, maybe rename to `_WriteSample` or `_WriteString`

        self._sample_file = None
        self._start_time = None

+    def _WritesString(self, content):
        """Writes a string to the sample file.

        Args:
codereview_python_data_11088
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
-from .compat import urlparse, basestring, urldefrag
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
                    prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict

Please remove the import of `urldefrag` here.

from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
+from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
                    prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
codereview_python_data_11089
    USER = 4

-wildcard_string = '[USER INPUT THIS UNBOUNDED FACT/RELATIONSHIP]'


class FactSchema(ma.Schema):

I prefer to uppercase these static globals

    USER = 4

+WILDCARD_STRING = '[USER INPUT THIS UNBOUNDED FACT/RELATIONSHIP]'
+wildcard_string = ''


class FactSchema(ma.Schema):
codereview_python_data_11090
    return ret


def for_all_pckg(packages, fun, add_additional_packages=True):
-    """Iterates over all packages, executes a fun. Returns all fun results as a list"""
    ret = []
    for pckg in all_packages:
        if pckg.key in packages:

```suggestion
    """Iterates over all packages, executes a function. Returns all function results as a list"""
```
"All fun results" reads weird :)

    return ret


def for_all_pckg(packages, fun, add_additional_packages=True):
+    """Iterates over all packages, executes a function. Returns all function results as a list"""
    ret = []
    for pckg in all_packages:
        if pckg.key in packages:
codereview_python_data_11091
# See the License for the specific language governing permissions and
# limitations under the License.

-"""Tests the LienRulesEngine."""

import copy
import itertools

One of the tests should cover exact matches (no wildcard), and multiple locations in a single rule

# See the License for the specific language governing permissions and
# limitations under the License.

+"""Tests the LocationRulesEngine."""

import copy
import itertools
codereview_python_data_11092
    cacert_path: /foo/bar/ca.pem
    cert_path: /foo/bar/cert.pem
    key_path: /foo/bar/key.pem
-    tls_verify: 1
    env:
      FOO: bar
    restart_policy: on-failure

As far as I know, we're using `True` more regularly here ...

    cacert_path: /foo/bar/ca.pem
    cert_path: /foo/bar/cert.pem
    key_path: /foo/bar/key.pem
+    tls_verify: true
    env:
      FOO: bar
    restart_policy: on-failure
codereview_python_data_11093
"""Repeat the last executed command, like '.' in vi. Args: - count: Which numeric argument to give the command. """ - mode_manager = objreg.get('mode-manager', scope='window', - window=win_id) if mode_manager.mode not in runners.last_command: raise cmdexc.CommandError("You didn't do anything yet.") cmd = runners.last_command[mode_manager.mode] That sounds odd IMHO, I'd prefer something like "Which count to pass to the command" or so """Repeat the last executed command, like '.' in vi. Args: + count: Which count to pass the command. """ + mode_manager = objreg.get('mode-manager', scope='window', window=win_id) if mode_manager.mode not in runners.last_command: raise cmdexc.CommandError("You didn't do anything yet.") cmd = runners.last_command[mode_manager.mode]
codereview_python_data_11096
        self.state_sum.index_add_(0, grad_indices, grad_sum)
        std = self.state_sum[grad_indices]  # _sparse_mask
        std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
-        if gpu_id != -1:
            std_values = std_values.cuda(gpu_id)
-        elif self.gpu >= 0:
-            std_values = std_values.cuda(self.args.gpu)
        tmp = (-clr * grad_values / std_values)
        if tmp.device != device:
            tmp = tmp.to(device)

we can't do the same thing as MXNet? get the device Id from other tensors.

        self.state_sum.index_add_(0, grad_indices, grad_sum)
        std = self.state_sum[grad_indices]  # _sparse_mask
        std_values = std.sqrt_().add_(1e-10).unsqueeze(1)
+        if gpu_id != -1 and self.args.mix_cpu_gpu:
            std_values = std_values.cuda(gpu_id)
        tmp = (-clr * grad_values / std_values)
        if tmp.device != device:
            tmp = tmp.to(device)
codereview_python_data_11115
        valdipList = []

        for j in range(totalFrames/dt-1):
            try:
                a = self._getOneDeltaPoint(universe,repInd,j,sumsdt,dt)
            except ZeroDivisionError:

Is it really ok to just ignore the division by zero? Or should this be fixed elsewhere?

        valdipList = []

        for j in range(totalFrames/dt-1):
+            # If the selection of atoms is too small, there will be a division by zero in the next line.
+            # The except clause avoids using the result of _getOneDeltaPoint() in the mean.
            try:
                a = self._getOneDeltaPoint(universe,repInd,j,sumsdt,dt)
            except ZeroDivisionError:
codereview_python_data_11119
             reduction='mean'):
    """Calculate balanced L1 loss

-    Please see the `CVPR 2019 paper <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction.

Use paper title or method name instead of `CVPR 2019 paper`.

             reduction='mean'):
    """Calculate balanced L1 loss

+    Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_

    Args:
        pred (torch.Tensor): The prediction.
codereview_python_data_11124
"Return data path to `filename`, checking locally first then in the config file." local_path = URLs.LOCAL_PATH/'data'/filename if local_path.exists() or local_path.with_suffix(ext).exists(): return local_path - elif archive: return Config.data_path() / filename - else: return Config.data_archive_path() / filename def download_data(url:str, fname:PathOrStr=None, data:bool=True, ext:str='.tgz') -> Path: "Download `url` to destination `fname`." should this be the other way around? if archive return data_archive_path "Return data path to `filename`, checking locally first then in the config file." local_path = URLs.LOCAL_PATH/'data'/filename if local_path.exists() or local_path.with_suffix(ext).exists(): return local_path + elif archive: return Config.data_archive_path() / filename + else: return Config.data_path() / filename def download_data(url:str, fname:PathOrStr=None, data:bool=True, ext:str='.tgz') -> Path: "Download `url` to destination `fname`."
codereview_python_data_11125
  # Firebase.
  builder.add('img-src', 'www.gstatic.com')
  builder.add('connect-src', 'www.googleapis.com')
-  builder.add('frame-src', 'cluster-fuzz.firebaseapp.com')

  # External style. Used for fonts, charting libraries.
  builder.add('style-src', 'fonts.googleapis.com')

cluster-fuzz is hardcoded, need to get project id or whatever

  # Firebase.
  builder.add('img-src', 'www.gstatic.com')
  builder.add('connect-src', 'www.googleapis.com')
+  builder.add('frame-src', utils.get_application_id() + '.firebaseapp.com')

  # External style. Used for fonts, charting libraries.
  builder.add('style-src', 'fonts.googleapis.com')
codereview_python_data_11129
                parameter_filename = self._get_cgroup_file(parameter_name)
                logger.error("File {0} is empty but should not be".format(parameter_filename))
                raise CGroupsException("File {0} is empty but should not be".format(parameter_filename))
-        except CGroupsException as e:
-            raise e
        except Exception as e:
            parameter_filename = self._get_cgroup_file(parameter_name)
            logger.error("Exception while attempting to read {0}: {1}".format(parameter_filename, ustr(e)))

does this change the traceback in the exception? maybe this instead?
> except CGroupsException: raise

                parameter_filename = self._get_cgroup_file(parameter_name)
                logger.error("File {0} is empty but should not be".format(parameter_filename))
                raise CGroupsException("File {0} is empty but should not be".format(parameter_filename))
+        except CGroupsException:
+            raise
        except Exception as e:
            parameter_filename = self._get_cgroup_file(parameter_name)
            logger.error("Exception while attempting to read {0}: {1}".format(parameter_filename, ustr(e)))
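A runnable illustration of why the bare `raise` matters: it re-raises with the original traceback intact, whereas `raise e` re-anchors the traceback at the re-raise site:

```python
class CGroupsException(Exception):
    pass

def read_value():
    try:
        raise CGroupsException("file is empty")
    except CGroupsException:
        raise  # bare raise keeps the original traceback
    except Exception as e:  # unrelated errors still get logged here
        print("unexpected:", e)

try:
    read_value()
except CGroupsException as e:
    print("caught:", e)  # traceback still points at the original raise
```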
codereview_python_data_11130
        if self.rule_book is None or force_rebuild:
            self.build_rule_book()

-        violations = []
-        # pylint: disable=redefined-variable-type
        for binding in policy.get('bindings', []):
            violations = itertools.chain(
                violations,

Is there a way to do this w/o having to add a pylint disable here?

        if self.rule_book is None or force_rebuild:
            self.build_rule_book()

+        violations = itertools.chain()
        for binding in policy.get('bindings', []):
            violations = itertools.chain(
                violations,
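A small demonstration of the `itertools.chain()` trick used in the fix: starting from an empty chain keeps the variable's type stable across the loop, so no pylint disable is needed:

```python
import itertools

violations = itertools.chain()  # an empty iterator
for batch in ([1, 2], [], [3]):
    violations = itertools.chain(violations, batch)
print(list(violations))  # [1, 2, 3]
```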
codereview_python_data_11134
        cloud_mode = self.settings.get("cloud-mode", None)
        proj_name = self.parameters.get("project", self.settings.get("project", None))
        test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))
-        launch_existing_test = self.settings.get("use-existing-test", False)
        project = self._find_project(proj_name)

maybe "launch-existing-test" conveys the meaning better

        cloud_mode = self.settings.get("cloud-mode", None)
        proj_name = self.parameters.get("project", self.settings.get("project", None))
        test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))
+        launch_existing_test = self.settings.get("launch-existing-test", False)
        project = self._find_project(proj_name)
codereview_python_data_11135
    @staticmethod
    def add_defaults(resource, stack_name: str):
-        role_name = resource.get("Properties", {}).get("Name")
        if not role_name:
            resource["Properties"]["Name"] = generate_default_name(
                stack_name, resource["LogicalResourceId"]

I'm generally a huge fan of defensive programming on dicts (`.get(..., {})`), but wondering if in this case we should ensure in the runtime that `Properties` are always present, and then simply access them via `resource["Properties"]`. (in fact, we're anyway doing it on line 65..) Thoughts?

    @staticmethod
    def add_defaults(resource, stack_name: str):
+        role_name = resource["Properties"].get("Name")
        if not role_name:
            resource["Properties"]["Name"] = generate_default_name(
                stack_name, resource["LogicalResourceId"]
codereview_python_data_11137
    'ANDROID_KERNEL': 'Android Kernel',
    'ANDROID_AUTO': 'Android Auto',
    'ANDROID_X86': 'Android (x86)',
-    'EMULATED_ANDROID': 'Android (Emulated)',
    'CHROMEOS': 'Chrome OS',
    'FUCHSIA': 'Fuchsia OS',
    'MAC': 'Mac',

all of these start with ANDROID_, so ANDROID_EMULATOR is better. i think we check somewhere things start with ANDROID

    'ANDROID_KERNEL': 'Android Kernel',
    'ANDROID_AUTO': 'Android Auto',
    'ANDROID_X86': 'Android (x86)',
+    'ANDROID_EMULATOR': 'Android (Emulated)',
    'CHROMEOS': 'Chrome OS',
    'FUCHSIA': 'Fuchsia OS',
    'MAC': 'Mac',
codereview_python_data_11142
""" for o in self._options.values(): o.reset() def update(self, **kwargs): updated = set(kwargs.keys()) Does not trigger an update anymore. Intentional? """ for o in self._options.values(): o.reset() + self.changed.send(self._options.keys()) def update(self, **kwargs): updated = set(kwargs.keys())
codereview_python_data_11144
from ..core.operation import Operation
from ..core.options import Compositor, Store, Options, StoreOptions
from ..core.util import basestring, find_minmax, cartesian_product
-from ..element import Curve, Area, Image, Polygons
from .element import contours

Will need a docstring. I know you were probably already planning to add one - just making sure!

from ..core.operation import Operation
from ..core.options import Compositor, Store, Options, StoreOptions
from ..core.util import basestring, find_minmax, cartesian_product
+from ..element import Curve, Area, Image, Polygons, Distribution, Bivariate
from .element import contours
codereview_python_data_11145
        self.frm_mng.switch(1)
        self.frm_mng.switch('relative=parent')
        if self.driver.find_element(By.ID, 'editor').get_attribute('contenteditable'):
-            self.driver.execute_script('arguments[0].innerHTML = %s;' % _tpl.str_repr(_tpl.apply('lo-la-lu')), self.driver.find_element(By.ID, 'editor'))
        else:
            raise NoSuchElementException("The element (By.ID, 'editor') is not contenteditable element")
        sleep(3)

too long string, should be split

        self.frm_mng.switch(1)
        self.frm_mng.switch('relative=parent')
        if self.driver.find_element(By.ID, 'editor').get_attribute('contenteditable'):
+            self.driver.execute_script(
+                'arguments[0].innerHTML = %s;' % _tpl.str_repr(_tpl.apply('lo-la-lu'))
+                , self.driver.find_element(By.ID, 'editor')
+            )
        else:
            raise NoSuchElementException("The element (By.ID, 'editor') is not contenteditable element")
        sleep(3)
codereview_python_data_11147
raise ValueError("""Invalid value {} for the argument `cycle`. Valid values are - "no", False or None - cycling disabled - "quiet", True - quietly rewind the data - - "raise" - raise StopIteration on each rewind.""".format(cycle)) def _get_callback_from_source(source, cycle): iterable = False ```suggestion if cycle is None or cycle == False or cycle == "no": ``` raise ValueError("""Invalid value {} for the argument `cycle`. Valid values are - "no", False or None - cycling disabled - "quiet", True - quietly rewind the data + - "raise" - raise StopIteration on each rewind.""".format(repr(cycle))) def _get_callback_from_source(source, cycle): iterable = False
codereview_python_data_11148
    def __init__(self, plot, **params):
        NdWidget.__init__(self, plot, **params)
-        self.nbagg = OutputMagic.options['backend'] == 'nbagg'
        self.frames = {}
        if self.embed:
            frames = {idx: self._plot_figure(idx)

Another use of random numbers where some deterministic method may be better.

    def __init__(self, plot, **params):
        NdWidget.__init__(self, plot, **params)
+        nbagg = CommSocket is not object
+        self.nbagg = OutputMagic.options['backend'] == 'nbagg' and nbagg
        self.frames = {}
        if self.embed:
            frames = {idx: self._plot_figure(idx)
codereview_python_data_11156
    def answers(self, other):
        if other.__class__ == self.__class__:
-            return self.payload.answers(other.payload)
        return 0

That's already by default on Packet:

```
def answers(self, other):
    """DEV: true if self is an answer from other"""
    if other.__class__ == self.__class__:
        return self.payload.answers(other.payload)
    return 0
```

    def answers(self, other):
        if other.__class__ == self.__class__:
+            if (self.msg_type == SOMEIP.TYPE_REQUEST_NO_RET):
+                return 0
+            elif (self.msg_type == SOMEIP.TYPE_REQUEST_NORET_ACK):
+                return 0
+            else:
+                return self.payload.answers(other.payload)
        return 0
codereview_python_data_11168
        retg._edge_frames[i].update(rgrh._edge_frames[0])
    return retg

-def to_hetero(G, ntypes, etypes, ntype_field=NTYPE, etype_field=ETYPE):
    """Convert the given graph to a heterogeneous graph.

    The input graph should have only one type of nodes and edges. Each node and edge

Why revert these back?

        retg._edge_frames[i].update(rgrh._edge_frames[0])
    return retg

+def to_hetero(G, ntypes, etypes, ntype_field=NTYPE, etype_field=ETYPE, metagraph=None):
    """Convert the given graph to a heterogeneous graph.

    The input graph should have only one type of nodes and edges. Each node and edge
codereview_python_data_11169
                # This is a trade-off between data replication vs data loss.
                raise
            except Exception as error:
-                msg = "Failed to process event file {0}: \n, {1}".format(event_file, textutil.format_exception(error))
                logger.warn(msg)
                add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)

I don't think we need these extra \n. The message should display on the same line, as in the previous code.

                # This is a trade-off between data replication vs data loss.
                raise
            except Exception as error:
+                msg = "Failed to process event file {0}:{1}".format(event_file, textutil.format_exception(error))
                logger.warn(msg)
                add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
codereview_python_data_11179
        'most_strings': sorted(qs, key=lambda x: x.total_strings)[-1],
        'most_translations': sorted(qs, key=lambda x: x.approved_strings)[-1],
        'most_suggestions': sorted(qs, key=lambda x: x.unreviewed_strings)[-1],
-        'most_missing': sorted(
-            qs,
-            key=lambda x: (
-                x.total_strings
-                - x.approved_strings
-                - x.fuzzy_strings
-                - x.strings_with_warnings
-                - x.strings_with_errors
-            )
-        )[-1],
    }

def adjust_stats(

[nit] I wonder if that's possible to make the property called `missing` and use it here and in the template.

        'most_strings': sorted(qs, key=lambda x: x.total_strings)[-1],
        'most_translations': sorted(qs, key=lambda x: x.approved_strings)[-1],
        'most_suggestions': sorted(qs, key=lambda x: x.unreviewed_strings)[-1],
+        'most_missing': sorted(qs, key=lambda x: x.missing_strings)[-1],
    }

def adjust_stats(
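A hedged sketch of the `missing_strings` property the fix relies on; the class below only models the fields involved and is not the project's actual model:

```python
class LocaleStats:
    def __init__(self, total, approved, fuzzy, warnings, errors):
        self.total_strings = total
        self.approved_strings = approved
        self.fuzzy_strings = fuzzy
        self.strings_with_warnings = warnings
        self.strings_with_errors = errors

    @property
    def missing_strings(self):
        # Everything not yet translated, reviewed, or flagged.
        return (self.total_strings - self.approved_strings - self.fuzzy_strings
                - self.strings_with_warnings - self.strings_with_errors)

print(LocaleStats(10, 4, 1, 1, 1).missing_strings)  # 3
```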
codereview_python_data_11182
        # Ensure coordinates are regularly sampled
-        self.set_param(rtol=config.image_rtol) if rtol is None else self.set_param(rtol=rtol)
-
-        validate_regular_sampling(self, 0, self.rtol)
-        validate_regular_sampling(self, 1, self.rtol)

    def __setstate__(self, state):

Rather than using ``set_param`` here, the ``rtol`` value should be passed to the Dataset constructor above, something like:

```
rtol = (config.image_rtol if self.rtol is None else self.rtol) if rtol is None else rtol
Dataset.__init__(self, data, kdims=kdims, vdims=vdims, extents=extents, rtol=rtol, **params)
```

and then change the default ``rtol`` to None.

        # Ensure coordinates are regularly sampled
+        rtol = config.image_rtol if rtol is None else rtol
+        validate_regular_sampling(self, 0, rtol)
+        validate_regular_sampling(self, 1, rtol)

    def __setstate__(self, state):
codereview_python_data_11192
# Copyright (c) 2018, Alexander Kirillov
# This file supports `file_client` for `panopticapi`,
# the source code is copied from `panopticapi`,
-# only the way to read the gt images is modified.
import multiprocessing
import os

docstring to indicate what is modified

# Copyright (c) 2018, Alexander Kirillov
# This file supports `file_client` for `panopticapi`,
# the source code is copied from `panopticapi`,
+# only the way to load the gt images is modified.
import multiprocessing
import os
codereview_python_data_11199
from bzt.utils import shutdown_process


-class JavaEnv(object):
-    def get_additional_classpath(self):
-        pass
-
-    def get_cp_from_files(self):
-        pass
-
-    def get_files_from_cp(self):
-        pass
-

class ReportableExecutor(ScenarioExecutor):
    def __init__(self):
        super(ReportableExecutor, self).__init__()

There is "java.py" file to hold all of Java-specific things

from bzt.utils import shutdown_process


class ReportableExecutor(ScenarioExecutor):
    def __init__(self):
        super(ReportableExecutor, self).__init__()
codereview_python_data_11202
from tensorflow.python.framework import ops
from tensorflow_addons.utils.resource_loader import get_path_to_datafile
from tensorflow.python.framework import dtypes
-from tensorflow.python.ops import array_ops
-from tensorflow.python.ops import math_ops
-

_image_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_image_ops.so"))

Please use tf.* public api

from tensorflow.python.framework import ops
from tensorflow_addons.utils.resource_loader import get_path_to_datafile
from tensorflow.python.framework import dtypes
+from tensorflow.python import array_ops
+from tensorflow.python import math_ops

_image_ops_so = tf.load_op_library(
    get_path_to_datafile("custom_ops/image/_image_ops.so"))
codereview_python_data_11217
import termios
from subprocess import CalledProcessError

-import molecule.validators as validators
import prettytable
import sh
import vagrant

Need to fix this import :)

import termios
from subprocess import CalledProcessError

import prettytable
import sh
import vagrant
codereview_python_data_11223
            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
-            dilation=self.dilation,
            bias=True)
        self.init_offset()

We may use `_pair` to wrap `dilation`.

            kernel_size=self.kernel_size,
            stride=_pair(self.stride),
            padding=_pair(self.padding),
+            dilation=_pair(self.dilation),
            bias=True)
        self.init_offset()
codereview_python_data_11225
            type = report.main['type']

            LOG.debug("Storing report")
-            report_id = client.addReport(self.__run_id,
                                         file_ids[fpath],
                                         bug_hash,
                                         msg,

Are you sure that this piece of error checking is related to build action handling?

            type = report.main['type']

            LOG.debug("Storing report")
+            report_id = client.addReport(analisys_id,
                                         file_ids[fpath],
                                         bug_hash,
                                         msg,
codereview_python_data_11228
    def as_utf8_string(self):
        return bytes_literal(self.utf8encode(), 'utf8')

-    def as_encoded_c_string_literal(self):
        if self.encoding is None:
            s = self.as_utf8_string()
        else:

Why not add this method also to `BytesLiteral`? That would simplify all those `AttributeError` catching cases.

    def as_utf8_string(self):
        return bytes_literal(self.utf8encode(), 'utf8')

+    def as_c_string_literal(self):
+        # first encodes the string then produces a c string literal
        if self.encoding is None:
            s = self.as_utf8_string()
        else:
codereview_python_data_11229
'''Cython - Command Line Parsing'''

from Cython import __version__ as version
-import Options

menu = {
    'default' : {

Actually, I rewrote this file from scratch, but I've updated the copyright line to Pyrex&Cython _team_. This is because there is no Cython Foundation to enforce our rights.

'''Cython - Command Line Parsing'''

from Cython import __version__ as version
+
+import Cython.Compiler.Options as Options

menu = {
    'default' : {
codereview_python_data_11233
def _clahe(
-    image: TensorLike, clip_limit: Number, tile_grid_size: Union[List[int], Tuple[int]]
) -> tf.Tensor:
    """Implements CLAHE as tf ops"""
    original_2d_shape = (tf.shape(image)[0], tf.shape(image)[1])

Are you sure that such an early cast to `tf.int32` is ok?

def _clahe(
+    image: TensorLike,
+    clip_limit: Number,
+    tile_grid_size: Union[List[int], Tuple[int]],
+    gpu_optimized: bool,
) -> tf.Tensor:
    """Implements CLAHE as tf ops"""
    original_2d_shape = (tf.shape(image)[0], tf.shape(image)[1])
codereview_python_data_11234
    # install ES version
    install_version = get_install_version_for_api_version(version)

-    t1 = es_starter.start_elasticsearch(asynchronous=False, version=install_version)
-    # Ensure that all infra components are up and running
-    check_infra(apis=[], additional_checks=[es_starter.check_elasticsearch])
-    time.sleep(15)
-    LOG.info('Elasticsearch started')
-    return t1

def cleanup_elasticsearch_instance(status):

Not 100% sure, but I think if we pass `asynchronous=False` here, then the process will be started synchronously (i.e., the call to `start_elasticsearch(...)` won't return until the Elasticsearch process itself terminates, whenever the ES domain is stopped/deleted). So, I believe that the lines after it won't get executed (and hence can be removed), is that right?

    # install ES version
    install_version = get_install_version_for_api_version(version)

+    es_starter.start_elasticsearch(asynchronous=False, version=install_version)

def cleanup_elasticsearch_instance(status):
codereview_python_data_11236
@NECKS.register_module()
class ChannelMapper(nn.Module):
-    r"""Channel Mapper to reduce channels of backbone features.

-    This is used to reduce channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.

not necessarily "reduce"

@NECKS.register_module()
class ChannelMapper(nn.Module):
+    r"""Channel Mapper to unify channels of backbone features.

+    This is used to unify channels of backbone features.

    Args:
        in_channels (List[int]): Number of input channels per scale.
codereview_python_data_11240
    CollectionExprAliasCommand[CollectionExprAliasT],
    sd.CreateObject[CollectionExprAliasT],
):
-    def canonicalize_attributes(
-        self,
-        schema: s_schema.Schema,
-        context: sd.CommandContext,
-    ) -> s_schema.Schema:
-        schema = super().canonicalize_attributes(schema, context)
-        self.set_attribute_value('internal', True)
-        return schema


class DeleteCollectionExprAlias(

It probably makes more sense to set 'internal' in `_handle_alias_op` as the type attribute, and I think this should also be done for non-collection alias types.

    CollectionExprAliasCommand[CollectionExprAliasT],
    sd.CreateObject[CollectionExprAliasT],
):
+    pass


class DeleteCollectionExprAlias(
codereview_python_data_11244
            - labels (Tensor): Remained labels, has shape (n,).
            - masks (Tensor): Remained masks, has shape (n, w, h).
            - keep_inds (Tensor): The indices number of
-                the remaining mask in the input mask, has shape (n,).
    """
    assert len(labels) == len(masks) == len(scores)
    if len(labels) == 0:

indexes or indices?

            - labels (Tensor): Remained labels, has shape (n,).
            - masks (Tensor): Remained masks, has shape (n, w, h).
            - keep_inds (Tensor): The indices number of
+                the remaining mask in the input mask, has shape (n,).
    """
    assert len(labels) == len(masks) == len(scores)
    if len(labels) == 0:
codereview_python_data_11248
        return await self.dao.create('core_fact', dict(property=property, value=value, source_id=source_id,
                                                       score=score, set_id=set_id, link_id=link_id))

-    async def create_rule(self, fact, source_id, action="DENY", match=".*"):
        """
        Create fact rule. White list or black list. Order matters, executes like firewall rules.

        :param source_id: ties to source of rule

use single quotes in these params, plus in the stack trace below

        return await self.dao.create('core_fact', dict(property=property, value=value, source_id=source_id,
                                                       score=score, set_id=set_id, link_id=link_id))

+    async def create_rule(self, fact, source_id, action='DENY', match='.*'):
        """
        Create fact rule. White list or black list. Order matters, executes like firewall rules.

        :param source_id: ties to source of rule
codereview_python_data_11253
class RefDict(struct.Struct):

-    local_attr = struct.Field(str)
-    attr = struct.Field(str)
-    backref_attr = struct.Field(str, default='subject')
-    requires_explicit_inherit = struct.Field(bool, default=False)
-    ref_cls = struct.Field(type)


class ObjectMeta(type):

BTW, can we make all of these fields frozen?

class RefDict(struct.Struct):

+    local_attr = struct.Field(str, frozen=True)
+    attr = struct.Field(str, frozen=True)
+    non_inheritable_attr = struct.Field(str, default=None, frozen=True)
+    backref_attr = struct.Field(str, default='subject', frozen=True)
+    requires_explicit_inherit = struct.Field(bool, default=False, frozen=True)
+    ref_cls = struct.Field(type, frozen=True)


class ObjectMeta(type):
codereview_python_data_11260
        test_cfg (dict): Testing config of anchor head.
    """  # noqa: W605

    def __init__(self,
                 num_classes,
                 in_channels,

The version here is not the mmdet version.

        test_cfg (dict): Testing config of anchor head.
    """  # noqa: W605

+    _version = 1
+
    def __init__(self,
                 num_classes,
                 in_channels,
codereview_python_data_11269
                                      force_reload=force_reload,
                                      verbose=verbose)

-    def process(self, root_path):
        # graph
        coo_adj = sp.load_npz(os.path.join(
-            root_path, "reddit{}_graph.npz".format(self._self_loop_str)))
        self._graph = DGLGraph(coo_adj, readonly=True)
        # features and labels
-        reddit_data = np.load(os.path.join(root_path, "reddit_data.npz"))
        features = reddit_data["feature"]
        labels = reddit_data["label"]
        # train/val/test indices

modify according to tong's example

                                      force_reload=force_reload,
                                      verbose=verbose)

+    def process(self):
        # graph
        coo_adj = sp.load_npz(os.path.join(
+            self.raw_path, "reddit{}_graph.npz".format(self._self_loop_str)))
        self._graph = DGLGraph(coo_adj, readonly=True)
        # features and labels
+        reddit_data = np.load(os.path.join(self.raw_path, "reddit_data.npz"))
        features = reddit_data["feature"]
        labels = reddit_data["label"]
        # train/val/test indices
codereview_python_data_11274
help="File to write a ticket to (for TLS 1.3)") parser.add_argument("--res_master", help="Resumption master secret (for TLS 1.3)") parser.add_argument("--debug", action="store_const", const=5, default=0, help="Enter debug mode") parser.add_argument("server", nargs="?", default="127.0.0.1", Adding an `--sni` argument will be nice. We could specify the IP address and the TLS Server Name independently. help="File to write a ticket to (for TLS 1.3)") parser.add_argument("--res_master", help="Resumption master secret (for TLS 1.3)") +parser.add_argument("--sni", + help="Server Name Indication") parser.add_argument("--debug", action="store_const", const=5, default=0, help="Enter debug mode") parser.add_argument("server", nargs="?", default="127.0.0.1",
codereview_python_data_11278
        else:
            format_string = '{fn}(' + prev
            if fn_name in dir(np):
-                format_string = 'np.'+format_string
            else:
                format_string = prev+', {fn}'
        if args:

Maybe this ('np.') should be set via a class attribute...

        else:
            format_string = '{fn}(' + prev
            if fn_name in dir(np):
+                format_string = self._namespaces['numpy']+format_string
            else:
                format_string = prev+', {fn}'
        if args:
codereview_python_data_11281
def _is_generator_function(x):
    """Checks whether x is a generator function or a callable object
    where __call__ is a generator function"""
-    import inspect
    if inspect.isgeneratorfunction(x):
        return True
    if x is None or inspect.isfunction(x):

```suggestion
    raise TypeError("Source must be callable, iterable or a parameterless generator function")
```

def _is_generator_function(x):
    """Checks whether x is a generator function or a callable object
    where __call__ is a generator function"""
    if inspect.isgeneratorfunction(x):
        return True
    if x is None or inspect.isfunction(x):
codereview_python_data_11284
            crop_size (tuple): (crop_h, crop_w) in absolute pixels.
        """
        h, w = image_size
-        if self.crop_type == 'relative':
-            crop_h, crop_w = self.crop_size
-            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
-        elif self.crop_type == 'relative_range':
-            crop_size = np.asarray(self.crop_size, dtype=np.float32)
-            crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)
-            return int(h * crop_h + 0.5), int(w * crop_w + 0.5)
-        elif self.crop_type == 'absolute':
            return (min(self.crop_size[0], h), min(self.crop_size[1], w))
        elif self.crop_type == 'absolute_range':
            assert self.crop_size[0] <= self.crop_size[1]

Put the most frequently used option to the beginning:
- absolute
- absolute_range
- relative
- relative_range

            crop_size (tuple): (crop_h, crop_w) in absolute pixels.
        """
        h, w = image_size
+        if self.crop_type == 'absolute':
            return (min(self.crop_size[0], h), min(self.crop_size[1], w))
        elif self.crop_type == 'absolute_range':
            assert self.crop_size[0] <= self.crop_size[1]
codereview_python_data_11285
  return None


-def setup_production_build(build_type, es_enabled=False):
  """Sets up build with a particular revision."""
  # Bail out if there are not stable and beta build urls.
  if build_type == 'extended_stable':

we can set this in the job instead. no need to hardcode it here.

  return None


+def setup_production_build(build_type):
  """Sets up build with a particular revision."""
  # Bail out if there are not stable and beta build urls.
  if build_type == 'extended_stable':
codereview_python_data_11288
                batch_id=get_work_response['batch_id'],
            )
            return task.task_id

    def _get_work(self):
        if self._stop_requesting_work:
-            return None, 0, 0, 0, WORKER_STATE_DISABLED
        if self.worker_processes > 0:
            logger.debug("Asking scheduler for work...")

Same here. Rename one to match the other.

                batch_id=get_work_response['batch_id'],
            )
            return task.task_id
+        else:
+            return None

    def _get_work(self):
        if self._stop_requesting_work:
+            return GetWorkResponse(None, 0, 0, 0, 0, WORKER_STATE_DISABLED)
        if self.worker_processes > 0:
            logger.debug("Asking scheduler for work...")
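A hedged sketch of a `GetWorkResponse` namedtuple matching the six positional fields in the fix; the field names below are assumptions about the actual definition:

```python
import collections

# Hypothetical shape of the response object returned by _get_work().
GetWorkResponse = collections.namedtuple('GetWorkResponse', [
    'task_id', 'running_tasks', 'n_pending_tasks',
    'n_unique_pending', 'n_pending_last_scheduled', 'worker_state'])

resp = GetWorkResponse(None, 0, 0, 0, 0, 'disabled')
print(resp.task_id, resp.worker_state)  # None disabled
```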
codereview_python_data_11291
    Notes
    -----
-    The measure is described in [1]_.
-    The algorithm is presented in [2].

    The number of nodes in the group must be a maximum of n - 2 where `n`
    is the total number of nodes in the graph.

Can we include the old references and descriptions of those papers relative to GBC along with the new reference? Providing a lit review trail of this concept is probably quite helpful to some people and we don't need to throw away the previous history. Maybe something like:

```
Group Betweenness is described in [1]_ and its importance discussed in [3]_.
An initial algorithm is mentioned in [2]_. This function uses an improved
algorithm presented in [4]_.
```

Feel free to change as you like.

    Notes
    -----
+    Group betweenness centrality is described in [1]_ and its importance discussed in [3]_.
+    An initial implementation of the algorithm is mentioned in [2]_. This function uses
+    an improved algorithm presented in [4]_.

    The number of nodes in the group must be a maximum of n - 2 where `n`
    is the total number of nodes in the graph.
codereview_python_data_11295
    def test_refit(self):
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
-        X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, test_size=0.3, random_state=24)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',

I probably was unclear, sorry. I meant, `lgb_eval` are not used in this test (neither for early stopping, nor metric evaluation).

    def test_refit(self):
        X, y = load_breast_cancer(True)
        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=42)
        params = {
            'objective': 'binary',
            'metric': 'binary_logloss',
codereview_python_data_11298
        s3_dest = 's3://mybucket/copydir_new/'
        response = s3_client.copy(s3_dir, s3_dest, threads=10, part_size=copy_part_size)
-        self._run_copy_response_test(response)

        for i in range(n):
            original_size = s3_client.get_key(s3_dir + str(i)).size

I'd have a test for empty directory and empty file cases.

        s3_dest = 's3://mybucket/copydir_new/'
        response = s3_client.copy(s3_dir, s3_dest, threads=10, part_size=copy_part_size)
+        self._run_copy_response_test(response, expected_num=n, expected_size=(n * file_size))

        for i in range(n):
            original_size = s3_client.get_key(s3_dir + str(i)).size
codereview_python_data_11299
    nogaps = lambda x, y: -2000 - y  # noqa: E731
    # Very expensive to open a gap in seq2 unless it is in one of the allowed positions
    specificgaps = (
-        lambda x, y: (-2 - y) if x in breaks else (-2000 - y)
-    )  # noqa: E731
    aligner = Align.PairwiseAligner()
    aligner.mode = "global"
    aligner.match_score = 1

I would have expected ``# noqa: E731`` would need to be on the line above with the lambda, but flake8 is not complaining so that's probably fine as is.

    nogaps = lambda x, y: -2000 - y  # noqa: E731
    # Very expensive to open a gap in seq2 unless it is in one of the allowed positions
    specificgaps = (
+        lambda x, y: (-2 - y) if x in breaks else (-2000 - y)  # noqa: E731
+    )
    aligner = Align.PairwiseAligner()
    aligner.mode = "global"
    aligner.match_score = 1
codereview_python_data_11302
class _PDBIOBase(object):
-    """
-    Provides the set_structure method for PDBIO and MMCIFIO
-    """

    def set_structure(self, pdb_object):
        """Check what the user is providing and build a structure."""
        if pdb_object.level == "S":

This is breaking the style tests,

```
$ flake8 Bio/
Bio/PDB/PDBIO.py:45:1: D200 One-line docstring should fit on one line with quotes
Bio/PDB/PDBIO.py:45:1: D204 1 blank line required after class docstring
Bio/PDB/PDBIO.py:45:1: D400 First line should end with a period
```

Use something like this:

```python
class _PDBIOBase(object):
    """Base class to provide the set_structure method for PDBIO and MMCIFIO."""
```

class _PDBIOBase(object):
+    """Base class to provide the set_structure method for PDBIO and MMCIFIO."""
+
    def set_structure(self, pdb_object):
        """Check what the user is providing and build a structure."""
        if pdb_object.level == "S":
codereview_python_data_11303
"""Ask user to name their Cloud SQL instance.""" self._print_banner('Setup Cloud SQL name') instance_name = raw_input( - 'Enter a prefix for the Forseti Cloud SQL instance ' '(press [enter] to use the default: {}) '.format( self.DEFAULT_CLOUDSQL_INSTANCE_NAME))\ .strip().lower() What prefix would this be? Offhand, I am not sure myself. Would it help to provide an example in the displayed output? """Ask user to name their Cloud SQL instance.""" self._print_banner('Setup Cloud SQL name') instance_name = raw_input( + 'Enter a name for the Forseti Cloud SQL instance ' '(press [enter] to use the default: {}) '.format( self.DEFAULT_CLOUDSQL_INSTANCE_NAME))\ .strip().lower()
codereview_python_data_11315
class PassthruDataset(Dataset):
-    def __init__(self,*args, **kwargs):
        *xs,y=args
        self.xs,self.y = xs,y
-        self.is_reg = kwargs.get('is_reg', True)
    def __len__(self): return len(self.y)
    def __getitem__(self, idx): return [o[idx] for o in self.xs] + [self.y[idx]]

why not say `is_reg=True` here, instead of `**kwargs`?

class PassthruDataset(Dataset):
+    def __init__(self,*args, is_reg=True):
        *xs,y=args
        self.xs,self.y = xs,y
+        self.is_reg = is_reg
    def __len__(self): return len(self.y)
    def __getitem__(self, idx): return [o[idx] for o in self.xs] + [self.y[idx]]
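A runnable sketch of the keyword-only default adopted in the fix; the `Dataset` base class is dropped here to keep the example self-contained:

```python
class PassthruDataset:
    # An explicit keyword default raises TypeError on misspelled options,
    # where **kwargs would silently swallow them.
    def __init__(self, *args, is_reg=True):
        *xs, y = args
        self.xs, self.y = xs, y
        self.is_reg = is_reg

ds = PassthruDataset([1, 2], [3, 4], [0, 1], is_reg=False)
print(ds.is_reg, len(ds.xs))  # False 2
```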
codereview_python_data_11316
OUTPUT_RESULTS_FILE_PATH = os.path.join(_LOG_COLLECTOR_DIR, "results.txt")
COMPRESSED_ARCHIVE_PATH = os.path.join(_LOG_COLLECTOR_DIR, "logs.zip")

-CGROUPS_SLICE = "azure.slice"
CGROUPS_UNIT = "collect-logs.scope"

INVALID_CGROUPS_ERRCODE = 2

you should use _AZURE_SLICE (from cgroupconfigurator.py) instead of redefining this slice

OUTPUT_RESULTS_FILE_PATH = os.path.join(_LOG_COLLECTOR_DIR, "results.txt")
COMPRESSED_ARCHIVE_PATH = os.path.join(_LOG_COLLECTOR_DIR, "logs.zip")

CGROUPS_UNIT = "collect-logs.scope"

INVALID_CGROUPS_ERRCODE = 2
codereview_python_data_11318
    def _from_file_str_data(self, file_str):
        req_json = super()._from_file_str_data(file_str)
-        if req_json is None:
-            return None
-        tmp = json.loads(req_json)
-        txn_type = (tmp.get('result', {}).get('txn', {}) or tmp.get('txn', {})).get('type', None)
-        if txn_type not in ["114", "116", "117"]:
-            return None
-        cred_def_id = (tmp.get('result', {}).get('txnMetadata', {}) or tmp.get('txnMetadata', {})).get('txnId', None)
-        return cred_def_id


class RGGetEntryRevoc(RGEntryRevoc):

So, GetRevocRegEntry request consists of GET and WRITE every time, right? Should we mention this specifically in docs, so that QA can calculate the expected load from the script appropriately?

    def _from_file_str_data(self, file_str):
        req_json = super()._from_file_str_data(file_str)
+        return self.get_txnid_field(req_json)


class RGGetEntryRevoc(RGEntryRevoc):
codereview_python_data_11321
        query_sig = urlparse.unquote(query_params['Signature'][0])
        if query_sig != signature:
-            LOGGER.debug('Signature does not match. \
-                What we recieved is %s and what we calculated is %s' % (query_sig, signature))
            return requests_error_response_xml_signature_calculation(
                code=403,
                code_string='SignatureDoesNotMatch',

Nit: I'd probably try to rephrase the logs a bit - avoiding the "we" part, reducing the content to the bare minimum:

```
LOGGER.debug('Signatures do not match: received "%s", calculated "%s"' % (query_sig, signature))
```

        query_sig = urlparse.unquote(query_params['Signature'][0])
        if query_sig != signature:
+            LOGGER.debug('Signatures do not match: received "%s", calculated "%s"' % (query_sig, signature))
            return requests_error_response_xml_signature_calculation(
                code=403,
                code_string='SignatureDoesNotMatch',
codereview_python_data_11328
            # is not affected, this leads to different small/medium/large
            # mask AP results.
            for x in predictions:
-                x.pop('bbox', None)
            cocoDt = cocoGt.loadRes(predictions)
        except IndexError:
            print_log(

```suggestion
                x.pop('bbox')
```

            # is not affected, this leads to different small/medium/large
            # mask AP results.
            for x in predictions:
+                x.pop('bbox')
            cocoDt = cocoGt.loadRes(predictions)
        except IndexError:
            print_log(
codereview_python_data_11330
    if tokens and start == 0:
      # Move matching key, structure pair to the front of the list, so that
      # structures that are more likely to match are tried first.
-      if index != 0:
        key_structure = structures.pop(index)
        structures.insert(0, key_structure)

Seeing

```
n = None
n != 0
True
```

Maybe change this to => `if index is not None and index != 0:`

    if tokens and start == 0:
      # Move matching key, structure pair to the front of the list, so that
      # structures that are more likely to match are tried first.
+      if index is not None and index != 0:
        key_structure = structures.pop(index)
        structures.insert(0, key_structure)
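A two-line demonstration of the pitfall the reviewer spotted: `None != 0` is truthy, so the guard must check for `None` explicitly:

```python
index = None
# The unguarded branch would call structures.pop(None) and raise.
print(index != 0)                         # True
print(index is not None and index != 0)   # False
```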
codereview_python_data_11339
For all other extensions, we should create a
        """
-        ignore_extension_regex = "Microsoft\\.AKS\\.Compute\\.AKS\\S*"
        return re.match(ignore_extension_regex, self.get_extension_full_name(extension)) is None

    def create_placeholder_status_file(self, extension=None, status=ValidHandlerStatus.transitioning, code=0,
minor: using a raw string may make the regex slightly more readable: r"Microsoft\.AKS\.Compute\.AKS\S*"
For all other extensions, we should create a
        """
+        ignore_extension_regex = r"Microsoft.AKS.Compute.AKS\S*"
        return re.match(ignore_extension_regex, self.get_extension_full_name(extension)) is None

    def create_placeholder_status_file(self, extension=None, status=ValidHandlerStatus.transitioning, code=0,
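A quick check of the raw-string form the reviewer suggests; note that the merged one-liner drops the backslashes, so its unescaped dots match any character, which is slightly looser:

```python
import re

pattern = r"Microsoft\.AKS\.Compute\.AKS\S*"      # \. matches a literal dot
name = "Microsoft.AKS.Compute.AKS.Linux.Billing"  # illustrative extension name
print(re.match(pattern, name) is not None)        # True
```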
codereview_python_data_11351
This method is called after parsing the event file in the events folder and before emitting it. This means all events, either coming from the agent or from the extensions, are passed through this method. The purpose is to add a static list of sys_info parameters such as VMName, Region, RAM, etc. If the sys_info parameters - are already populated in the event, they are not overwritten. Since the ContainerId parameter is only populated on the fly for the agent events because it is not a static sys_info parameter, an event coming from an extension will not have it, so we explicitly add it. :param event: Event to be enriched with sys_info parameters Shouldn't it be - `they ARE overwritten`? This method is called after parsing the event file in the events folder and before emitting it. This means all events, either coming from the agent or from the extensions, are passed through this method. The purpose is to add a static list of sys_info parameters such as VMName, Region, RAM, etc. If the sys_info parameters + are already populated in the event, they will be overwritten by the sys_info values obtained from the agent. Since the ContainerId parameter is only populated on the fly for the agent events because it is not a static sys_info parameter, an event coming from an extension will not have it, so we explicitly add it. :param event: Event to be enriched with sys_info parameters
codereview_python_data_11353
# possibly it's a deprecated but still accepted value.
      if params[i].options and value not in params[i].options:
        log.warning(
-          'given option "{option}" in function "{func}" is not in list of valid options, it may be deprecated but still accepted'
-          .format(option=value, func=func))

      if not params[i].validateValue(value):
        raise InputParameterError(
I'd still include the parameter name here, and shorten up the text: ``` 'Deprecated value "{value}" specified for parameter "{param}" of function "{func}"' ``` Longer-term we should support stricter enforcement for cases where there is a well-understood set of valid options, but this should be fine for now. Most likely we'd need to add another flag to the param to set the enforcement level, and/or a separate set of deprecated values that are accepted but trigger the log message.
# possibly it's a deprecated but still accepted value.
      if params[i].options and value not in params[i].options:
        log.warning(
+          'Deprecated or invalid value "{value}" specified for parameter "{param}" of function "{func}"'
+          .format(value=value, param=params[i].name, func=func))

      if not params[i].validateValue(value):
        raise InputParameterError(
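The suggested message wired up with stand-ins; the values below are illustrative, not taken from a real render request:

```python
import logging
log = logging.getLogger(__name__)

value, param_name, func = '1hour', 'alignTo', 'smartSummarize'  # illustrative
log.warning(
    'Deprecated or invalid value "{value}" specified for parameter "{param}" of function "{func}"'
    .format(value=value, param=param_name, func=func))
```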
codereview_python_data_11357
class RemoteFinder(BaseFinder):
    local = False
-    disabled = False

    def __init__(self, hosts=None):
        if hosts is None:
Maybe add `disabled = False` in BaseFinder?
class RemoteFinder(BaseFinder):
    local = False

    def __init__(self, hosts=None):
        if hosts is None:
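What the suggestion would look like, with a stripped-down stand-in for the real `BaseFinder`:

```python
class BaseFinder:
    local = True
    disabled = False  # default declared once on the base class

class RemoteFinder(BaseFinder):
    local = False     # disabled is inherited, not restated

print(RemoteFinder.disabled)  # False, via BaseFinder
```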
codereview_python_data_11364
I think the code here would be clearer if the method name were more descriptive, something like `_is_more_than_max_age()` or `_is_exceeded_max_age()`, since that is what the method is really doing: ``` if self._is_more_than_max_age(): violations.append() ``` What do you think?
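A self-contained sketch of one of the suggested names; the class, threshold, and violation payload are placeholders rather than the project's real names:

```python
_MAX_AGE_DAYS = 90  # placeholder threshold

class KeyChecker:
    def __init__(self, age_days):
        self.age_days = age_days

    def _is_exceeded_max_age(self):
        # The name now states exactly what the predicate decides.
        return self.age_days > _MAX_AGE_DAYS

    def find_violations(self):
        violations = []
        if self._is_exceeded_max_age():
            violations.append('max age exceeded')
        return violations

print(KeyChecker(120).find_violations())  # ['max age exceeded']
```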
codereview_python_data_11367
options_list = []
        for k in sorted(opts.keys()):
            o = opts._options[k]
-            if o.typespec in (str, int, bool):
-                t = o.typespec.__name__
-            elif o.typespec == typing.Optional[str]:
-                t = 'Union'
-            elif o.typespec == typing.Sequence[str]:
-                t = 'Sequence'
-            else:
-                raise NotImplementedError
            option = {
                'name': k,
                'type': t,
Move this typespec-to-string conversion into its own function in mitmproxy/utils/typecheck? :)
options_list = []
        for k in sorted(opts.keys()):
            o = opts._options[k]
+            t = typecheck.typespec_to_str(o.typespec)
            option = {
                'name': k,
                'type': t,
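One way the extracted helper could look, lifted straight from the deleted branch chain; mitmproxy's real `typespec_to_str` may differ:

```python
import typing

def typespec_to_str(typespec):
    if typespec in (str, int, bool):
        return typespec.__name__
    if typespec == typing.Optional[str]:
        return 'Union'
    if typespec == typing.Sequence[str]:
        return 'Sequence'
    raise NotImplementedError

print(typespec_to_str(typing.Optional[str]))  # 'Union'
```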
codereview_python_data_11377
allowed_border=-1, pos_weight=-1, debug=False), - test_cfg=dict( - nms_pre=1000, - score_thr=0.05, - )) score_thr should be removed allowed_border=-1, pos_weight=-1, debug=False), + test_cfg=dict(nms_pre=1000))
codereview_python_data_11379
e(nps.basic_indices, shape=(0, 0), max_dims=-1), e(nps.basic_indices, shape=(0, 0), max_dims=1.0), e(nps.basic_indices, shape=(0, 0), min_dims=2, max_dims=1), - e(nps.basic_indices, shape=(0, 0), min_dims=50), e(nps.basic_indices, shape=(0, 0), max_dims=50), e(nps.integer_array_indices, shape=()), e(nps.integer_array_indices, shape=(2, 0)), It looks like this is a duplicate of the case that follows it. e(nps.basic_indices, shape=(0, 0), max_dims=-1), e(nps.basic_indices, shape=(0, 0), max_dims=1.0), e(nps.basic_indices, shape=(0, 0), min_dims=2, max_dims=1), e(nps.basic_indices, shape=(0, 0), max_dims=50), e(nps.integer_array_indices, shape=()), e(nps.integer_array_indices, shape=(2, 0)),
codereview_python_data_11393
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
    model.eval()
    results = []
    dataset = data_loader.dataset
Add some docstring here since this method is more complex now.
def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False):
+    """
+    When gpu_collect=True, it will use GPU communication to collect results
+    from different workers. Otherwise it will save the results to tmpdir
+    first and collect them on CPU.
+    """
    model.eval()
    results = []
    dataset = data_loader.dataset
codereview_python_data_11394
log_str = log_lure_avail_str + 'Move to destiny. ' + str(cnt) + ' ' + log_lured_str + \ 'pokestops will be in range of ' + str(self.radius) + 'm. Arrive in ' \ - + str(round(distance(self.bot.position[0], self.bot.position[1], lat, lng))) + 'm.' logger.log(log_str) self.announced = False This change doesn't seem relevant to this PR log_str = log_lure_avail_str + 'Move to destiny. ' + str(cnt) + ' ' + log_lured_str + \ 'pokestops will be in range of ' + str(self.radius) + 'm. Arrive in ' \ + + str(distance(self.bot.position[0], self.bot.position[1], lat, lng)) + 'm.' logger.log(log_str) self.announced = False
codereview_python_data_11398
pandas_result = pandas.DataFrame(data).cov()
        df_equals(modin_result, pandas_result)

-    @pytest.mark.skip(reason="AssertionError: numpy array are different")
    @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
    def test_dot(self, data):
        modin_df = pd.DataFrame(data)
this should be in a separate PR imho, with an accompanying issue to track the fix
pandas_result = pandas.DataFrame(data).cov()
        df_equals(modin_result, pandas_result)

+    @pytest.mark.skipif(
+        os.name == "nt",
+        reason="AssertionError: numpy array are different",
+    )
    @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
    def test_dot(self, data):
        modin_df = pd.DataFrame(data)
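The conditional-skip pattern from the fix, reduced to a runnable stub:

```python
import os
import pytest

@pytest.mark.skipif(os.name == "nt", reason="known numpy mismatch on Windows")
def test_example():
    assert 1 + 1 == 2
```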