Columns: id (string, lengths 24-28), content (string, lengths 121-2.08k)
codereview_python_data_8712
    @pytest.fixture()
    def plot_data(self, psa):
-       # TODO: Which one is needed? Passes with any one!
        psa.run(metric='hausdorff')
        psa.run(metric='discrete_frechet')
        return psa.plot()

`isinstance` is probably more appropriate than `type(...) is ...`.

    @pytest.fixture()
    def plot_data(self, psa):
        psa.run(metric='hausdorff')
        psa.run(metric='discrete_frechet')
        return psa.plot()
codereview_python_data_8714
"Focus follows new flows." ) loader.add_option( - "filter_active", bool, False, "Toggle whether the view filter is enabled." ) Let's call this `view_filter_active` please so that it's symmetric with `intercept` and `intercept_active`. "Focus follows new flows." ) loader.add_option( + "view_filter_active", bool, False, "Toggle whether the view filter is enabled." )
codereview_python_data_8721
if testcase.regression.startswith('0:'): # If the regression range starts from the start of time, - # then we assume that the bug impacts extended stable. - new_impact = data_types.SecurityImpact.EXTENDED_STABLE elif testcase.is_impact_set_flag: # Add impact label based on testcase's impact value. if testcase.impact_extended_stable_version: Also: do we need to give a head's up to any PMs before we do this? They may have scripts today that depend on `Security_Impact-Stable` and not yet ExtendedStable. if testcase.regression.startswith('0:'): # If the regression range starts from the start of time, + # then we assume that the bug impacts stable. + # TODO(yuanjunh): change to extended stable label when it's fully supported. + new_impact = data_types.SecurityImpact.STABLE elif testcase.is_impact_set_flag: # Add impact label based on testcase's impact value. if testcase.impact_extended_stable_version:
codereview_python_data_8723
        mask_lower = x < lower
        mask_upper = upper < x
        mask = tf.logical_or(mask_lower, mask_upper)
-       mask = tf.cast(mask, tf.float32)
        return x * mask

I think we have to do
```python
x = tf.convert_to_tensor(x)
...
mask = tf.cast(mask, x.dtype)
```

        mask_lower = x < lower
        mask_upper = upper < x
        mask = tf.logical_or(mask_lower, mask_upper)
+       mask = tf.cast(mask, x.dtype)
        return x * mask
codereview_python_data_8735
    def test_defaults_replacement(self, klass, monkeypatch):
        configtypes.FontBase.set_defaults(['Terminus'], '23pt')
-       expected = '23pt Terminus'
-       assert klass().to_py('23pt default_family') == expected


class TestFontFamily:

```suggestion
        assert klass().to_py('23pt default_family') == '23pt Terminus'
```
and remove the line above.

    def test_defaults_replacement(self, klass, monkeypatch):
        configtypes.FontBase.set_defaults(['Terminus'], '23pt')
+       assert klass().to_py('23pt default_family') == '23pt Terminus'


class TestFontFamily:
codereview_python_data_8736
import MDAnalysis.lib.log
import pytest
from MDAnalysis.lib.log import _set_verbose
-from numpy.testing import assert_, assert_equal, assert_raises

def test_start_stop_logging():

`assert_` and `assert_raises` should not be used.

import MDAnalysis.lib.log
import pytest
from MDAnalysis.lib.log import _set_verbose

def test_start_stop_logging():
codereview_python_data_8747
Only callables that accept one argument (:meth:`~nvidia.dali.types.SampleInfo` objects that represent the index of the requested sample) can be used as ``source`` when ``parallel`` is set to True. It can be a function or an object implementing ``__call__`` operator, which - allows to add some initial state to the object instance. Keep in mind, that **copies** of the ``source`` will be distributed between Python workers, and no global state can be shared between them. ```suggestion allows to add an initial state to the object instance. ``` Only callables that accept one argument (:meth:`~nvidia.dali.types.SampleInfo` objects that represent the index of the requested sample) can be used as ``source`` when ``parallel`` is set to True. It can be a function or an object implementing ``__call__`` operator, which + allows to add an initial state to the object instance. Keep in mind, that **copies** of the ``source`` will be distributed between Python workers, and no global state can be shared between them.
codereview_python_data_8748
    def do_transform(self, x, is_y):
        h,w,*_ = x.shape
        intpr = cv2.INTER_AREA
-       if(is_y < min(h, w)) : intpr = cv2.INTER_LINEAR
        if is_y: return scale_min(x, self.sz_y, intpr if self.tfm_y == TfmType.PIXEL else cv2.INTER_NEAREST)
        else : return scale_min(x, self.sz, intpr)

I think you wanted to type sz_y here, no?

    def do_transform(self, x, is_y):
        h,w,*_ = x.shape
        intpr = cv2.INTER_AREA
+       if(sz_y < min(h, w)) : intpr = cv2.INTER_LINEAR
        if is_y: return scale_min(x, self.sz_y, intpr if self.tfm_y == TfmType.PIXEL else cv2.INTER_NEAREST)
        else : return scale_min(x, self.sz, intpr)
codereview_python_data_8771
if source == target: if source not in G: - raise nx.NodeNotFound( - f"Source {source} or target {target} not in G".format(source, target) - ) return 0 weight = _weight_function(G, weight) length = _dijkstra(G, source, weight, target=target) This should be only ` f"Source {source} or target {target} not in G"`, the `format` keyword isn't required here. if source == target: if source not in G: + raise nx.NodeNotFound(nx.NodeNotFound(f"Node {source} not found in graph")) return 0 weight = _weight_function(G, weight) length = _dijkstra(G, source, weight, target=target)
codereview_python_data_8777
def test_atomtype_alignment(self): result_line = ("ATOM 1 H5T GUA R 1 7.974 6.430 9.561" - " 1.00 0.00 RNAA H 0\n") assert_equal(self.writtenstuff[4], result_line) A test with a negative charge would be nice. But it should go in an other test method, or the name of this test method should be changed since its scope would be different. def test_atomtype_alignment(self): result_line = ("ATOM 1 H5T GUA R 1 7.974 6.430 9.561" + " 1.00 0.00 RNAA H\n") assert_equal(self.writtenstuff[4], result_line)
codereview_python_data_8787
await self._save_and_refresh_item(file_path, Adversary, final, allowed) stored_adv = await self._services.get('data_svc').locate('adversaries', dict(adversary_id=final["id"])) for a in stored_adv: - a.has_repeatable_abilities = a.check_repeatable_abilities(self.get_service('data_svc').ram) return [a.display for a in stored_adv] async def _persist_ability(self, access, ab): I think this is an okay workaround for not needing to attach all the full ability objects to the operation await self._save_and_refresh_item(file_path, Adversary, final, allowed) stored_adv = await self._services.get('data_svc').locate('adversaries', dict(adversary_id=final["id"])) for a in stored_adv: + a.has_repeatable_abilities = a.check_repeatable_abilities(self.get_service('data_svc').ram['abilities']) return [a.display for a in stored_adv] async def _persist_ability(self, access, ab):
codereview_python_data_8788
<h1>Error 503 Backend is unhealthy</h1> <p>Backend is unhealthy</p> <h3>Guru Mediation:</h3> - <p>Details: cache-sea4451-SEA 1645523408 3341736689</p> <hr> <p>Varnish cache server</p> </body> If you did the naming change above, this should also be changed: ``` KEY_AGE_MORE_THAN_MAX_AGE = 101 KEY_DATETIME_MORE_THAN_MAX_AGE = ... KEY_TIME_MORE_THAN_MAX_AGE = ... ``` <h1>Error 503 Backend is unhealthy</h1> <p>Backend is unhealthy</p> <h3>Guru Mediation:</h3> + <p>Details: cache-sea4439-SEA 1645523408 2939127269</p> <hr> <p>Varnish cache server</p> </body>
codereview_python_data_8789
            line = line.split()
            atomids[i]= line[0]
            names[i] = line[1]
-           types[i] = str(line[5])
            bonded_atoms = line[6:]
            for other_atom in bonded_atoms:
                other_atom = int(other_atom) - 1

Str call isn't needed

            line = line.split()
            atomids[i]= line[0]
            names[i] = line[1]
+           types[i] = line[5]
            bonded_atoms = line[6:]
            for other_atom in bonded_atoms:
                other_atom = int(other_atom) - 1
codereview_python_data_8790
return {} with open(metadata_file_path) as handle: - return handle.read() def get_all_issue_metadata(fuzz_target_path): For consistency with the other return value ({}) in this function on line 453, and other get_* functions, this should return the parsed value (i.e. the dictionary). Something like this (with extra checking so that bad formats don't propagate all the way into Testcase entities). ```python with open(metadata_file_path) as handle: try: return json.load(handle) except (ValueError, TypeError): logs.log_error('Invalid metadata file format.', path=metadata_file_path) return {} ``` return {} with open(metadata_file_path) as handle: + try: + return json.load(handle) + except (ValueError, TypeError): + logs.log_error('Invalid metadata file format.', path=metadata_file_path) + return {} def get_all_issue_metadata(fuzz_target_path):
codereview_python_data_8815
return global_params.iteritems() -_event_callbacks = {} class Task(object): __metaclass__ = Register I think this should go into the Task class. return global_params.iteritems() class Task(object): __metaclass__ = Register
codereview_python_data_8818
---------- g : DGLGraph The graph. - node_feats : torch.Tensor - The input node feature of shape :math:`(N_{in}, node_in_feats)` - where :math:`N_{in}` is the number of source nodes. edge_feats : torch.Tensor The input edge feature of shape :math:`(E, edge_in_feats)` where :math:`E` is the number of edges. For using with HeteroGraphConv a module must support accepting a pair of source node features and destination node features. You will need to do something like: ```python if isinstance(node_feats, tuple): node_feats_src, _ = node_feats else: node_feats_src = node_feats g.srcdata['hv'] = self.project_node(node_feats_src) # ... ``` ---------- g : DGLGraph The graph. + node_feats : torch.Tensor or pair of torch.Tensor + The input node features. If a torch.Tensor is given, it represents the input + node feature of shape :math:`(N, D_{in})` where :math:`D_{in}` is size of + input feature, :math:`N` is the number of nodes. + If a pair of torch.Tensor is given, which is the case for bipartite graph, + the pair must contain two tensors of shape :math:`(N_{src}, D_{in_{src}})` and + :math:`(N_{dst}, D_{in_{dst}})` separately for the source and destination nodes. + edge_feats : torch.Tensor The input edge feature of shape :math:`(E, edge_in_feats)` where :math:`E` is the number of edges.
codereview_python_data_8824
""" h_rel = self.rel_emb(rels) proj_rel = self.rel_project(rels).reshape(-1, self.nfeats, self.rfeats) - h_head = torch.einsum('ab,abc->ac', h_head, proj_rel) - h_tail = torch.einsum('ab,abc->ac', h_tail, proj_rel) return - torch.norm(h_head + h_rel - h_tail, p=self.p, dim=-1) Is this a matmul between `(E, D1) @ (E, D1, D2)`? You could utilize torch's batched matmul. ```python (h_head.unsqueeze(1) @ proj_rel).squeeze(1) ``` Probably need to benchmark which implementation is faster. """ h_rel = self.rel_emb(rels) proj_rel = self.rel_project(rels).reshape(-1, self.nfeats, self.rfeats) + h_head = (h_head.unsqueeze(1) @ proj_rel).squeeze(1) + h_tail = (h_tail.unsqueeze(1) @ proj_rel).squeeze(1) return - torch.norm(h_head + h_rel - h_tail, p=self.p, dim=-1)
codereview_python_data_8829
            # No need to pad.
            return
-       if last_date is pd.NaT:
            # If there is no data, determine how many days to add so that
            # desired days are written to the correct slots.
            days_to_zerofill = tds[tds.slice_indexer(end=date)]

How about `pd.isnull(last_date)`?

            # No need to pad.
            return
+       if pd.isnull(last_date):
            # If there is no data, determine how many days to add so that
            # desired days are written to the correct slots.
            days_to_zerofill = tds[tds.slice_indexer(end=date)]
codereview_python_data_8830
        plot_opts = Keywords(['plot_opt1', 'plot_opt2']+custom_plot, name)
        opt_groups = {'plot': Options(allowed_keywords=plot_opts),
                      'style': Options(allowed_keywords=style_opts),
-                     'output':Options(allowed_keywords=['backend'])}
        Store._options[backend][name] = opt_groups

```suggestion
                      'output': Options(allowed_keywords=['backend'])}
```

        plot_opts = Keywords(['plot_opt1', 'plot_opt2']+custom_plot, name)
        opt_groups = {'plot': Options(allowed_keywords=plot_opts),
                      'style': Options(allowed_keywords=style_opts),
+                     'output': Options(allowed_keywords=['backend'])}
        Store._options[backend][name] = opt_groups
codereview_python_data_8837
LQTY_ADDR = string_to_ethereum_address('0x063c26fF1592688B73d8e2A18BA4C23654e2792E') LQTY_PROXY = string_to_ethereum_address('0x9476832d4687c14b2c1a04E2ee4693162a7340B6') -ADDR_NOT_IN_LIQUITY = '0xA0446D8804611944F1B527eCD37d7dcbE442caba' liquity_mocked_historical_prices = { A_ETH: { what do you mean by address not in liquity? LQTY_ADDR = string_to_ethereum_address('0x063c26fF1592688B73d8e2A18BA4C23654e2792E') LQTY_PROXY = string_to_ethereum_address('0x9476832d4687c14b2c1a04E2ee4693162a7340B6') +ADDR_WITHOUT_TROVE = string_to_ethereum_address('0xA0446D8804611944F1B527eCD37d7dcbE442caba') liquity_mocked_historical_prices = { A_ETH: {
codereview_python_data_8839
mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) - if (result.cmdline[0] != 'repeat-command' and - result.cmdline[0] != 'prompt-accept'): last_command[mode_manager.mode] = ( self._parse_count(text)[1], count if count is not None else result.count) You can simplify this to `result.cmdline[0] not in ['repeat-command', 'prompt-accept']`. Thinking about it, I guess it makes sense to add `:leave-mode` as well as that's a mode-changing command (bound to `<esc>` usually) which you probably don't want to be repeated with `.` either. mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) + if result.cmdline[0] not in ['leave-mode', 'prompt-accept', + 'repeat-command']: last_command[mode_manager.mode] = ( self._parse_count(text)[1], count if count is not None else result.count)
codereview_python_data_8849
Parameters ---------- nbunch : single node, container, or all nodes (default= all nodes) - The view will only report edges from these nodes (outgoing if directed). data : string or bool, optional (default=False) The edge attribute returned in 3-tuple (u, v, ddict[data]). If True, return edge attribute dict in 3-tuple (u, v, ddict). For `in_edges` I think the original wording was actually correct. Parameters ---------- nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges incident to these nodes. data : string or bool, optional (default=False) The edge attribute returned in 3-tuple (u, v, ddict[data]). If True, return edge attribute dict in 3-tuple (u, v, ddict).
codereview_python_data_8851
from datastore import ndb_utils from handlers import base_handler from libs import handler def admins_from_iam_policy(iam_policy): maybe add some logs.log to tell which users are getting added as admins. also, look for any places that need more logging for better future debugging. from datastore import ndb_utils from handlers import base_handler from libs import handler +from metrics import logs def admins_from_iam_policy(iam_policy):
codereview_python_data_8866
project=project, network=network, ip=ips, - raw_data=json.dumps(instance_network_interface.__dict__, - indent=2)) # Rule violation. # resource_type: string Why not store the original JSON inside of the instance_network_interface object as a private attribute with a getter property. In common/gcp_type/instance.py, class InstanceNetworkInterface.__init__() set self._json = json.dumps(kwargs, sort_keys=True) Then add a new method: def as_json(self): return _json Then you can set raw_data to instance_network_interface.as_json() Note that an as_json method would be a nice base method for all gcp_type objects. project=project, network=network, ip=ips, + raw_data=instance_network_interface.as_json()) # Rule violation. # resource_type: string
codereview_python_data_8870
    def head(self):
        p = os.path.join(self.request.master.options.cadir, self.filename)
        p = os.path.expanduser(p)
-       with open(p, "rb") as f:
-           content_length = len(f.read())
        self.set_header("Content-Type", "application/x-x509-ca-cert")
        self.set_header(

Please use: `os.path.getsize(p)` instead.

    def head(self):
        p = os.path.join(self.request.master.options.cadir, self.filename)
        p = os.path.expanduser(p)
+       content_length = os.path.getsize(p)
        self.set_header("Content-Type", "application/x-x509-ca-cert")
        self.set_header(
codereview_python_data_8871
@task
def send_confile(confile):
    put('confiles/' + confile, 'tempfile')
-   sudo('mv tempfile ~/.bigchaindb')
    print('For this node, bigchaindb show-config says:')
    run('bigchaindb show-config')

do we need sudo here? I am afraid that the bigchaindb process may not have the right read/write permissions

@task
def send_confile(confile):
    put('confiles/' + confile, 'tempfile')
+   run('mv tempfile ~/.bigchaindb')
    print('For this node, bigchaindb show-config says:')
    run('bigchaindb show-config')
codereview_python_data_8880
where None represents all parsers and plugins. A parser filter expression is a comma separated value string that - denotes which parsers should be used. See filters/parser_filter.py - for details of the expression syntax. - Note that preset names in this expression will not be expanded. """ super(EventExtractionWorker, self).__init__() self._abort = False `Note that preset names in this expression will not be expanded.` Though correct this note does not cover the core of the issue. Change to something in line of: This function requires a parser filter expression in which preset names have been expanded. where None represents all parsers and plugins. A parser filter expression is a comma separated value string that + denotes which parsers and plugins should be used. See + filters/parser_filter.py for details of the expression syntax. + This function does not support presets, and requires a parser + filter expression where presets have been expanded. """ super(EventExtractionWorker, self).__init__() self._abort = False
codereview_python_data_8889
__version__ = '0.2.0' -__short_version__ = '0.1' \ No newline at end of file \ No newline at end of file maybe bump this too? __version__ = '0.2.0' \ No newline at end of file +__short_version__ = '0.2' \ No newline at end of file
codereview_python_data_8890
request: Request, auth_constraint: AuthConstraint, auth_action: AbstractAuthAction=None): - is_role_accepted = self.is_role_accepted(request, auth_constraint) - if is_role_accepted is None: return False, "sender's DID {} is not found in the Ledger".format(request.identifier) - if request.signature: - if not is_role_accepted: - return False, "{} can not do this action".format(self.get_named_role_from_req(request)) - if request.signatures: - if not self.is_sig_count_accepted(request, auth_constraint): return False, "Not enough {} signatures".format(Roles(auth_constraint.role).name) if not self.is_owner_accepted(auth_constraint, auth_action): if auth_action.field != '*': I think `is_sig_count_accepted` needs to be checked regardless of whether we have SIG or SIGS. Example: a client set SIG only (1 valid signatures), but auth rule requires 3 signatures. With the given code, the validation will pass, which is incorrect. request: Request, auth_constraint: AuthConstraint, auth_action: AbstractAuthAction=None): + if self.get_role(request) is None: return False, "sender's DID {} is not found in the Ledger".format(request.identifier) + if not self.is_sig_count_accepted(request, auth_constraint): return False, "Not enough {} signatures".format(Roles(auth_constraint.role).name) if not self.is_owner_accepted(auth_constraint, auth_action): if auth_action.field != '*':
codereview_python_data_8892
elif self.typedef_flag: base_code = self.cname else: - base_code = "__PYX_ENUM_DECL %s" % self.cname base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code) Does this really apply to plain C enums as well? It seemed to me that it's only a C++ enum issue. I would like to keep the code impact of this as low as possible. elif self.typedef_flag: base_code = self.cname else: + base_code = "enum %s" % self.cname base_code = public_decl(base_code, dll_linkage) return self.base_declaration_code(base_code, entity_code)
codereview_python_data_8897
    'qutebrowser/misc/checkpyver.py',
    'qutebrowser/misc/guiprocess.py',
    'qutebrowser/misc/editor.py',
-   'qutebrowser/misc/cmdhistory.py'
    'qutebrowser/mainwindow/statusbar/keystring.py',
    'qutebrowser/mainwindow/statusbar/percentage.py',

There's a comma missing here at the end :wink:

    'qutebrowser/misc/checkpyver.py',
    'qutebrowser/misc/guiprocess.py',
    'qutebrowser/misc/editor.py',
+   'qutebrowser/misc/cmdhistory.py',
    'qutebrowser/mainwindow/statusbar/keystring.py',
    'qutebrowser/mainwindow/statusbar/percentage.py',
codereview_python_data_8898
"""Check that we're accessing proper config options.""" if FAILED_LOAD: if not ConfigChecker.printed_warning: - print("[WARN] Could not find configdata.yml. Please run " + "pylint from qutebrowser root.", file=sys.stderr) print("Skipping some checks...", file=sys.stderr) ConfigChecker.printed_warning = True No need for the `+` here. """Check that we're accessing proper config options.""" if FAILED_LOAD: if not ConfigChecker.printed_warning: + print("[WARN] Could not find configdata.yml. Please run " "pylint from qutebrowser root.", file=sys.stderr) print("Skipping some checks...", file=sys.stderr) ConfigChecker.printed_warning = True
codereview_python_data_8900
Args: url: url to save as a bookmark. If None, use url of current page. - title: title of the new bookmark.""" if url and not title: raise cmdexc.CommandError('Title must be provided if url has ' 'been provided') The `"""` should be on a separate line (as per pep8 or pep257, not sure which of those two coding standards :wink:) Args: url: url to save as a bookmark. If None, use url of current page. + title: title of the new bookmark. + """ if url and not title: raise cmdexc.CommandError('Title must be provided if url has ' 'been provided')
codereview_python_data_8904
            'full_name': full_name,
            'rule_index': 0,
            'rule_name': violation.constraint,
-           'violation_type': 'CV ' + violation.constraint,
            'violation_data': json_format.MessageToDict(
                violation.metadata, including_default_value_fields=True),
            'resource_data': resource_data,

Slight nit- would suggest putting 'CV: ' + violation.constraint instead to make it more clear that it is from CV.

            'full_name': full_name,
            'rule_index': 0,
            'rule_name': violation.constraint,
+           'violation_type': 'CV_' + violation.constraint,
            'violation_data': json_format.MessageToDict(
                violation.metadata, including_default_value_fields=True),
            'resource_data': resource_data,
codereview_python_data_8907
distributed = False if len(cfg.gpu_ids) > 1: warnings.warn( - 'Only supports single GPU in non-distribute testing time.' - f'We treat gpu-ids is set to {cfg.gpu_ids}, and will set ' - f'to {cfg.gpu_ids[0:1]}.') cfg.gpu_ids = cfg.gpu_ids[0:1] else: distributed = True a warning message is confusingplease polish the description distributed = False if len(cfg.gpu_ids) > 1: warnings.warn( + f'We treat {cfg.gpu_ids} as gpu-ids, and reset to ' + f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in ' + 'non-distribute testing time.') cfg.gpu_ids = cfg.gpu_ids[0:1] else: distributed = True
codereview_python_data_8909
# check spans eff_query_spans = [blocksize_multiplier * s for s in hsp.query_span_all] - assert hsp.hit_span_all == eff_query_spans block_sizes = hsp.query_span_all # set strand and starts This should probably throw an exception (``ValueError`` maybe?) rather than use an assert, just in case someone is running Python with asserts turned off. # check spans eff_query_spans = [blocksize_multiplier * s for s in hsp.query_span_all] + if hsp.hit_span_all != eff_query_spans: + raise ValueError("HSP hit span and query span values do not match.") block_sizes = hsp.query_span_all # set strand and starts
codereview_python_data_8913
self.assertListEqual(generate_metric_list(cpu_percent_values), collected_metrics[name]["cpu"]["cur_cpu"][0:5]) @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") def test_cgroup_tracking(self, *args): num_extensions = 5 why do we need to mock getprocstat? thanks self.assertListEqual(generate_metric_list(cpu_percent_values), collected_metrics[name]["cpu"]["cur_cpu"][0:5]) + # mocking get_proc_stat to make it run on Mac and other systems + # this test does not need to read the values of the /proc/stat file @patch("azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_stat") def test_cgroup_tracking(self, *args): num_extensions = 5
codereview_python_data_8914
    # Additional filter on the project field speeds things up because it makes faster
    # to execute a SQL subquery generated by Django.
    if project and project.slug != "all-projects":
-       translations = translations.filter(entity__resource__project=project,)

    # Finally, we return a query that returns both the matching entities with no
    # plurals and the entities with plurals that were stored earlier.

Nit: trailing comma.

    # Additional filter on the project field speeds things up because it makes faster
    # to execute a SQL subquery generated by Django.
    if project and project.slug != "all-projects":
+       translations = translations.filter(entity__resource__project=project)

    # Finally, we return a query that returns both the matching entities with no
    # plurals and the entities with plurals that were stored earlier.
codereview_python_data_8919
world_size=world_size, rank=proc_id) train_mask, val_mask, test_mask, n_classes, g = data - nfeat = g.ndata.pop('feat').to(device) - labels = g.ndata.pop('label').to(device) in_feats = nfeat.shape[1] train_nid = th.LongTensor(np.nonzero(train_mask)).squeeze() We should probably add a `--data-cpu` flag to allow optionally storing these on the CPU when running on the GPU (see `train_sampling.py`). world_size=world_size, rank=proc_id) train_mask, val_mask, test_mask, n_classes, g = data + nfeat = g.ndata.pop('feat') + labels = g.ndata.pop('label') + if not args.data_cpu: + nfeat = nfeat.to(device) + labels = labels.to(device) in_feats = nfeat.shape[1] train_nid = th.LongTensor(np.nonzero(train_mask)).squeeze()
codereview_python_data_8931
        if response.status_code == 200 and status_code == '201' and key:
            response.status_code = 201
            response._content = self.get_201_reponse(key, bucket_name)
-           response.headers['Content-Length']=len(response._content)
            response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            return response

Can we please change this to:
```
response.headers['Content-Length'] = str(len(response._content))
```

        if response.status_code == 200 and status_code == '201' and key:
            response.status_code = 201
            response._content = self.get_201_reponse(key, bucket_name)
+           response.headers['Content-Length'] = str(len(response._content))
            response.headers['Content-Type'] = 'application/xml; charset=utf-8'
            return response
codereview_python_data_8935
SDNV2('CTSN', 0), SDNV2('LT', 0), SDNV2('DL', 0), - MultipleTypeField([ - (SDNV2("FO", 0), lambda x: ( - x.ProcFlags & 0x01)), - (SDNV2("ADUL", 0), lambda x: ( - x.ProcFlags & 0x01)), - ], - SDNV2("ADUL", 0)), ] def mysummary(self): This is incorrect. A MultipleTypeField choses between fields that all have the same name. SDNV2('CTSN', 0), SDNV2('LT', 0), SDNV2('DL', 0), + ConditionalField(SDNV2("FO", 0), lambda x: ( + x.ProcFlags & 0x01)), + ConditionalField(SDNV2("ADUL", 0), lambda x: ( + x.ProcFlags & 0x01)), ] def mysummary(self):
codereview_python_data_8949
if trans: msg = self.output.post_transaction_output(trans) logger.info(msg) - list_bunch = dnf.cli.output._make_lists(trans, self._goal) - if [tsi._active for tsi in list_bunch.failed]: - raise dnf.exceptions.Error(_('Transaction failed')) def gpgsigcheck(self, pkgs): """Perform GPG signature verification on the given packages, Why not `[tsi._active for tsi in trans if tsi.op_type == dnf.transaction.FAIL]`? And is it really necessary to call `._active` ? if trans: msg = self.output.post_transaction_output(trans) logger.info(msg) + for tsi in trans: + if tsi.op_type == dnf.transaction.FAIL: + raise dnf.exceptions.Error(_('Transaction failed')) def gpgsigcheck(self, pkgs): """Perform GPG signature verification on the given packages,
codereview_python_data_8951
world_size=world_size, rank=proc_id) - if args.per_etype_fanout: - sampler = dgl.dataloading.MultiLayerEtypeNeighborSampler(fanouts, etype_field='etype') - else: - sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts) loader = dgl.dataloading.NodeDataLoader( g, target_idx[train_idx], why do you need to change the single-machine solution? world_size=world_size, rank=proc_id) + sampler = dgl.dataloading.MultiLayerNeighborSampler(fanouts) loader = dgl.dataloading.NodeDataLoader( g, target_idx[train_idx],
codereview_python_data_8954
        except Exception as e:
            logger.error("Unexpected error in process_data {}".format(e))

-   def _declate_upgrade_failed(self, *, from_version, to_version, reason):

A typo in method name

        except Exception as e:
            logger.error("Unexpected error in process_data {}".format(e))

+   def _declare_upgrade_failed(self, *, from_version, to_version, reason):
codereview_python_data_8956
            # there was only one exact match
            return options[0]

-       # there are more than one exact match for this fuzzy symbol
        raise MultipleSymbolsFoundForFuzzySymbol(
            symbol=symbol,
            options=self.retrieve_all(owner.sid for owner in owners),

```suggestion
        # there is more than one exact match for this fuzzy symbol
```

            # there was only one exact match
            return options[0]

+       # there is more than one exact match for this fuzzy symbol
        raise MultipleSymbolsFoundForFuzzySymbol(
            symbol=symbol,
            options=self.retrieve_all(owner.sid for owner in owners),
codereview_python_data_8959
path = getattr(options, 'yara_rules_path', None) if path: try: - with open(path, 'rb') as rules_file: yara_rules_string = rules_file.read() - yara_rules_string = codecs.decode(yara_rules_string, 'utf-8') except IOError as exception: raise errors.BadConfigObject( Maybe use shorter form `io.open(path, 'rb', encoding='utf-8')` path = getattr(options, 'yara_rules_path', None) if path: try: + with io.open(path, 'rt', encoding='utf-8') as rules_file: yara_rules_string = rules_file.read() except IOError as exception: raise errors.BadConfigObject(
codereview_python_data_8963
def _process_post_response(self, response, mode): logger.debug(response) result = response['result'] if mode == self.mode_commit: check_tx_code = result.get('check_tx', {}).get('code', 0) If you omit the second parameter, `get` returns `None` if the lookup is unsuccessful, so you can just write `check_tx_code = result.get('check_tx', {}).get('code')` if you want. def _process_post_response(self, response, mode): logger.debug(response) + error = response.get('error') + if error: + return (500, error) + result = response['result'] if mode == self.mode_commit: check_tx_code = result.get('check_tx', {}).get('code', 0)
codereview_python_data_8966
d["resp_ctype"] = t.split(";")[0] else: d["resp_ctype"] = "" - return flowcache.get( - raw_format_flow, - tuple(sorted(d.items())), - focus, - extended, - truncate_urls, - ) I think these could just be part of `d` ? d["resp_ctype"] = t.split(";")[0] else: d["resp_ctype"] = "" + + return flowcache.get(raw_format_flow, tuple(sorted(d.items())))
codereview_python_data_8968
with mb_conn.cursor() as curs: curs.execute("DROP TABLE IF EXISTS mapping.tmp_mbid_mapping") curs.execute("""CREATE TABLE mapping.tmp_mbid_mapping ( - id SERIAL, recording_name TEXT NOT NULL, recording_mbid UUID NOT NULL, artist_credit_name TEXT NOT NULL, SERIAL sets up a sequence and uses it to fill in this value if you don't provide it. You're generating your own ids and inserting them with `insert_rows(mb_curs2, "mapping.tmp_mbid_mapping", rows)`, which means that the sequence won't be advanced. I can't see you creating an index or PK on this field, it might be better to make it explicitly `INTEGER NOT NULL` so that we don't run into any unexpected problems later with mb_conn.cursor() as curs: curs.execute("DROP TABLE IF EXISTS mapping.tmp_mbid_mapping") curs.execute("""CREATE TABLE mapping.tmp_mbid_mapping ( + id INTEGER NOT NULL, recording_name TEXT NOT NULL, recording_mbid UUID NOT NULL, artist_credit_name TEXT NOT NULL,
codereview_python_data_8970
        self.assertFalse(task_9.has_excessive_failures())
        task_9.add_failure()
        self.assertTrue(task_9.has_excessive_failures())
-
-
-if __name__ == '__main__':
-    unittest.main()

remove this (you should use `tox` for running tests, so this won't be needed)

        self.assertFalse(task_9.has_excessive_failures())
        task_9.add_failure()
        self.assertTrue(task_9.has_excessive_failures())
codereview_python_data_8978
        Args:
            times: How many times to repeat.
            command: The command to run, with optional args.
        """
-
        if count is not None:
            times *= count

No blank line after the docstring.

        Args:
            times: How many times to repeat.
            command: The command to run, with optional args.
            count: Multiplies with 'times' when given.
        """
        if count is not None:
            times *= count
codereview_python_data_8983
data = test_pipeline(data) data = collate([data], samples_per_gpu=1) # just get the actual data from DataContainer - data['img_metas'] = [i.data[0] for i in data['img_metas']] - data['img'] = [i.data[0] for i in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] Avoid using `i` to name variables other than numbers or indices. data = test_pipeline(data) data = collate([data], samples_per_gpu=1) # just get the actual data from DataContainer + data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] + data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0]
codereview_python_data_8984
Atlas of all connected graphs with up to 6 nodes. This example uses Graphviz via PyGraphviz. -It should show 142 graphs (oeis.org/A001349). """ import random It's probably not relevant to the example, but I was confused by the linked sequence: it looks like the value is 112 for `n=6`. I'm not sure that I'm interpreting this correctly though. Atlas of all connected graphs with up to 6 nodes. This example uses Graphviz via PyGraphviz. + +The image should show 142 graphs. +We don't plot the empty graph nor the single node graph. +(142 is the sum of values 2 to n=6 in sequence oeis.org/A001349). """ import random
codereview_python_data_8985
import numpy as np


-class Median_2d_test(test.TestCase):

    def _validateMedian_2d(self, inputs, expected_values, filter_shape = (3, 3)):

Class name in CamelCase to be consistent with others : e.g. Median2DTest

import numpy as np


+class Median2DTest(test.TestCase):

    def _validateMedian_2d(self, inputs, expected_values, filter_shape = (3, 3)):
codereview_python_data_8993
self.applications.add(offer1, result1) self.applications.add(offer2, result2) discounts = self.applications.grouped_voucher_discounts - discounts = [*discounts,] assert len(discounts) == 1 assert discounts[0]['voucher'] == voucher assert discounts[0]['discount'] == D('3.00') This breaks tests for Python<3.5. self.applications.add(offer1, result1) self.applications.add(offer2, result2) discounts = self.applications.grouped_voucher_discounts + discounts = [x for x in discounts] assert len(discounts) == 1 assert discounts[0]['voucher'] == voucher assert discounts[0]['discount'] == D('3.00')
codereview_python_data_8994
The subgraph index. """ e_array = e.todgltensor() - rst = _CAPI_DGLGraphEdgeSubgraph(self._handle, e_array, preserve_nodes) induced_nodes = utils.toindex(rst(1)) - gidx = GraphIndex(rst(0)) return SubgraphIndex(gidx, self, induced_nodes, e) @utils.cached_member(cache='_cache', prefix='scipy_adj') The same as in `graph.py`. The subgraph index. """ e_array = e.todgltensor() + rst = _CAPI_DGLGraphEdgeSubgraph(self, e_array, preserve_nodes) induced_nodes = utils.toindex(rst(1)) + gidx = rst(0) return SubgraphIndex(gidx, self, induced_nodes, e) @utils.cached_member(cache='_cache', prefix='scipy_adj')
codereview_python_data_8999
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): - """__get_item__ and get call _find_no_duplicates -- never used in - Requests internally. Takes as args name and optional domain and path. - Returns a cookie.value. Throws KeyError if cookie is not found and - CookieConflictError if there are multiple cookies that match name and - optionally domain and path.""" toReturn = None for cookie in iter(self): if cookie.name == name: This first line makes no sense. raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path)) def _find_no_duplicates(self, name, domain=None, path=None): + """Both ``__get_item__`` and ``get`` call this function: it's never + used elsewhere in Requests. Takes as args name and optional domain and + path. Returns a cookie.value. Throws KeyError if cookie is not found + and CookieConflictError if there are multiple cookies that match name + and optionally domain and path.""" toReturn = None for cookie in iter(self): if cookie.name == name:
codereview_python_data_9005
>>> view = blocks_of(arr, 2, 2) >>> view[:] = 100 >>> arr - array([[100, 1, 2, 3], - [ 4, 100, 6, 7], - [ 8, 9, 100, 11], - [ 12, 13, 14, 100]]) Notes ----- So a block of size 2x2 is equivalent to the diagonal? Or rather 4 blocks of size 1x1 along the diagonal? >>> view = blocks_of(arr, 2, 2) >>> view[:] = 100 >>> arr + array([[100, 100, 2, 3], + [100, 100, 6, 7], + [ 8, 9, 100, 100], + [ 12, 13, 100, 100]]) Notes -----
codereview_python_data_9008
# Detecing KVM is tricky, so let's use an environment variable, set from the # docker image, to determine whether to turn it on or not. - kvm = environment.get_value('FUCHSIA_USE_KVM') - if kvm: qemu_args.append('-enable-kvm') # Get the list of fuzzers for ClusterFuzz to choose from. nit: no need for the variable. just use get_value in the if. # Detecing KVM is tricky, so let's use an environment variable, set from the # docker image, to determine whether to turn it on or not. + if environment.get_value('FUCHSIA_USE_KVM'): qemu_args.append('-enable-kvm') # Get the list of fuzzers for ClusterFuzz to choose from.
codereview_python_data_9010
            sleep(worker_id + 1)
        line_input = queue_of_texts.get()
        if line_input is not None:
-           letters_in_line = Counter([idx for idx in line_input.lower() if
-                                      idx.isalpha()])
            letter_to_frequency.add_counter(letters_in_line)
        queue_of_texts.task_done()
        if line_input is None:

```suggestion
            letters_in_line = Counter(idx for idx in line_input.lower() if idx.isalpha())
```

            sleep(worker_id + 1)
        line_input = queue_of_texts.get()
        if line_input is not None:
+           letters_in_line = Counter(idx for idx in line_input.lower() if idx.isalpha())
            letter_to_frequency.add_counter(letters_in_line)
        queue_of_texts.task_done()
        if line_input is None:
codereview_python_data_9013
results (dict): Result dict from loading pipeline. Returns: - dict: Padded results, 'pad_shape', 'pad_fixed_size', - 'pad_size_divisor' keys are added into result dict. """ self._pad_img(results) The change of keys should be described in the class docstring. results (dict): Result dict from loading pipeline. Returns: + dict: Updated result dict. """ self._pad_img(results)
codereview_python_data_9016
"kaitaistruct>=0.7,<0.9", "ldap3>=2.5,<2.6", "passlib>=1.6.5, <1.8", - "ply>=3.4, <3.12", "pyasn1>=0.3.1,<0.5", "pyOpenSSL>=17.5,<18.1", "pyparsing>=2.1.3, <2.3", Did you actually test this with ply 3.4? That release is pretty old (2011), so I think we can bump this to at least 3.6 (2015) or even 3.10 (2017)... "kaitaistruct>=0.7,<0.9", "ldap3>=2.5,<2.6", "passlib>=1.6.5, <1.8", + "ply>=3.6, <3.12", "pyasn1>=0.3.1,<0.5", "pyOpenSSL>=17.5,<18.1", "pyparsing>=2.1.3, <2.3",
codereview_python_data_9017
accepted_mimetypes: Iterable[str], ) -> List[str]: """Override chooseFiles to (optionally) invoke custom file uploader.""" - if config.val.fileselect.handler == "default": return super().chooseFiles(mode, old_files, accepted_mimetypes) return shared.choose_file( multiple=(mode == QWebEnginePage.FileSelectOpenMultiple)) Perhaps add `assert handler == "external", handler` here, so this fails rather than doing the wrong thing if we ever add a new value there and forget to handle it here. (At that point it probably makes sense again to do `handler = config.val.fileselect.handler` - sorry for that! accepted_mimetypes: Iterable[str], ) -> List[str]: """Override chooseFiles to (optionally) invoke custom file uploader.""" + handler = config.val.fileselect.handler + if handler == "default": return super().chooseFiles(mode, old_files, accepted_mimetypes) + assert handler == "external" return shared.choose_file( multiple=(mode == QWebEnginePage.FileSelectOpenMultiple))
codereview_python_data_9020
import torch -from nvidia.dali.pipeline import pipeline import nvidia.dali.types as types import nvidia.dali.fn as fn -@pipeline def create_coco_pipeline(default_boxes, args): try: shard_id = torch.distributed.get_rank() same as in the previous file, do we want to replace our examples to use this? import torch +from nvidia.dali.pipeline import pipeline_def import nvidia.dali.types as types import nvidia.dali.fn as fn +@pipeline_def def create_coco_pipeline(default_boxes, args): try: shard_id = torch.distributed.get_rank()
codereview_python_data_9026
    ('move-to-end-of-line', ['$']),
    ('move-to-start-of-document', ['gg']),
    ('move-to-end-of-document', ['G']),
-   ('yank selection -p', ['Y']),
    ('yank selection', ['y'] + RETURN_KEYS),
    ('scroll left', ['H']),
    ('scroll down', ['J']),

What's the `-p` supposed to do? It's unrecognized even for `yank-selected`.

    ('move-to-end-of-line', ['$']),
    ('move-to-start-of-document', ['gg']),
    ('move-to-end-of-document', ['G']),
+   ('yank selection -s', ['Y']),
    ('yank selection', ['y'] + RETURN_KEYS),
    ('scroll left', ['H']),
    ('scroll down', ['J']),
codereview_python_data_9031
port = DEFAULT_PORT hosts.append((ip, port)) else: - ip = result[0] - port = int(result[1]) - hosts.append((ip, port)) - #raise RuntimeError("Format error of ip_config.") server_count_per_machine = args.num_servers # Get partition info of the graph data part_config = args.workspace + '/' + args.part_config why modify this? port = DEFAULT_PORT hosts.append((ip, port)) else: + raise RuntimeError("Format error of ip_config.") server_count_per_machine = args.num_servers # Get partition info of the graph data part_config = args.workspace + '/' + args.part_config
codereview_python_data_9038
class BinaryDSVReaderTest(shared_test_lib.BaseTestCase): """Tests for the binary delimited separated values reader.""" - def setUp(self): - """Sets up the needed objects used throughout the test.""" - self._resolver_context = context.Context() - @shared_test_lib.skipUnlessHasTestFile(['password.csv']) def testIterator(self): """Tests the iterator functionality.""" test_file = self._GetTestFilePath(['password.csv']) test_path_spec = os_path_spec.OSPathSpec(location=test_file) file_object = os_file_io.OSFile(self._resolver_context) file_object.open(test_path_spec) line_reader = line_reader_file.BinaryLineReader(file_object) Seeing that this test has 1 test method, I opt to just merge setUp into the test method for clarity class BinaryDSVReaderTest(shared_test_lib.BaseTestCase): """Tests for the binary delimited separated values reader.""" @shared_test_lib.skipUnlessHasTestFile(['password.csv']) def testIterator(self): """Tests the iterator functionality.""" test_file = self._GetTestFilePath(['password.csv']) test_path_spec = os_path_spec.OSPathSpec(location=test_file) + self._resolver_context = context.Context() file_object = os_file_io.OSFile(self._resolver_context) file_object.open(test_path_spec) line_reader = line_reader_file.BinaryLineReader(file_object)
codereview_python_data_9041
environment.reset_current_memory_tool_options() # Clear exceptions. - if sys.version_info.major == 2: - # TODO(ochang): Remove this once migrated to Python 3. - sys.exc_clear() # Call python's garbage collector. utils.python_gc() i think we use this in 3 places, feel free to create a helper in utils.py and use it src/python/bot/startup/heartbeat.py: sys.exc_clear() src/python/bot/startup/run_bot.py: sys.exc_clear() src/python/bot/tasks/commands.py: sys.exc_clear() environment.reset_current_memory_tool_options() # Clear exceptions. + utils.exc_clear() # Call python's garbage collector. utils.python_gc()
codereview_python_data_9055
) self.connected_exchanges[location].append(exchange_obj) - def get_all_binance_pairs(self, location: Location) -> List[str]: - pairs = list(query_binance_exchange_pairs(location=location).keys()) - if len(pairs) == 0: - self.msg_aggregator.add_error('Failed to query Binance available pairs') - return pairs - def get_user_binance_pairs(self, name: str, location: Location) -> List[str]: is_connected = location in self.connected_exchanges if is_connected: I suggest you do it differently. Just like we do with all other api endpoints the error should be handled by the api. So write down what exceptions can be raised or return None if you don't want to raise exceptions. And then instead of using the self.msg_aggregator here, just return an api error. ) self.connected_exchanges[location].append(exchange_obj) def get_user_binance_pairs(self, name: str, location: Location) -> List[str]: is_connected = location in self.connected_exchanges if is_connected:
codereview_python_data_9056
from tensorflow_addons.image.transform_ops import rotate
from tensorflow_addons.image.transform_ops import transform
from tensorflow_addons.image.translate_ops import translate
-from tensorflow_addons.image.augment_ops import blend

@gabrieldemarmiesse Is blend an `augment_ops` or something else? I.e. in Opencv is a `addWeighted` operation so I don't know if could be in the trasnform group, an `arithmetic` etc. I we will add more blending operations we could use `image.blend_ops`.

from tensorflow_addons.image.transform_ops import rotate
from tensorflow_addons.image.transform_ops import transform
from tensorflow_addons.image.translate_ops import translate
+from tensorflow_addons.image.compose_ops import blend
codereview_python_data_9069
    return os.path.exists(file_path)


-def file_open(file_path, mode="rb", kwargs=None):
    if isinstance(file_path, str):
        match = S3_ADDRESS_REGEX.search(file_path)
        if match:

```suggestion
def file_open(file_path, mode="rb", compression="infer"):
```

    return os.path.exists(file_path)


+def file_open(file_path, mode="rb", compression="infer"):
    if isinstance(file_path, str):
        match = S3_ADDRESS_REGEX.search(file_path)
        if match:
codereview_python_data_9071
    if not user_email:
        return

-   spotify_url = current_app.config['SERVER_ROOT_URL'] + '/profile/music-services/details/'
    text = render_template('emails/spotify_import_error.txt', error=error, link=spotify_url)
    send_mail(
        subject='ListenBrainz Spotify Importer Error',

can we use url_for() here?

    if not user_email:
        return

+   spotify_url = url_for("profile.music_services_details")
    text = render_template('emails/spotify_import_error.txt', error=error, link=spotify_url)
    send_mail(
        subject='ListenBrainz Spotify Importer Error',
codereview_python_data_9072
""" Get a list of output links filtered on some criteria """ - outputs = self.fastquery.get_outputs_by_pubkey(owner) if not include_spent: outputs = self.fastquery.filter_spent_outputs(outputs) return outputs Can we make this so that we don't need to instantiate a new `FastQuery` instance every time we need to use it, or move it under `__init__`? """ Get a list of output links filtered on some criteria """ + outputs = self.fastquery.get_outputs_by_public_key(owner) if not include_spent: outputs = self.fastquery.filter_spent_outputs(outputs) return outputs
codereview_python_data_9073
[11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]]) - >>> self = AnchorGenerator(9, [1.], [1.], ctr_offset=0.5) - >>> all_anchors = self.grid_anchors((2, 2), device='cpu') - >>> print(all_anchors) - tensor([[ 0., 0., 8., 8.], - [16., 0., 24., 8.], - [ 0., 16., 8., 24.], - [16., 16., 24., 24.]]) """ def __init__(self, We may add a standard docstring. [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]]) """ def __init__(self,
codereview_python_data_9075
'https://{domain}/revisions?job={job_type}&revision={revision}') FILE_UNREPRODUCIBLE_TESTCASE_TEXT = ( - '***************************************\n' 'Note: This crash might not be reproducible with the provided testcase. ' 'That said, for the past %d days, we\'ve been seeing this crash ' 'frequently.\n\n' maybe ``` '*************** UNREPRODUCIBLE ************************\n' ``` (and the rest is great) 'https://{domain}/revisions?job={job_type}&revision={revision}') FILE_UNREPRODUCIBLE_TESTCASE_TEXT = ( + '************************* UNREPRODUCIBLE *************************\n' 'Note: This crash might not be reproducible with the provided testcase. ' 'That said, for the past %d days, we\'ve been seeing this crash ' 'frequently.\n\n'
codereview_python_data_9077
LOG = get_logger('system')


-class DBContext:
    """
    Simple helper class to setup and sql engine, a database session and a
    connection.

Please use new-style class `DBContext(object)`

LOG = get_logger('system')


+class DBContext(object):
    """
    Simple helper class to setup and sql engine, a database session and a
    connection.
codereview_python_data_9080
class TestGROWriterLarge(TestCase, tempdir.TempDir): @classmethod def setUpClass(cls): cls.tmpdir = tempdir.TempDir() Why does this need to be changed? class TestGROWriterLarge(TestCase, tempdir.TempDir): + # not normally recommended to use class-level + # setup for universe (special case here) @classmethod def setUpClass(cls): cls.tmpdir = tempdir.TempDir()
codereview_python_data_9084
    use_alternate = biased_coin(data, alternate_chance)
    data.stop_example()
    if use_alternate:
-       return base
-   else:
        return alternate

class many(object):

Surely if `use_alternate` is True, we should return `alternate`, not `base`?

    use_alternate = biased_coin(data, alternate_chance)
    data.stop_example()
    if use_alternate:
        return alternate
+   else:
+       return base

class many(object):
codereview_python_data_9087
from dgl.nn.pytorch import RelGraphConv class RGCN(nn.Module): - """ - Parameters - ---------- - in_dim : int - Input feature size or number of nodes - """ def __init__(self, in_dim, h_dim, out_dim, num_rels, regularizer="basis", num_bases=-1, dropout=0., self_loop=False, link_pred=False): It's ok if you don't want to have very long docstring in examples as long as the argument name is self-explaining, but don't leave something half done. from dgl.nn.pytorch import RelGraphConv class RGCN(nn.Module): def __init__(self, in_dim, h_dim, out_dim, num_rels, regularizer="basis", num_bases=-1, dropout=0., self_loop=False, link_pred=False):
codereview_python_data_9088
] ) for rt in resp["RouteTables"]: - for assoc in rt["Associations"]: ec2_client.disassociate_route_table( AssociationId=assoc["RouteTableAssociationId"] ) nit: Can we assume that `Associations` is always present? Let's better use `rt.get("Associations", [])` here to avoid `KeyError`. ] ) for rt in resp["RouteTables"]: + for assoc in rt.get("Associations", []): ec2_client.disassociate_route_table( AssociationId=assoc["RouteTableAssociationId"] )
codereview_python_data_9092
def FastaNcbiIterator(source, alphabet=single_letter_alphabet): for title, sequence in SimpleFastaParser(source): id, name, xrefs = fasta_title_parser_auto(title) yield SeqRecord(Seq(sequence, alphabet), id, name, name, dbxrefs=xrefs) The SeqRecord ``.dbxrefs`` is expected to be a list of strings of the format ``"prefix:value"``, not a dictionary. So given: ``` >emb|CAA12345.6||gi|78 fake protein seq @#$%^[] MAGWSCLVTGGGGFLGQRIICLLVEEKDLQEIRVLDKVFRP GACQGTSVVIHTAS ``` we might expect ``["EMBL:CAA12345.6", ...]`` def FastaNcbiIterator(source, alphabet=single_letter_alphabet): + """Iterate over Fasta records as SeqRecord objects, with dbxrefs support. + + Arguments: + - source - input stream opened in text mode, or a path to a file + - alphabet - optional alphabet + + """ for title, sequence in SimpleFastaParser(source): id, name, xrefs = fasta_title_parser_auto(title) yield SeqRecord(Seq(sequence, alphabet), id, name, name, dbxrefs=xrefs)
codereview_python_data_9093
with shared_test_lib.TempDirectory() as temp_directory: filter_file = os.path.join(temp_directory, 'filter.txt') - with open(filter_file, 'w') as file_object: file_object.write('/a_directory/.+_file\n') options.file_filter = filter_file `open(filter_file, 'w')` => `io.open(filter_file, 'w', encoding='utf-8')` with shared_test_lib.TempDirectory() as temp_directory: filter_file = os.path.join(temp_directory, 'filter.txt') + with open(filter_file, 'wt', encoding='utf-8') as file_object: file_object.write('/a_directory/.+_file\n') options.file_filter = filter_file
codereview_python_data_9106
def getAscendent(self, node_type): """Return the ancenstor node of the given type, or None. - Node type can a - two letter code or longer description. e.g. 'fa' or 'family'. """ if node_type in _nodetype_to_code: node_type = _nodetype_to_code[node_type] This (and the original) are missing the word "be" here, and the line wrapping looks odd. The ``getDescendents`` text is fine. def getAscendent(self, node_type): """Return the ancenstor node of the given type, or None. + Node type can be a two letter code or longer description, + e.g. 'fa' or 'family'. """ if node_type in _nodetype_to_code: node_type = _nodetype_to_code[node_type]
codereview_python_data_9109
def __init__(self, *a, __remote_end__=None, **kw): if __remote_end__ is None: - if hasattr(self, "_preprocess_init_args"): - a, kw = self._preprocess_init_args(*a, **kw) - if __remote_end__ is None: __remote_end__ = remote_cls(*a, **kw) while True: # unwrap the object if it's a wrapper do this under single `if` statement, not double def __init__(self, *a, __remote_end__=None, **kw): if __remote_end__ is None: + try: + preprocess = object.__getattribute__(self, "_preprocess_init_args") + except AttributeError: + pass + else: + a, kw = preprocess(*a, **kw) + __remote_end__ = remote_cls(*a, **kw) while True: # unwrap the object if it's a wrapper
codereview_python_data_9115
""" def __init__(self, config): - """Args: - config(ClientConfig): the client config object """ super(ExplainClient, self).__init__(config) self.stub = explain_pb2_grpc.ExplainStub(config['channel']) It's not clear exactly what is the `prefix` here. So it would be nice to have an short example. """ def __init__(self, config): + """Initialize + + Args: + config (ClientConfig): the client config object """ super(ExplainClient, self).__init__(config) self.stub = explain_pb2_grpc.ExplainStub(config['channel'])
codereview_python_data_9126
# pylint: enable=bad-builtin msg = message_mock.getmsg(usertypes.MessageLevel.error) assert msg.text.startswith( - "Error building SQL Query: Expression tree is too large") @pytest.mark.parametrize('max_items, before, after', [ You could just replace `map(str, range(10000)))` with `str(x) for x in range(10000)` so you don't need the `pylint` disables. # pylint: enable=bad-builtin msg = message_mock.getmsg(usertypes.MessageLevel.error) assert msg.text.startswith( + "Error with SQL Query: Expression tree is too large") @pytest.mark.parametrize('max_items, before, after', [
codereview_python_data_9130
""" inplace = validate_bool_kwarg(inplace, "inplace") duplicates = self.duplicated(subset=subset, keep=keep) - indices, = duplicates.nonzero() return self.drop(index=self.index[indices], inplace=inplace) def duplicated(self, subset=None, keep="first"): It looks like that `nonzero` is also deprecated in pandas 0.24. Could we change it to ```suggestion indices, = duplicates.to_numpy().nonzero() ``` This also might be a good first issue to at least avoid using `nonzero` and, possibly, avoid `duplicated` """ inplace = validate_bool_kwarg(inplace, "inplace") duplicates = self.duplicated(subset=subset, keep=keep) + indices, = duplicates.values.nonzero() return self.drop(index=self.index[indices], inplace=inplace) def duplicated(self, subset=None, keep="first"):
codereview_python_data_9131
        else:
            i += 1
    result.extend(x[i:])
-   return list(map(tuple, result))


def _intervals(s):

This function is quite cryptic. Could there be a short notice what happens here?

        else:
            i += 1
    result.extend(x[i:])
+   return tuple(map(tuple, result))


def _intervals(s):
codereview_python_data_9134
if self.gt_unique_best: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 else: - assigned_gt_inds[overlaps[:, i] == gt_max_overlaps[i]] \ - = i + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_zeros((num_bboxes, )) The `\` is not necessary here. if self.gt_unique_best: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 else: + assigned_gt_inds[overlaps[:, i] == + gt_max_overlaps[i]] = i + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_zeros((num_bboxes, ))
codereview_python_data_9136
self.generate_set_slot_code(src, scope, code) -class DelSlot(InternalMethodSlot): - # For __del__ if defined - def __init__(self, slot_name, **kwargs): - super(DelSlot, self).__init__(slot_name, **kwargs) - - def slot_code(self, scope): - if not scope.lookup_here("__del__"): - return 0 - return InternalMethodSlot.slot_code(self, scope) - - class SyntheticSlot(InternalMethodSlot): # Type slot descriptor for a synthesized method which # dispatches to one or more user-defined methods depending This looks like `SyntheticSlot` would do the job. self.generate_set_slot_code(src, scope, code) class SyntheticSlot(InternalMethodSlot): # Type slot descriptor for a synthesized method which # dispatches to one or more user-defined methods depending
codereview_python_data_9138
"""Computes the axis wise maximum over chosen elements. Args: - data: 2-D float `Tensor` of size `[n, m]`. - mask: 2-D Boolean `Tensor` of size `[n, m]`. dim: The dimension over which to compute the maximum. Returns: I guess it's better to switch to `with shape` """Computes the axis wise maximum over chosen elements. Args: + data: 2-D float `Tensor` of shape `[n, m]`. + mask: 2-D Boolean `Tensor` of shape `[n, m]`. dim: The dimension over which to compute the maximum. Returns:
codereview_python_data_9140
# We need to see if we match the end of the netloc, accounting for a # 'username[:password]@' at the beginning and a ':port' at the end. - host_or_ip = netloc.split('@')[-1].split(':')[0] if is_ipv4_address(host_or_ip): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): People are rather inconsistent as to whether or not they properly encode their password. If they have an `@` in their password, and they've not encoded it appropriately, this will cause issues. It would be better to `rsplit` here as a result. It'll cause less teeth-gnashing. # We need to see if we match the end of the netloc, accounting for a # 'username[:password]@' at the beginning and a ':port' at the end. + host_or_ip = netloc.rsplit('@')[-1].split(':')[0] if is_ipv4_address(host_or_ip): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip):
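The reasoning behind the change: the userinfo part of a URL is not always percent-encoded, so a password may contain a literal `@`. Taking the chunk after the last `@` keeps the host extraction robust in that case. A standalone illustration with a made-up netloc (not the requests source itself):
```python
# Netloc whose password contains an unencoded '@' (illustrative value).
netloc = "alice:p@ssw0rd@internal.example.com:8443"

# Split from the right: the last '@'-separated chunk is always the
# host[:port], no matter how many '@' characters the userinfo holds.
host_or_ip = netloc.rsplit('@')[-1].split(':')[0]
print(host_or_ip)  # internal.example.com
```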
codereview_python_data_9142
-from . import (maxflow, mincost, edmondskarp, fordfulkerson, preflowpush, - shortestaugmentingpath, capacityscaling, networksimplex, utils) __all__ = sum([maxflow.__all__, mincost.__all__, Now that there no longer are name conflicts, you can remove this import and move the definition of `__all__` to the bottom. +from .maxflow import * +from .mincost import * +from .edmondskarp import * +from .fordfulkerson import * +from .preflowpush import * +from .shortestaugmentingpath import * +from .capacityscaling import * +from .networksimplex import * +from .utils import build_flow_dict, build_residual_network + __all__ = sum([maxflow.__all__, mincost.__all__,
codereview_python_data_9143
response = authenticate_presign_url_signv4(method, path, headers, data, url, query_params, request_dict) if response is not None: - LOGGER.debug('Signature calculation failed with the error.') return response def authenticate_presign_url_signv2(method, path, headers, data, url, query_params, request_dict): # Calculating Signature - LOGGER.debug('Calculating the version 2 signature.') aws_request = create_request_object(request_dict) credentials = Credentials(access_key=TEST_AWS_ACCESS_KEY_ID, secret_key=TEST_AWS_SECRET_ACCESS_KEY) auth = HmacV1QueryAuth(credentials=credentials, expires=query_params['Expires'][0]) nit: Should we add the error to the log line here as well? ``` LOGGER.debug('Signature calculation failed: %s' % response) ``` response = authenticate_presign_url_signv4(method, path, headers, data, url, query_params, request_dict) if response is not None: + LOGGER.error('Presign signature calculation failed: %s' % response) return response def authenticate_presign_url_signv2(method, path, headers, data, url, query_params, request_dict): # Calculating Signature aws_request = create_request_object(request_dict) credentials = Credentials(access_key=TEST_AWS_ACCESS_KEY_ID, secret_key=TEST_AWS_SECRET_ACCESS_KEY) auth = HmacV1QueryAuth(credentials=credentials, expires=query_params['Expires'][0])
codereview_python_data_9145
mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) - if result.cmdline[0] not in ['leave-mode', 'prompt-accept', 'repeat-command']: last_command[mode_manager.mode] = ( self._parse_count(text)[1], You blacklisted `:prompt-accept`, but that should be `:command-accept`, right? mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) + if result.cmdline[0] not in ['leave-mode', 'command-accept', 'repeat-command']: last_command[mode_manager.mode] = ( self._parse_count(text)[1],
codereview_python_data_9151
# specified explicitly async with self._run_and_rollback(): with self.assertRaisesRegex( - exceptions.UnknownEdgeDBError, r"subjectexpr is already defined for .+max_int"): await self.con.execute(""" CREATE CONSTRAINT test::max_int(std::int) Let's not proliferate `UnknownEdgeDBError` and instead introduce proper error classes. In this case I would introduce `InvalidConstraintDefinitionError` inheriting from `SchemaError`. For this `schema.error` exceptions need to get `code`, and a corresponding class in `edgedb.client.exceptions`. # specified explicitly async with self._run_and_rollback(): with self.assertRaisesRegex( + exceptions.InvalidConstraintDefinitionError, r"subjectexpr is already defined for .+max_int"): await self.con.execute(""" CREATE CONSTRAINT test::max_int(std::int)
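A rough sketch of the error-class layout the reviewer describes; apart from `SchemaError` and `InvalidConstraintDefinitionError`, which the comment names, every identifier and code value below is hypothetical and not taken from the EdgeDB sources:
```python
# Hypothetical sketch -- not the actual EdgeDB implementation.

class SchemaError(Exception):
    """Base class for schema-definition errors. The `code` attribute is
    what lets the client library map a server-side error onto its own
    exception class."""
    code = "SCHEMA_ERROR"


class InvalidConstraintDefinitionError(SchemaError):
    """Raised for malformed constraint definitions, e.g. a constraint
    whose `subjectexpr` is specified twice."""
    code = "INVALID_CONSTRAINT_DEFINITION"


# Client side (e.g. an edgedb.client.exceptions counterpart): pick the
# class that matches the code reported by the server.
_ERROR_MAP = {
    InvalidConstraintDefinitionError.code: InvalidConstraintDefinitionError,
}

def exception_from_code(code, message):
    return _ERROR_MAP.get(code, SchemaError)(message)
```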
codereview_python_data_9152
else: cmd = "pw useradd {0} -m".format(username) if comment is not None: - cmd += " -c '{0}'".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, " These changes look suspiciously like the ones in default.py; is this a lot of copy-paste that should be refactored? else: cmd = "pw useradd {0} -m".format(username) if comment is not None: + cmd += " -c {0}".format(comment) retcode, out = shellutil.run_get_output(cmd) if retcode != 0: raise OSUtilError(("Failed to create user account:{0}, "
codereview_python_data_9153
((lambda x, y: x ** y), "**")] def test_bool_disallowed(): - error_msg = "[Ii]nput[s]? to arithmetic operator `[\S]*` cannot be [a]?[ ]?boolean[s]?. Consider using bitwise operator[s]?" for kinds in unary_input_kinds: for (op, _, op_desc, _, _) in math_function_operations: yield check_raises_re, kinds, np.bool_, op, shape_small, op_desc, error_msg nitpick: raises is not case sensitive by default ((lambda x, y: x ** y), "**")] def test_bool_disallowed(): + error_msg = "Input[s]? to arithmetic operator `[\S]*` cannot be [a]?[ ]?boolean[s]?. Consider using bitwise operator[s]?" for kinds in unary_input_kinds: for (op, _, op_desc, _, _) in math_function_operations: yield check_raises_re, kinds, np.bool_, op, shape_small, op_desc, error_msg
codereview_python_data_9156
class AtomSelection(Selection): def __init__(self, name, resid, segid): - Selection.__init__(self) self.name = name self.resid = resid self.segid = segid Really, we never used that? It's been here since Day 1. Fair to remove it, though. class AtomSelection(Selection): def __init__(self, name, resid, segid): self.name = name self.resid = resid self.segid = segid
codereview_python_data_9164
init_swagger_documentation(app_svc.application) if args.fresh: - logging.info("Fresh startup: removing server data files") asyncio.get_event_loop().run_until_complete(data_svc.destroy()) run_tasks(services=app_svc.get_services()) may want to add a note here on where to find previous files or just that the old session/data was saved init_swagger_documentation(app_svc.application) if args.fresh: + logging.info("Fresh startup: resetting server data. See %s directory for data backups.", DATA_BACKUP_DIR) asyncio.get_event_loop().run_until_complete(data_svc.destroy()) run_tasks(services=app_svc.get_services())