Columns: id (stringlengths 24 to 28), content (stringlengths 121 to 2.08k)
codereview_python_data_7387
from plaso.lib import errors
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
class Token(object):

Please make these local overrides instead of overrides for the whole module (repeat every else)

from plaso.lib import errors
# pylint: disable=missing-type-doc,missing-return-type-doc,missing-return-doc
+# pylint: disable=missing-param-doc
class Token(object):
codereview_python_data_7392
def convert_pos_from_native(self, x, inplace=True):
    """Conversion of coordinate array x from native units to base units.

-        Keywords
-        --------
-        inplace : bool
            Whether to modify the array inplace, overwriting previous data

        Note

This is also not the correct numpy-style and we should add `x` in the parameter description.

def convert_pos_from_native(self, x, inplace=True):
    """Conversion of coordinate array x from native units to base units.

+        Parameters
+        ----------
+        x : array_like
+            Positions to transform
+        inplace : bool, optional
            Whether to modify the array inplace, overwriting previous data

        Note
codereview_python_data_7394
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
- <p>Details: cache-sea4450-SEA 1645543193 1543471933</p>
<hr>
<p>Varnish cache server</p>
</body>

I think the module name "mapping" is confusing, given the number of mappings we have now.

<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4470-SEA 1645543193 2833611162</p>
<hr>
<p>Varnish cache server</p>
</body>
codereview_python_data_7406
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Forseti installer CLI config object"""

from config import Config
from ..util.constants import TEMPLATE_TYPE_CLIENT


class ClientConfig(Config):
-    """Forseti installer CLI config object"""

    def __init__(self, **kwargs):
        """Initialize.

This breaks style, please fix throughout.

# See the License for the specific language governing permissions and
# limitations under the License.
+"""Forseti installer CLI config object."""

from config import Config
from ..util.constants import TEMPLATE_TYPE_CLIENT


class ClientConfig(Config):
+    """Forseti installer CLI config object."""

    def __init__(self, **kwargs):
        """Initialize.
codereview_python_data_7407
def follow_vk_redirect(cls, url):
    # If this is a 'videos' catalog URL with an video ID in the GET request, get that instead
    parsed_url = urlparse(url)
-    if parsed_url.path.startswith('/video'):
        query = {v[0]: v[1] for v in [q.split('=') for q in parsed_url.query.split('&')] if v[0] == 'z'}
        try:
            true_path = unquote(query['z']).split('/')[0]

shouldn't it be `/videos` ?

def follow_vk_redirect(cls, url):
    # If this is a 'videos' catalog URL with an video ID in the GET request, get that instead
    parsed_url = urlparse(url)
+    if parsed_url.path.startswith('/videos'):
        query = {v[0]: v[1] for v in [q.split('=') for q in parsed_url.query.split('&')] if v[0] == 'z'}
        try:
            true_path = unquote(query['z']).split('/')[0]
codereview_python_data_7409
cmd.cli.demands = copy.deepcopy(self.cli.demands)
cmd.configure()
cmd.run()
-        except Exception as e:
            logger.error(_("Error:") + " " + e.value)
        except:
            return

no need for another except

cmd.cli.demands = copy.deepcopy(self.cli.demands)
cmd.configure()
cmd.run()
+        except dnf.exceptions.Error as e:
            logger.error(_("Error:") + " " + e.value)
        except:
            return
codereview_python_data_7415
                    out_shape,
                    inds,
                    device='cpu',
-                    interpolation='bilinear',
                    rounded=True):
    """See :func:`BaseInstanceMasks.crop_and_resize`."""
    if len(self.masks) == 0:
        empty_masks = np.empty((0, *out_shape), dtype=np.uint8)

rounded -> binarize

                    out_shape,
                    inds,
                    device='cpu',
+                    interpolation='bilinear',
+                    binarize=True):
    """See :func:`BaseInstanceMasks.crop_and_resize`."""
    if len(self.masks) == 0:
        empty_masks = np.empty((0, *out_shape), dtype=np.uint8)
codereview_python_data_7420
for atom in atom_list:
    x, y, z = atom.coord
    radius = _get_atom_radius(atom, rtype="united")
-    print(
-        "{:6.3f}\t{:6.3f}\t{:6.3f}\t{:1.2f}".format(x, y, z, radius),
-        file=pdb_to_xyzr,
    )

# make surface

I'd be tempted to switch this to ``pdb_to_xyzr.write("...\n".format(...))``since I don't think we use print to file anywhere else in Biopython...

for atom in atom_list:
    x, y, z = atom.coord
    radius = _get_atom_radius(atom, rtype="united")
+    pdb_to_xyzr.write(
+        "{:6.3f}\t{:6.3f}\t{:6.3f}\t{:1.2f}\n".format(x, y, z, radius)
    )

# make surface
codereview_python_data_7423
assert(pipe.epoch_size("caffe_reader") != 0)
assert(pipe.epoch_size("caffe2_reader") != 0)
assert(pipe.epoch_size("file_reader") != 0)
- assert(len(pipe.epoch_size()) == 4)
\ No newline at end of file

missing empty line

assert(pipe.epoch_size("caffe_reader") != 0)
assert(pipe.epoch_size("caffe2_reader") != 0)
assert(pipe.epoch_size("file_reader") != 0)
\ No newline at end of file
+ assert(len(pipe.epoch_size()) == 4)
codereview_python_data_7427
# -*- coding: utf-8 -*-
-"""Parser for bash history files."""

import io
import re

```suggestion
"""Parser for fish history files."""
```

# -*- coding: utf-8 -*-
+"""Parser for fish history files."""

import io
import re
codereview_python_data_7432
Args:
    s: The string to evaluate.
-    file: Interpret s as a path to file also implies --quiete.
    quiet: Don't show the output in a new tab.
"""
if file:

Please add a comma before "also", and quiete -> quiet.

Args:
    s: The string to evaluate.
+    file: Interpret s as a path to file, also implies --quiet.
    quiet: Don't show the output in a new tab.
"""
if file:
codereview_python_data_7438
def get_s3(region=None):
-    """Get a Boto 3 S3 resource with a specific Region or with your default Region."""
    return boto3.resource('s3', region_name=region) if region else boto3.resource('s3')

**Amazon** S3 resource...**AWS** Region...

def get_s3(region=None):
+    """
+    Get a Boto 3 Amazon S3 resource with a specific AWS Region or with your
+    default AWS Region.
+    """
    return boto3.resource('s3', region_name=region) if region else boto3.resource('s3')
codereview_python_data_7444
if not conv:
    raise configexc.ValidationError(
        value,
-        '{} not in {}'.format(kind, list(converters.keys())))

if len(kind) != len(vals):
    raise configexc.ValidationError(

dict keys are unordered in Python 3.5, so the error message in the test doesn't match. Also, note that converting a dict to a list gives you the keys by default, so I'd do:
```suggestion
        '{} not in {}'.format(kind, list(sorted(converters))))
```
(and adjust the test to match)

if not conv:
    raise configexc.ValidationError(
        value,
+        '{} not in {}'.format(kind, list(sorted(converters))))

if len(kind) != len(vals):
    raise configexc.ValidationError(
codereview_python_data_7450
""" def wrapped_cmd(*args, **kwargs): - filename = "/".join([filedir, "status", "{0}.status".format(wrapped_cmd.inc)]) fileutil.write_file(filename, msg) # Every time enable is called, the agent looks for the next status file os.path.join with os.path.sep instead of "/" """ def wrapped_cmd(*args, **kwargs): + filename = os.path.join(filedir, "status", "{0}.status".format(wrapped_cmd.inc)) fileutil.write_file(filename, msg) # Every time enable is called, the agent looks for the next status file
codereview_python_data_7451
summary='Create a new objective',
description='Create a new objective using the format provided in the `ObjectiveSchema`.')
@aiohttp_apispec.request_schema(ObjectiveSchema)
-    @aiohttp_apispec.response_schema(ObjectiveSchema)
    async def create_objective(self, request: web.Request):
        objective = await self.create_on_disk_object(request)
        return web.json_response(objective.display)

Wasn't sure if this one was accidentally passed over, or if this is being saved for another ticket. But for the endpoints that have parameters in the URL, you can define those parameters by adding something similar to this to the `aiohttp_apispec.docs` decorator:
```
parameters=[{
    'in': 'path',
    'name': 'id',
    'schema': {'type': 'string'},
    'required': 'true',
    'description': 'UUID of the Objective object.'
}],
```

summary='Create a new objective',
description='Create a new objective using the format provided in the `ObjectiveSchema`.')
@aiohttp_apispec.request_schema(ObjectiveSchema)
+    @aiohttp_apispec.response_schema(ObjectiveSchema, description='Returns single objective in ObjectiveSchema format.')
    async def create_objective(self, request: web.Request):
        objective = await self.create_on_disk_object(request)
        return web.json_response(objective.display)
codereview_python_data_7455
        return MissingProductImage()

    def get_all_images(self):
-        if self.is_child and not self.images.exists() and self.parent is not None:
            return self.parent.images.all()

        return self.images.all()

`and not self.parent_id` avoids another query.

        return MissingProductImage()

    def get_all_images(self):
+        if self.is_child and not self.images.exists() and self.parent_id is not None:
            return self.parent.images.all()

        return self.images.all()
codereview_python_data_7462
code.putln("#endif") code.putln("") - code.putln("#if CYTHON_COMPILING_IN_LIMITED_API") - code.putln("static void %s_free(void *m) {" % Naming.module_cname) - code.putln(" %s_clear(m);" % Naming.module_cname) - code.putln("}") - code.putln("#endif") code.putln("") code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname) All of these changes would eventually also apply to the PEP489 mode, I think. (Just mentioning it here, that doesn't have to be part of this PR.) code.putln("#endif") code.putln("") code.putln("") code.putln("static struct PyModuleDef %s = {" % Naming.pymoduledef_cname)
codereview_python_data_7464
    AgentTestCase.tearDown(self)

def _create_dummy_archive(self, size=1024):
-    with open(self.archive_path, "wb") as f:
        f.truncate(size)

def test_it_should_invoke_all_periodic_operations(self):

I'm worried about the scope of this mock; if any other code branches happen to call `os.path.exists` on an unrelated file, could this patch cause issues? Maybe making a inner function that only special cases the exact file we're looking for, and passing the rest on to the original function?

    AgentTestCase.tearDown(self)

def _create_dummy_archive(self, size=1024):
+    with open(self.archive_path, "wb") as f:  # pylint: disable=C0103
        f.truncate(size)

def test_it_should_invoke_all_periodic_operations(self):
codereview_python_data_7469
def worker(start_method, sock, task_queue, res_queue, worker_cb, worker_params):
    if start_method == "spawn":
-        init_queue(sock, task_queue)
-        init_queue(sock, res_queue)
        sock.close()
    while True:
        if worker_cb(task_queue, res_queue, **worker_params) is None:

I tried to follow what are we testing here, but I got lost. Are we sending the messages to the processes and they are sending them back? We have two queues, right?

def worker(start_method, sock, task_queue, res_queue, worker_cb, worker_params):
    if start_method == "spawn":
+        task_queue.open_shm(multiprocessing.reduction.recv_handle(sock))
+        res_queue.open_shm(multiprocessing.reduction.recv_handle(sock))
        sock.close()
    while True:
        if worker_cb(task_queue, res_queue, **worker_params) is None:
codereview_python_data_7475
result = self.test(*args, **kwargs)
finish = benchmark_time()
internal_draw_time = sum(data.draw_times[initial_draws:])
-    runtime = datetime.timedelta(microseconds=((finish - start - internal_draw_time) * 1000000))
self.__test_runtime = runtime
current_deadline = self.settings.deadline
if not is_final:

If we pass `seconds=` instead of microseconds, we don't need the constant factor :smile:

result = self.test(*args, **kwargs)
finish = benchmark_time()
internal_draw_time = sum(data.draw_times[initial_draws:])
+    runtime = datetime.timedelta(
+        seconds=finish - start - internal_draw_time
+    )
self.__test_runtime = runtime
current_deadline = self.settings.deadline
if not is_final:
codereview_python_data_7477
``cumulated_variance``. The value at the ith index of `cumulated_variance` is the sum of the variances from 0 to i.

-    >>> n_pcs = np.where(PSF_pca.cumulated_varance > 0.95)[0][0]
    >>> atomgroup = u.select_atoms('backbone')
    >>> pca_space = PSF_pca.transform(atomgroup, n_components=n_pcs)

Is it really "varance" and not "variance"?? Please double check!

``cumulated_variance``. The value at the ith index of `cumulated_variance` is the sum of the variances from 0 to i.

+    >>> n_pcs = np.where(PSF_pca.cumulated_variance > 0.95)[0][0]
    >>> atomgroup = u.select_atoms('backbone')
    >>> pca_space = PSF_pca.transform(atomgroup, n_components=n_pcs)
codereview_python_data_7480
applies_to = {}
for applies_dict in rule_def.get('applies_to'):
    resource_type = applies_dict['type']

    if resource_type not in SUPPORTED_LOCATION_RESOURCE_TYPES:

Can we make this backward compatible? e.g. if 'resource_ids' is not defined then we can use '*' as the default value. Would like nice if we can also have a test case for this

applies_to = {}
for applies_dict in rule_def.get('applies_to'):
+    # For backwards compatibility for when applies_to was a string.
+    if isinstance(applies_dict, str):
+        applies_dict = {'type': applies_dict, 'resource_ids': ['*']}
+
    resource_type = applies_dict['type']

    if resource_type not in SUPPORTED_LOCATION_RESOURCE_TYPES:
codereview_python_data_7481
Processes a list of streams promoting Parameterized objects and methods to Param based streams.
"""
-    parameterizeds = defaultdict(list)
valid, invalid = [], []
for s in streams:
    if isinstance(s, Stream):

What would be the effect of simply ignoring the extra subscribers to a parameter? Would that be safe? If so, could this error simply be skipped? If not, maybe make a note about why that wouldn't work?

Processes a list of streams promoting Parameterized objects and methods to Param based streams.
"""
+    parameterizeds = defaultdict(set)
valid, invalid = [], []
for s in streams:
    if isinstance(s, Stream):
codereview_python_data_7491
        return
    self.temp_code = code.funcstate.allocate_temp(
        type, manage_ref=self.use_managed_ref)
-
-    if self.type.is_cpp_class:
-        code.globalstate.use_utility_code(
-            UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp"))
else:
    self.temp_code = None

Why not load the utility code in `_make_move_result_rhs()`? Just pass in `code`.

        return
    self.temp_code = code.funcstate.allocate_temp(
        type, manage_ref=self.use_managed_ref)
else:
    self.temp_code = None
codereview_python_data_7493
+ _base_doc[_base_doc.find('verbose :'):])

# DaskLGBMClassifier support for callbacks and init_model is not tested
-    fit.__doc__ = (
-        f"{_base_doc[:_base_doc.find('callbacks :')]} **kwargs\n {' ' * 12} 'Other parameters passed through to ``LGBMClassifier.fit()``.\n'"
-    )

def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
    """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""

please change this section to exactly the following
```python
fit.__doc__ = f"""
    {_base_doc[:_base_doc.find('callbacks :')]}**kwargs
        Other parameters passed through to ``LGBMClassifier.fit()``
    """
```

+ _base_doc[_base_doc.find('verbose :'):])

# DaskLGBMClassifier support for callbacks and init_model is not tested
+    fit.__doc__ = f"""
+    {_base_doc[:_base_doc.find('callbacks :')]}**kwargs
+        Other parameters passed through to ``LGBMClassifier.fit()``
+    """

def predict(self, X: _DaskMatrixLike, **kwargs: Any) -> dask_Array:
    """Docstring is inherited from the lightgbm.LGBMClassifier.predict."""
codereview_python_data_7498
and ref.nullable
and typemod == ql_ft.TypeModifier.SingletonType
):
-    raise RuntimeError(
        'operations on potentially empty arguments not supported in '
        'simple expressions')

return args

Is this a temporary assertion? If not, then this should be `errors.UnsupportedFeatureError`

and ref.nullable
and typemod == ql_ft.TypeModifier.SingletonType
):
+    raise errors.UnsupportedFeatureError(
        'operations on potentially empty arguments not supported in '
        'simple expressions')

return args
codereview_python_data_7527
if not resource['should_notify']:
    LOGGER.debug('Not notifying for: %s', resource['resource'])
    continue
-    if notifier_configs.get('email_connector_config'):
    notifiers.append(email_violations.EmailViolations(
        resource['resource'], inventory_index_id,
        violation_map[resource['resource']], global_configs,

Does this mean that every user will get the email by default? There's no way to disable emails?

if not resource['should_notify']:
    LOGGER.debug('Not notifying for: %s', resource['resource'])
    continue
+    if notifier_configs.get('email_connector'):
    notifiers.append(email_violations.EmailViolations(
        resource['resource'], inventory_index_id,
        violation_map[resource['resource']], global_configs,
codereview_python_data_7536
results = []

# Find all dashboard names that contain each of our query terms as a substring
-    for dashboard in Dashboard.objects.values('name').order_by('name'):
-        name = dashboard['name'].lower()
    if name.startswith('temporary-'):
        continue

You could even do
```python
for name in Dashboard.objects.order_by('name').values_list('name', flat=True):
    name =
```

results = []

# Find all dashboard names that contain each of our query terms as a substring
+    for dashboard_name in Dashboard.objects.order_by('name').values_list('name', flat=True):
+        name = dashboard_name.lower()
+
    if name.startswith('temporary-'):
        continue
codereview_python_data_7540
"""Return a list of all the edges with this label.""" if label not in self._label_map: raise ValueError("Unknown label: " + str(label)) - return list(self._label_map[label]) def labels(self): """Return a list of all the edge labels in this graph.""" Are you sure about removing ``sorted(...)`` here? Older versions of Python do not sort dictionaries so the output is not consistent between Python implementations. """Return a list of all the edges with this label.""" if label not in self._label_map: raise ValueError("Unknown label: " + str(label)) + return sorted(list(self._label_map[label])) def labels(self): """Return a list of all the edge labels in this graph."""
codereview_python_data_7542
assert_equal_branchings(x, x_)


-def test_edge_attribute_preservation():
    # Test that edge attributes are preserved when finding an optimum graph
-    # using the Edmonds class. Check this for both normal and multigraphs
    G = nx.Graph()

    edgelist = [(0, 1, [('weight', 5), ('otherattr', 1), ('otherattr2', 3)]),

Make this two tests, normal vs multigraph? Then, a failure for one but success for the other is more informative in the test output.

assert_equal_branchings(x, x_)


+def test_edge_attribute_preservation_normal_graph():
    # Test that edge attributes are preserved when finding an optimum graph
+    # using the Edmonds class for normal graphs.
    G = nx.Graph()

    edgelist = [(0, 1, [('weight', 5), ('otherattr', 1), ('otherattr2', 3)]),
codereview_python_data_7543
def cond_check(pkg):
    if not pkg.requires:
        return True
-    return self.sack.query().installed().filter(name=pkg.requires) or \
        pkg.requires in (pkg.name for pkg in self._goal._installs) or \
        pkg.requires in [pkg.name for pkg in trans.install]

@mluscon I know that with correct coms it will work (```<packagereq type="conditional" requires="xorg-x11-server-Xorg">ibus-m17n</packagereq>```), but what happened if someone will have in comps ```<packagereq type="mandatory" requires="xorg-x11-server-Xorg">ibus-m17n</packagereq>``` ?

def cond_check(pkg):
    if not pkg.requires:
        return True
+    installed = self.sack.query().installed().filter(name=pkg.requires).run()
+    return len(installed) > 0 or \
        pkg.requires in (pkg.name for pkg in self._goal._installs) or \
        pkg.requires in [pkg.name for pkg in trans.install]
codereview_python_data_7551
if returntype == "numpy": adj = np.full((len(coord), len(coord)), False, dtype=bool) pairs = capped_distance(coord, coord, max_cutoff=cutoff, box=box, return_distances=False) - - for x, y in pairs: - adj[x][y]=True return adj elif returntype == "sparse": # Initialize square List of Lists matrix of dimensions equal to number Same issue again here, we don't want to be looping at the the python level. By using numpy functions the same work is done, but is executed a lot faster. if returntype == "numpy": adj = np.full((len(coord), len(coord)), False, dtype=bool) pairs = capped_distance(coord, coord, max_cutoff=cutoff, box=box, return_distances=False) + + idx, idy = np.transpose(pairs) + adj[idx, idy]=True + return adj elif returntype == "sparse": # Initialize square List of Lists matrix of dimensions equal to number
codereview_python_data_7553
tparams = Params(alpha = 1, beta = 2, lambda1 = 3, lambda2 = 4, d = 5,
                 n_epochs = 6, inter = True)

-tmodel = dt.Frame([[random.random()] * tparams.d,
-                   [random.random()] * tparams.d],
                   names=['z', 'n'])

default_params = Params(alpha = 0.005, beta = 1, lambda1 = 0, lambda2 = 1,

Here, the old statement was more correct: `[random.random() for _ in range(N)]` creates a list of N different random numbers; `[random.random()] * N` creates a list of N same numbers, although that number is chosen randomly.

tparams = Params(alpha = 1, beta = 2, lambda1 = 3, lambda2 = 4, d = 5,
                 n_epochs = 6, inter = True)

+tmodel = dt.Frame([[random.random() for _ in range(tparams.d)],
+                   [random.random() for _ in range(tparams.d)]],
                   names=['z', 'n'])

default_params = Params(alpha = 0.005, beta = 1, lambda1 = 0, lambda2 = 1,
codereview_python_data_7560
Return:
    True if there was a change, False otherwise.
"""
-    if policy is usertypes.UNSET:
    return False

old_value = self._settings.unknownUrlSchemePolicy()
policy = self._UNKNOWN_URL_SCHEME_POLICY[policy]

I'm pretty sure this handling is incorrect - when navigating from a page with a customized URL scheme to a page without such a customization, nothing will be set now.

Return:
    True if there was a change, False otherwise.
"""
+    if policy is usertypes.UNSET:  # type: ignore
    return False

old_value = self._settings.unknownUrlSchemePolicy()
policy = self._UNKNOWN_URL_SCHEME_POLICY[policy]
codereview_python_data_7563
val_idx = train_idx

# calculate norm for each edge type and store in edge
-for canonical_etypes in hg.canonical_etypes:
-    u, v, eid = hg.all_edges(form='all', etype=canonical_etypes)
    _, inverse_index, count = torch.unique(v, return_inverse=True, return_counts=True)
    degrees = count[inverse_index]
    norm = torch.ones(eid.shape[0]).float() / degrees.float()
    norm = norm.unsqueeze(1)
-    hg.edges[canonical_etypes].data['norm'] = norm

# get target category id
category_id = len(hg.ntypes)

Will it be easier to calculate degrees by `hg.in_degrees(canonical_etypes)`?

val_idx = train_idx

# calculate norm for each edge type and store in edge
+for canonical_etype in hg.canonical_etypes:
+    u, v, eid = hg.all_edges(form='all', etype=canonical_etype)
    _, inverse_index, count = torch.unique(v, return_inverse=True, return_counts=True)
    degrees = count[inverse_index]
    norm = torch.ones(eid.shape[0]).float() / degrees.float()
    norm = norm.unsqueeze(1)
+    hg.edges[canonical_etype].data['norm'] = norm

# get target category id
category_id = len(hg.ntypes)
codereview_python_data_7566
nx.write_gexf(G,fh)
fh.seek(0)
H=nx.read_gexf(fh,node_type=int)
-    assert_true(H.node[1]['testattr'], True)

Should be only one argument to assert_true()...

nx.write_gexf(G,fh)
fh.seek(0)
H=nx.read_gexf(fh,node_type=int)
+    assert_equal(H.node[1]['testattr'], True)
codereview_python_data_7567
event_data.http_response_bytes = self._GetValueFromStructure(
    structure, 'response_bytes')

-    if key in ['combined_log_format', 'vhost_combined_log_format']:
    event_data.http_request_referer = self._GetValueFromStructure(
        structure, 'referer')
    event_data.http_request_user_agent = self._GetValueFromStructure(

change list to tuple

event_data.http_response_bytes = self._GetValueFromStructure(
    structure, 'response_bytes')

+    if key in ('combined_log_format', 'vhost_combined_log_format'):
    event_data.http_request_referer = self._GetValueFromStructure(
        structure, 'referer')
    event_data.http_request_user_agent = self._GetValueFromStructure(
codereview_python_data_7579
name = "SCI" fields_desc = [ - MACField("system_identifier", "00:00:00:00:00:00"), ShortField("port_identifier", 0) ] Maybe you could use `SourceMACField("system_identifier")` to get a "default value that works"? name = "SCI" fields_desc = [ + SourceMACField("system_identifier"), ShortField("port_identifier", 0) ]
codereview_python_data_7583
from io import StringIO
from collections import defaultdict
-from importlib import reload

from numpy.testing import (
    assert_equal, assert_array_equal,)

Why do we need `reload`? This is very unusual and I'd not rather have it unless absolutely necessary. It is not explicitly used so should be removed.

from io import StringIO
from collections import defaultdict

from numpy.testing import (
    assert_equal, assert_array_equal,)
codereview_python_data_7585
                ConditionalField(IP6Field("ipv6_address", '::1'),
                                 lambda pkt: pkt.length == 16)]


class IE_MSInternationalNumber(IE_Base):
    name = "MS International Number"

Do you think it's cleaner to have the IPv4 and IPv6 fields named separately, or both under `address` by using a `MultipleTypeField` ?

                ConditionalField(IP6Field("ipv6_address", '::1'),
                                 lambda pkt: pkt.length == 16)]

+    def post_build(self, p, pay):
+        if self.length == 4:
+            tmp_len = len(p) - 3
+            p = p[:1] + struct.pack("!H", tmp_len) + p[3:]
+        return p
+

class IE_MSInternationalNumber(IE_Base):
    name = "MS International Number"
codereview_python_data_7586
    the ``img_scale``.

Returns:
-    tuple[int]: Image scale sampled
-    None: Placeholder, to be consistent with :func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2

```python
Returns:
    (tuple[int], None): xxx
```

    the ``img_scale``.

Returns:
+    (tuple, None): Returns a tuple ``(scale, None)``, where
+        ``scale`` is sampled ratio multiplied with ``img_scale`` and
+        None is just a placeholder to be consistent with
+        :func:`random_select`.
"""
assert isinstance(img_scale, tuple) and len(img_scale) == 2
codereview_python_data_7591
weights_groupselections : False or list of {"mass", ``None`` or array_like} (optional)
    1. ``False`` will apply imposed weights to `groupselections` from
-       ``weights`` option if `weights` is not iterable. Otherwise will assume
-       a list of length equal to length of `groupselections` filled with
-       ``None`` values.
    2. A list of {"mass", ``None`` or array_like} with the length of
       `groupselections` will apply the weights to `groupselections` correspondingly.

Thanks for making that change, @z3y50n. Could you actually make this a little more detailed, though -- it's not intended to be just any iterable, but values of None or "mass". The current message could imply that users can pass in any old value.

weights_groupselections : False or list of {"mass", ``None`` or array_like} (optional)
    1. ``False`` will apply imposed weights to `groupselections` from
+       ``weights`` option if ``weights`` is either ``"mass"`` or ``None``.
+       Otherwise will assume a list of length equal to length of
+       `groupselections` filled with ``None`` values.
    2. A list of {"mass", ``None`` or array_like} with the length of
       `groupselections` will apply the weights to `groupselections` correspondingly.
codereview_python_data_7601
import torch.nn.functional as F
import argparse
from sklearn.metrics import f1_score
-from train import GAT
from dgl.data.ppi import PPIDataset
from torch.utils.data import DataLoader

Put `GAT` in `gat.py`. Let `train.py` and `train_ppi.py` import from it.

import torch.nn.functional as F
import argparse
from sklearn.metrics import f1_score
+from gat import GAT
from dgl.data.ppi import PPIDataset
from torch.utils.data import DataLoader
codereview_python_data_7604
class Node(object):
-    """A node in the Scop hierarchy

    sunid -- SCOP unique identifiers. e.g. '14986'

Since you'll have to edit this file again anyway, you might as well add the trailing full stop here too.

class Node(object):
+    """A node in the Scop hierarchy.

    sunid -- SCOP unique identifiers. e.g. '14986'
codereview_python_data_7611
:return: True if the recycling process should be run; otherwise, False.
:rtype: bool
"""
-    if inventory.items().get_space_left() < (DEFAULT_MIN_EMPTY_SPACE if self.min_empty_space is None else self.min_empty_space):
    return True

return False

Good job on reverting changes, you forgot this one though.

:return: True if the recycling process should be run; otherwise, False.
:rtype: bool
"""
+    if inventory.Items.get_space_left() < (DEFAULT_MIN_EMPTY_SPACE if self.min_empty_space is None else self.min_empty_space):
    return True

return False
codereview_python_data_7613
for (root, dir_names, files) in os.walk(path):
    if dir_names:
        partitioned_columns.add(dir_names[0].split("=")[0])
partitioned_columns = list(partitioned_columns)
else:
    directory = False

We shouldn't have to continue searching for partitioned_column names once we hit a directory with a file since `os.walk` is a dfs search

for (root, dir_names, files) in os.walk(path):
    if dir_names:
        partitioned_columns.add(dir_names[0].split("=")[0])
+    if files:
+        file_path = os.path.join(root, files[0])
+        break
partitioned_columns = list(partitioned_columns)
else:
    directory = False
codereview_python_data_7614
def with_mask(self):
    return hasattr(self, 'mask_head') and self.mask_head is not None

-    @property
-    def with_mask_iou(self):
-        return hasattr(self,
-                       'mask_iou_head') and self.mask_iou_head is not None
-
@abstractmethod
def extract_feat(self, imgs):
    pass

Remove this property from the base detector.

def with_mask(self):
    return hasattr(self, 'mask_head') and self.mask_head is not None

@abstractmethod
def extract_feat(self, imgs):
    pass
codereview_python_data_7615
def test_run_missing_binding(self):
    q = sql.Query('SELECT :answer')
-    with pytest.raises(sql.SqlError, match='Missing bound values!'):
        q.run()

def test_run_batch(self):

I don't feel like this test properly tests whether the method under test works properly. Like it needs a test case that does affect a row.

def test_run_missing_binding(self):
    q = sql.Query('SELECT :answer')
+    with pytest.raises(sql.SqlBugError, match='Missing bound values!'):
        q.run()

def test_run_batch(self):
codereview_python_data_7621
title = 'requests'
version = '2.14.2'
build = 0x021402

It looks like the file may need `# -*- coding: utf-8 -*-` at the top for Python 2 because of this line.

+# -*- coding: utf-8 -*-
title = 'requests'
version = '2.14.2'
build = 0x021402
codereview_python_data_7626
def analyse(self, env, nonempty=0, is_self_arg=False):
-    self.base_type.is_self_arg = self.is_self_arg = is_self_arg
    if self.type is not None:
        return self.name_declarator, self.type

Can we keep the `if` test? Given the default class attributes, it doesn't seem necessary to always set it (and that avoids an unnecessary attribute in the instance dicts).

def analyse(self, env, nonempty=0, is_self_arg=False):
+    if is_self_arg:
+        self.base_type.is_self_arg = self.is_self_arg = is_self_arg
    if self.type is not None:
        return self.name_declarator, self.type
codereview_python_data_7636
expand = param.Boolean(default=True, doc="""
    Whether the x_range and y_range should be allowed to expand
-    beyond the extent of the data.""")

height = param.Integer(default=400, doc="""
    The height of the aggregated image in pixels.""")

Maybe this docstring could be, ahem, expanded, with some of the pros and cons. My guess is:
```
Whether the x_range and y_range should be allowed to expand
beyond the extent of the data. Setting this value to True is
useful for the case where you want to ensure a certain size of
output grid, e.g. if you are doing masking or other arithmetic
on the grids. A value of False ensures that the grid is only
just as large as it needs to be to contain the data, which will
be faster and use less memory if the resulting aggregate is
being overlaid on a much larger background.""")
```
If that description is accurate, maybe it should be False by default?

expand = param.Boolean(default=True, doc="""
    Whether the x_range and y_range should be allowed to expand
+    beyond the extent of the data. Setting this value to True is
+    useful for the case where you want to ensure a certain size of
+    output grid, e.g. if you are doing masking or other arithmetic
+    on the grids. A value of False ensures that the grid is only
+    just as large as it needs to be to contain the data, which will
+    be faster and use less memory if the resulting aggregate is
+    being overlaid on a much larger background.""")

height = param.Integer(default=400, doc="""
    The height of the aggregated image in pixels.""")
codereview_python_data_7643
# map up to original set of anchors
if unmap_outputs:
    num_total_anchors = flat_anchors.size(0)
-    labels = unmap(labels, num_total_anchors, inside_flags,
-                   self.num_classes)  # fill bg label
    label_weights = unmap(label_weights, num_total_anchors, inside_flags)
    bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)

To make it more clear, `fill=self.num_classes`.

# map up to original set of anchors
if unmap_outputs:
    num_total_anchors = flat_anchors.size(0)
+    labels = unmap(
+        labels, num_total_anchors, inside_flags,
+        fill=self.num_classes)  # fill bg label
    label_weights = unmap(label_weights, num_total_anchors, inside_flags)
    bbox_targets = unmap(bbox_targets, num_total_anchors, inside_flags)
codereview_python_data_7644
    return redirect('pontoon.teams.team', locale=user.profile.custom_homepage)

# Guess user's team page or redirect to /teams
-    locale = get_project_locale_from_request(request, Locale.objects.available())
if locale not in ('en-US', 'en', None):
    start_url = reverse('pontoon.teams.team', kwargs={
        'locale': locale,

Locale.objects.**visible()** - otherwise it doesn't fix the bug.

    return redirect('pontoon.teams.team', locale=user.profile.custom_homepage)

# Guess user's team page or redirect to /teams
+    locale = get_project_locale_from_request(request, Locale.objects.visible())
if locale not in ('en-US', 'en', None):
    start_url = reverse('pontoon.teams.team', kwargs={
        'locale': locale,
codereview_python_data_7654
             parent: QWidget = None) -> None:
    super().__init__(parent)
    self.pattern = None  # type: typing.Optional[str]
-    self._model_pattern = None  # type: typing.Optional[str]
    self._win_id = win_id
    self._cmd = cmd
    self._active = False

Like you say, I think it would make more sense to make `CompletionModel.set_pattern` save the pattern in `self.pattern`, so that we can use `self.model().pattern` instead of saving it here. Or maybe that logic should be in `CompletionModel` entirely, so that calling `set_pattern` on it just is a no-op if the pattern is already the same?

             parent: QWidget = None) -> None:
    super().__init__(parent)
    self.pattern = None  # type: typing.Optional[str]
    self._win_id = win_id
    self._cmd = cmd
    self._active = False
codereview_python_data_7661
from __future__ import division
from __future__ import print_function

-from tensorflow_addons.losses.python.triplet import triplet_semihard_loss
from tensorflow_addons.losses.python.lifted import lifted_struct_loss

swap those two imports?

from __future__ import division
from __future__ import print_function

from tensorflow_addons.losses.python.lifted import lifted_struct_loss
+from tensorflow_addons.losses.python.triplet import triplet_semihard_loss
codereview_python_data_7663
@memoize.wrap(memoize.FifoOnDisk(DISK_CACHE_SIZE))
@memoize.wrap(memoize.Memcache(60 * 60 * 24 * 30))  # 30 day TTL
-def revision_to_branched_from(uri, rev):
    """Interrogates git code review server to find the branch-from
    revision of a component."""
-    full_uri = "%s/+/%s?format=JSON" % (uri, rev)
    url_content = _get_url_content(full_uri)
    # Hatefully, gerrit returns nonsense in the first line.
    url_content = '\n'.join(url_content.splitlines()[1:])

Nit: rev->revision for consistency

@memoize.wrap(memoize.FifoOnDisk(DISK_CACHE_SIZE))
@memoize.wrap(memoize.Memcache(60 * 60 * 24 * 30))  # 30 day TTL
+def revision_to_branched_from(uri, revision):
    """Interrogates git code review server to find the branch-from
    revision of a component."""
+    full_uri = "%s/+/%s?format=JSON" % (uri, revision)
    url_content = _get_url_content(full_uri)
    # Hatefully, gerrit returns nonsense in the first line.
    url_content = '\n'.join(url_content.splitlines()[1:])
codereview_python_data_7666
def test_valid_callback_false(self):
    """Verify valid rules returns True."""
    self.firewall_rules._add_rule_callback = lambda _: False
-    self.assertFalse(self.firewall_rules._check_rule_before_adding(self.test_rule))

def test_unknown_key(self):
    """A rule with an unknown key raises InvalidFirewallRuleError."""

Nit: ensure line is no longer than 80 chars. Here and any line below that is too long as well.

def test_valid_callback_false(self):
    """Verify valid rules returns True."""
    self.firewall_rules._add_rule_callback = lambda _: False
+    self.assertFalse(
+        self.firewall_rules._check_rule_before_adding(self.test_rule))

def test_unknown_key(self):
    """A rule with an unknown key raises InvalidFirewallRuleError."""
codereview_python_data_7669
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
-    self.fp16_enabled = True

self._init_layers()

Why is the default value True?

self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
+    self.fp16_enabled = False

self._init_layers()
codereview_python_data_7672
if isinstance(edges, dict):
    # TODO(zhengda) we need to directly generate subgraph of all relations with
    # one invocation.
-    subg = {etype: self.find_edges(edges[etype], etype) for etype in edges}
    num_nodes = {ntype: self.number_of_nodes(ntype) for ntype in self.ntypes}
    subg = dgl_heterograph(subg, num_nodes_dict=num_nodes)
else:

In which function we store the original edge IDs in subg?

if isinstance(edges, dict):
    # TODO(zhengda) we need to directly generate subgraph of all relations with
    # one invocation.
+    if isinstance(edges, tuple):
+        subg = {etype: self.find_edges(edges[etype], etype[1]) for etype in edges}
+    else:
+        subg = {self._etype2canonical[etype]: self.find_edges(edges[etype], etype) \
+                for etype in edges}
    num_nodes = {ntype: self.number_of_nodes(ntype) for ntype in self.ntypes}
    subg = dgl_heterograph(subg, num_nodes_dict=num_nodes)
else:
codereview_python_data_7676
import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline
-from PIL import Image as Image

video_directory = '/tmp/labelled_videos/'

Maybe you can use ElementExtract to extract all frames from a sequence as a separate batches and call resize on it instead of going through ExternalSource?

import nvidia.dali.ops as ops
import nvidia.dali.types as types
from nvidia.dali.pipeline import Pipeline

video_directory = '/tmp/labelled_videos/'
codereview_python_data_7677
input, _ = fn.file_reader(file_root=img_dir)
decoded = fn.image_decoder(input, device='cpu', output_type=types.RGB)
decoded = decoded.gpu() if device == 'gpu' else decoded
-    tile = fn.cast(fn.uniform(range=(50, 200), shape=[1]), dtype=types.INT32)
-    ratio = fn.uniform(range=(0.3, 0.7), shape=[1])
-    angle = fn.uniform(range=(-math.pi, math.pi), shape=[1])
grided = fn.grid_mask(decoded, device=device, tile=tile, ratio=ratio, angle=angle)
pipe.set_outputs(grided, decoded, tile, ratio, angle)
return pipe

These should be scalars - i.e. no shape or `shape=[]`

input, _ = fn.file_reader(file_root=img_dir)
decoded = fn.image_decoder(input, device='cpu', output_type=types.RGB)
decoded = decoded.gpu() if device == 'gpu' else decoded
+    tile = fn.cast(fn.uniform(range=(50, 200)), dtype=types.INT32)
+    ratio = fn.uniform(range=(0.3, 0.7))
+    angle = fn.uniform(range=(-math.pi, math.pi))
grided = fn.grid_mask(decoded, device=device, tile=tile, ratio=ratio, angle=angle)
pipe.set_outputs(grided, decoded, tile, ratio, angle)
return pipe
codereview_python_data_7680
req["url"] = None try: requests.append(self.__parse_request(req)) - except: raise TaurusConfigError("Wrong request:\n %s" % req) return requests It would be nice to have exception dumped somewhere (debug logs, info logs). req["url"] = None try: requests.append(self.__parse_request(req)) + except BaseException as exc: + logging.debug("%s\n%s" % traceback.format_exc()) raise TaurusConfigError("Wrong request:\n %s" % req) return requests
codereview_python_data_7682
"""Check if a `txid` was already used as an input. A transaction can be used as an input for another transaction. Bigchain - needs to make sure that a given `txid` is only used once. - This method will check if the `txid` and `output` has already been spent in a transaction that is in either the `VALID`, `UNDECIDED` or `BACKLOG` state. Isn't it "a given (txid, output) is only used once" """Check if a `txid` was already used as an input. A transaction can be used as an input for another transaction. Bigchain + needs to make sure that a given `(txid, output)` is only used once. + This method will check if the `(txid, output)` has already been spent in a transaction that is in either the `VALID`, `UNDECIDED` or `BACKLOG` state.
codereview_python_data_7687
@send_with_retry
def __send_monitoring(self):
-    src_name = platform.node()
    data = self.monitoring_buffer.get_monitoring_json(self._session)
-    self._session.send_monitoring_data(id(self.engine), data)

@send_with_retry
def __send_custom_metrics(self):

The engine id is the same as in `worker_index` from `__upload_artifacts`, with default of zero

@send_with_retry
def __send_monitoring(self):
+    engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '0')
    data = self.monitoring_buffer.get_monitoring_json(self._session)
+    self._session.send_monitoring_data(engine_id, data)

@send_with_retry
def __send_custom_metrics(self):
codereview_python_data_7697
    abbreviate('Something - I made up from thin air'),
    'SIMUFTA')

def test_apostrophes(self):
-    self.assertEqual(abbreviate('Halley\'s Comet'), 'HC')


if __name__ == '__main__':

For readability, can you please use double quotes around the input string to negate the need for escaping the apostrophe?

    abbreviate('Something - I made up from thin air'),
    'SIMUFTA')

def test_apostrophes(self):
+    self.assertEqual(abbreviate("Halley's Comet"), 'HC')


if __name__ == '__main__':
codereview_python_data_7698
@deprecated_posargs
def complex_numbers(
    *,
-    min_magnitude: Optional[Real] = 0,
    max_magnitude: Real = None,
    allow_infinity: bool = None,
    allow_nan: bool = None

```suggestion
    min_magnitude: Real = 0,
```
There's no real reason to accept `min_magnitude=None`, as there's a clear lower-bound on possible magnitudes. We went through a similar transition with `min_size` arguments in #1618, but missed magnitudes... happy for you to just do the small fix above. Alternatively you could update the function - inline the `check_valid_magnitude` logic, add a deprecation for `None`, and change `min_magnitude is None` to `min_magnitude == 0`.

@deprecated_posargs
def complex_numbers(
    *,
+    min_magnitude: Real = 0,
    max_magnitude: Real = None,
    allow_infinity: bool = None,
    allow_nan: bool = None
codereview_python_data_7699
    schema: s_schema.Schema,
    astnode: qlast.DDLOperation,
    context: sd.CommandContext,
-) -> Union[CreateScalarType, sd.CommandGroup]:
    cmd = super()._cmd_tree_from_ast(schema, astnode, context)
    if isinstance(cmd, sd.CommandGroup):
        for subcmd in cmd.get_subcommands():
            if isinstance(subcmd, cls):
-                create_cmd: ScalarTypeCommand = subcmd
                break
        else:
            raise errors.InternalServerError(

You don't need to be very specific as to the return type here, `sd.Command` is sufficient.

    schema: s_schema.Schema,
    astnode: qlast.DDLOperation,
    context: sd.CommandContext,
+) -> sd.Command:
    cmd = super()._cmd_tree_from_ast(schema, astnode, context)
    if isinstance(cmd, sd.CommandGroup):
        for subcmd in cmd.get_subcommands():
            if isinstance(subcmd, cls):
+                create_cmd: sd.Command = subcmd
                break
        else:
            raise errors.InternalServerError(
codereview_python_data_7706
        return RTMPStream(self.session, params=params)

    def _get_vod_stream(self, page):
-        m = self._vod_re.search(page.text)
-        if m:
-            return HLSStream.parse_variant_playlist(self.session, m.group(1).replace('\\/', '/'))

    def _get_streams(self):
        url_channel_name = self._url_re.match(self.url).group(1)

You could use `parse_json` if you included the `"` characters in the match group. It looks like JSON so there might be other things escaped for other streams...

        return RTMPStream(self.session, params=params)

    def _get_vod_stream(self, page):
+        data = self.data_schema.validate(page.text)
+
+        if data:
+            return HLSStream.parse_variant_playlist(self.session, data["vod"])

    def _get_streams(self):
        url_channel_name = self._url_re.match(self.url).group(1)
codereview_python_data_7712
        The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
        is size of output feature.
        """
        if isinstance(feat, tuple):
            feat_src = self.feat_drop(feat[0])
            feat_dst = self.feat_drop(feat[1])

Would you mind adding a check in the sparse `SAGEConv` for GCN aggregator for the src and dst node feature lengths? If I understand it correctly, they must be the same.

        The output feature of shape :math:`(N, D_{out})` where :math:`D_{out}`
        is size of output feature.
        """
+        check_eq_shape(feat)
        if isinstance(feat, tuple):
            feat_src = self.feat_drop(feat[0])
            feat_dst = self.feat_drop(feat[1])
codereview_python_data_7714
# shape of position array set here, use span in last dimension
# from this point on
self._position_array[self._frame_index] = (
-    self._atoms.positions[:, self._dim])

def _conclude(self):
    if self.fft:

This is a place where I disagree with pep8bot and think the below looks much nicer, but up to you.
```python
self._position_array[self._frame_index] = self._atoms.positions[:, self._dim]
```

# shape of position array set here, use span in last dimension
# from this point on
self._position_array[self._frame_index] = (
+    self.ag.positions[:, self._dim])

def _conclude(self):
    if self.fft:
codereview_python_data_7715
        ctypes.c_int(end_iteration)))
    return self

-    def loads_params(self):
    """Loads model parameters by calling LGBM_BoosterGetConfig."""
    buffer_len = 2 << 20
    tmp_out_len = ctypes.c_int64(0)

```suggestion
    def _load_params(self):
```
Can you please prefix this with a `_`, to make it an internal-only function? That will allow developers to make changes in the future without breaking users' existing code. I don't think we should encourage users to call this function directly.

        ctypes.c_int(end_iteration)))
    return self

+    def _load_params(self):
    """Loads model parameters by calling LGBM_BoosterGetConfig."""
    buffer_len = 2 << 20
    tmp_out_len = ctypes.c_int64(0)
codereview_python_data_7721
if isinstance(offline, Packet):
    tempfile_written, offline = _write_to_pcap([offline])

-    elif (isinstance(offline, list) or
-          isinstance(offline, PacketList)) and \
        all(isinstance(elt, Packet) for elt in offline):
    tempfile_written, offline = _write_to_pcap(offline)

We need to find something more efficient than this. We can probably just check the first element and assume the type list is consistent..

if isinstance(offline, Packet):
    tempfile_written, offline = _write_to_pcap([offline])

+    elif isinstance(offline, (list, PacketList)) and \
        all(isinstance(elt, Packet) for elt in offline):
    tempfile_written, offline = _write_to_pcap(offline)
codereview_python_data_7722
# is 'No' then we need to prepare suitable subgraph view.
partition_nodes = set().union(*partition)
if len(partition_nodes) != len(G):
-    if create_using is None:
-        # This hack is necessary because we will
-        # be passing subgraph instance.
-        create_using = G
    G = G.subgraph(partition_nodes)
return _quotient_graph(G, partition, edge_relation, node_data,
                       edge_data, relabel, create_using)

Instead of hacking create_using, could you fix the create_using treatment in _quotient_graph()? We don't use ```type(create_using)()``` because it doesn't work with graph views (including subgraphs). Please change line 247 in your file from
H = type(create_using)() if create_using is not None else type(G)()
to
H = G.fresh_copy() if create_using is None else create_using.fresh_copy()
Thanks for helping me catch bugs! :)

# is 'No' then we need to prepare suitable subgraph view.
partition_nodes = set().union(*partition)
if len(partition_nodes) != len(G):
    G = G.subgraph(partition_nodes)
return _quotient_graph(G, partition, edge_relation, node_data,
                       edge_data, relabel, create_using)
codereview_python_data_7738
    self.exRadius = float(tokens.popleft())
    self.sel = parser.parse_expression(self.precedence)

@return_empty_on_empty_selection
def apply(self, group):
    indices = []

Rather than replacing `return_empty_on_apply`, you can usually stack decorators like
```
@return_empty_on_apply
@return_empty_on_empty_selection
```

    self.exRadius = float(tokens.popleft())
    self.sel = parser.parse_expression(self.precedence)

+@return_empty_on_apply
@return_empty_on_empty_selection
def apply(self, group):
    indices = []
codereview_python_data_7739
    return self

def infer_type(self, env):
-    # AttributeNode (and possibly others) have ErrorType by default, so make sure to infer
-    # if the result is an error, rather than just accepting it
-    if self.type is not None and not self.type.is_error:
        return self.type
    if self.expression is not None:
-        if self.expression.type is not None and not self.type.is_error:
            return self.expression.type
        return self.expression.infer_type(env)
    assert False, "cannot infer type of ResultRefNode"

maybe it's not a good idea that they have `ErrorType` by default? Should we change that instead?

    return self

def infer_type(self, env):
+    if self.type is not None:
        return self.type
    if self.expression is not None:
+        if self.expression.type is not None:
            return self.expression.type
        return self.expression.infer_type(env)
    assert False, "cannot infer type of ResultRefNode"
codereview_python_data_7745
if not path:
    return path

-    return os.path.join(self.chroot.directory, path[1:])

def _get_chroot_corpus_paths(self, corpus_directories):
    """Return chroot relative paths for the given corpus directories.

instead of path[1:], can we explicitly do lstrip(os.sep), seems more clear ?

if not path:
    return path

+    return os.path.join(self.chroot.directory, path.lstrip(os.sep))

def _get_chroot_corpus_paths(self, corpus_directories):
    """Return chroot relative paths for the given corpus directories.
codereview_python_data_7746
.. Note:: This ``close()`` method is non-standard. ``del NamedStream``
          always closes the underlying stream.

-
-        .. versionchanged:: 2.1.0
-            Calls to ``close()`` will no longer attempt to close or flush the
-            stream if :attr:`closed` is `True`.
-
"""
if self.closed:
    return

Won't it get pretty verbose if you start adding `versionchanged` directives for bug fixes as opposed to select new features or API changes?

.. Note:: This ``close()`` method is non-standard. ``del NamedStream``
          always closes the underlying stream.

"""
if self.closed:
    return
codereview_python_data_7747
Returns
-------
-    iterable
-        An iterable of node names in topological sorted order.
-    iterable[tuples[node, int]]
-        If with_generation=True, returns an iterable of tuples where the first element is the node and the second element is the generation. In topologically sorted order.

This description should be combined with the above rather than listed as a separate line so that it's clear that only one iterable is ever returned, but the items that are iterated over depends on `with_generation`

Returns
-------
+    iterable | iterable[tuple[node, int]]
+        If with_generation=False: An iterable of node names in topological sorted order.
+        If with_generation=True: An iterable of tuples where the first element is the node and the second element is the generation. In topologically sorted order.
codereview_python_data_7749
for m in self.modules():
    # DeformConv2dPack, ModulatedDeformConv2dPack
    if hasattr(m, 'conv_offset'):
-        m.conv_offset.weight.data.zero_()
-        m.conv_offset.bias.data.zero_()

@abstractmethod
def loss(self, **kwargs):

DCN should not be able to use `init_cfg`.

for m in self.modules():
    # DeformConv2dPack, ModulatedDeformConv2dPack
    if hasattr(m, 'conv_offset'):
+        constant_init(m.conv_offset, 0)

@abstractmethod
def loss(self, **kwargs):
codereview_python_data_7760
    scope_tree=ctx.path_scope,
    ctx=inf_ctx,
)
-    cardinality = inference.infer_cardinality(
-        ir,
-        scope_tree=ctx.path_scope,
-        ctx=inf_ctx,
-    )

# Fix up weak namespaces
_rewrite_weak_namespaces(ir, ctx)

Hm. Why a second call to `infer_cardinality`?

    scope_tree=ctx.path_scope,
    ctx=inf_ctx,
)

# Fix up weak namespaces
_rewrite_weak_namespaces(ir, ctx)
codereview_python_data_7767
if os.path.isfile(self._filename):
    # The file already exists, so ask the user if it should be
    # overwritten.
-    self._ask_overwrite_question()
-    # FIFO, device node, etc. Don't even try.
elif (os.path.exists(self._filename) and
      not os.path.isdir(self._filename)):
-    self.cancel(False)
-    message.error(self._win_id, "The file {} already exists, and is a "
-                  "special file. Aborting.".format(
-                      self._filename))
else:
    self._create_fileobj()

Is there a reason you're not using `self._die("The file {} ...")` here?

if os.path.isfile(self._filename):
    # The file already exists, so ask the user if it should be
    # overwritten.
+    self._ask_confirm_question(self._filename + " already exists. Overwrite?")
+    # FIFO, device node, etc. Make sure we want to do this
elif (os.path.exists(self._filename) and
      not os.path.isdir(self._filename)):
+    self._ask_confirm_question(self._filename + " already exists and is a special file. Write to this?")
else:
    self._create_fileobj()
codereview_python_data_7768
c = CacheClass()
self.assertEqual(2, c.foo(1))
self.assertEqual(2, c.foo(1))
self.assertListEqual([
    (1,),
    (1,),
], c.called)

def test_name_clash(self):

do we want to self.assertEqual(3, c.foo(2)) with different arg as well

c = CacheClass()
self.assertEqual(2, c.foo(1))
self.assertEqual(2, c.foo(1))
+self.assertEqual(3, c.foo(2))
self.assertListEqual([
    (1,),
    (1,),
+    (2,),
], c.called)

def test_name_clash(self):
codereview_python_data_7773
def check_elasticsearch(expect_shutdown=False, print_error=True):
    # Check internal endpoint for health
-    endpoint = '%s://%s:%s' % (get_service_protocol(), 'localhost', config.PORT_ELASTICSEARCH)
    try:
        req = requests.get(endpoint + '/_cluster/health')
        es_status = json.loads(req.text)

nit: we could use `constants.LOCALHOST` here:
```
endpoint = '%s://%s:%s' % (get_service_protocol(), constants.LOCALHOST, config.PORT_ELASTICSEARCH)
```

def check_elasticsearch(expect_shutdown=False, print_error=True):
    # Check internal endpoint for health
+    endpoint = '%s://%s:%s' % (get_service_protocol(), constants.LOCALHOST, config.PORT_ELASTICSEARCH)
    try:
        req = requests.get(endpoint + '/_cluster/health')
        es_status = json.loads(req.text)
codereview_python_data_7774
""" newList = [] - seenNames = {} for seriesList in seriesLists: for series in seriesList: if series.name not in seenNames: - seenNames[series.name] = True newList.append(series) return newList Can be a set (`seenNames = set()`) and then `seenNames.add(series.name)` :) EDIT: I'm just looking around in this project, and happened to see this PR. """ newList = [] + seenNames = set() for seriesList in seriesLists: for series in seriesList: if series.name not in seenNames: + seenNames.add(series.name) newList.append(series) return newList
codereview_python_data_7775
            tvonline\.ewe|nettv\.netcologne|tvplus\.m-net
        )\.de
    )|(?:(?:
-        player\.waly|www\.netplus|www\.1und1
    )\.tv)
    |www\.bbv-tv\.net
    |www\.meinewelt\.cc

reuse www
```regex
player\.waly|www\.(?:1und1|netplus)
```

            tvonline\.ewe|nettv\.netcologne|tvplus\.m-net
        )\.de
    )|(?:(?:
+        player\.waly|www\.(?:1und1|netplus)
    )\.tv)
    |www\.bbv-tv\.net
    |www\.meinewelt\.cc
codereview_python_data_7793
# To avoid interfering with the remaining pairwise2 tests,
# restore the original functions
pairwise2._make_score_matrix_fast = orig_make_score_matrix_fast
-    pairwise2.rint = orig_python_rint

Did this have any functional usage? i.e. Might anyone be assuming these exist?

# To avoid interfering with the remaining pairwise2 tests,
# restore the original functions
pairwise2._make_score_matrix_fast = orig_make_score_matrix_fast
+    pairwise2.rint = orig_rint
codereview_python_data_7799
    raise errors.UnableToParseFile('Not a valid Firefox cache2 record.')

if file_metadata_header.format_version >= 2:
-    flags = file_object.read(4)  # pylint: disable=unused-variable

url = file_object.read(file_metadata_header.key_size)

header_data = file_object.read()

Remove the pylint override, why not use `file_object.seek(4, os.SEEK_CUR)` ?

    raise errors.UnableToParseFile('Not a valid Firefox cache2 record.')

if file_metadata_header.format_version >= 2:
+    file_object.seek(4, os.SEEK_CUR)

url = file_object.read(file_metadata_header.key_size)

header_data = file_object.read()
codereview_python_data_7809
#
import pytest
import numpy as np
-from numpy.testing import assert_equal, assert_almost_equal

import itertools
from itertools import combinations_with_replacement as comb

use the specific import `from itertools import product`

#
import pytest
import numpy as np
+from numpy.testing import assert_equal, assert_almost_equal, assert_allclose

import itertools
from itertools import combinations_with_replacement as comb
codereview_python_data_7811
runner.register_training_hooks(cfg.lr_config, optimizer_config,
                               cfg.checkpoint_config, cfg.log_config,
                               cfg.get('momentum_config', None))
-    runner.register_hook_from_cfg(cfg.check_isfinite)
if distributed:
    if isinstance(runner, EpochBasedRunner):
        runner.register_hook(DistSamplerSeedHook())

Should avoid hard code and put it into custom_hook

runner.register_training_hooks(cfg.lr_config, optimizer_config,
                               cfg.checkpoint_config, cfg.log_config,
                               cfg.get('momentum_config', None))
if distributed:
    if isinstance(runner, EpochBasedRunner):
        runner.register_hook(DistSamplerSeedHook())
codereview_python_data_7812
def get_spike_length(values):
    first_spike_start = values[0][3]
-    for i in range(1, len(values)):
        if values[i][3] == first_spike_start:
            return i
-        i += 1
    return len(values)

why do you need inc index var?

def get_spike_length(values):
    first_spike_start = values[0][3]
+    for i in range(0, len(values)):
        if values[i][3] == first_spike_start:
            return i
    return len(values)
codereview_python_data_7827
logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) - def report_event(self, message, is_success=False, duration=0): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, - op=WALAEventOperation.Provision) def report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status, I would recommend leaving this one change (the ability to specify the operation name) in place. You might not need it now, but you've already written and tested it, and there's zero cost to carrying it along should you need it in the future. logger.info("Deploy ssh key pairs.") self.osutil.deploy_ssh_keypair(ovfenv.username, keypair) + def report_event(self, message, is_success=False, duration=0, + operation=WALAEventOperation.Provision): add_event(name=AGENT_NAME, message=message, duration=duration, is_success=is_success, + op=operation) def report_not_ready(self, sub_status, description): status = ProvisionStatus(status="NotReady", subStatus=sub_status,
codereview_python_data_7832
from pytest import raises

-pytestmark = pytest.mark.tendermint
-

class TestBlockModel(object):
    def test_block_initialization(self, monkeypatch):
        from bigchaindb.models import Block

I don't think we need to fix these tests for `Block` class as it's going to be removed soon.

from pytest import raises


class TestBlockModel(object):
    def test_block_initialization(self, monkeypatch):
        from bigchaindb.models import Block
codereview_python_data_7838
while i < len(self.shrink_target.blocks):
    j = min(i + 4, len(self.shrink_target.blocks) - 2)
    while j >= i:
-        u, v = self.shrink_target.blocks[i]
-        r, s = self.shrink_target.blocks[j]
        if self.incorporate_new_buffer(
-            self.shrink_target.buffer[:i] +
-            self.shrink_target.buffer[j:]
        ):
            break
        j -= 1

Why do you need `u`, `v`, `r`, `s` if none of those are used?

while i < len(self.shrink_target.blocks):
    j = min(i + 4, len(self.shrink_target.blocks) - 2)
    while j >= i:
+        u, _ = self.shrink_target.blocks[i]
+        _, v = self.shrink_target.blocks[j]
        if self.incorporate_new_buffer(
+            self.shrink_target.buffer[:u] +
+            self.shrink_target.buffer[v:]
        ):
            break
        j -= 1
codereview_python_data_7840
try:
    is_gcs_summary_enabled = (
        inventory_notifier_config.get('gcs_summary').get('enabled'))
-    if self.notifier_config.get('email_connector'):
-        is_email_summary_enabled = True
    if inventory_notifier_config.get('email_summary'):
        is_email_summary_enabled = (
            inventory_notifier_config.get('email_summary')

I am not sure why this `if` is needed. Should `is_email_summary_enabled` really be set to true here by only checking for `email_connector`?

try:
    is_gcs_summary_enabled = (
        inventory_notifier_config.get('gcs_summary').get('enabled'))
    if inventory_notifier_config.get('email_summary'):
        is_email_summary_enabled = (
            inventory_notifier_config.get('email_summary')
codereview_python_data_7849
email = models.EmailField(_("Email"), db_index=True, blank=True)

# This key are used to confirm and cancel alerts for anon users
-    key = models.CharField(_("Key"), max_length=128, blank=True, unique=True, db_index=True)

# An alert can have two different statuses for authenticated
# users ``ACTIVE`` and ``CANCELLED`` and anonymous users have an

This won't work, because the key is left blank if the user is logged in. To see the problem just try creating two alerts for different products, as a logged-in user, and it will fail.

email = models.EmailField(_("Email"), db_index=True, blank=True)

# This key are used to confirm and cancel alerts for anon users
+    key = models.CharField(_("Key"), max_length=128, blank=True, null=True, unique=True, db_index=True)

# An alert can have two different statuses for authenticated
# users ``ACTIVE`` and ``CANCELLED`` and anonymous users have an
codereview_python_data_7857
-def transform(old):
    pass

`legacy_data` will look better. What do you think?

+def transform(legacy_data):
    pass
codereview_python_data_7861
        stride=1,
        bias=True)
    self.init_cfg = dict(
-        override=dict(type='Constant', val=0, name='rfp_conv'))

def rfp_forward(self, x, rfp_feat):
    """The forward function that also takes the RFP features as input."""

in __init__ self.init_cfg = dict(type='Constant', val=0), override=dict(name='rfp_conv')) if inherit `BaseModule`, there is no need to call init_weights(),

        stride=1,
        bias=True)
    self.init_cfg = dict(
+        type='Constant', val=0, override=dict(name='rfp_conv'))

def rfp_forward(self, x, rfp_feat):
    """The forward function that also takes the RFP features as input."""
codereview_python_data_7866
    return res

# Cell
-def _dcm2dict(fn,window,**kwargs): return fn.dcmread().as_dict(window=window, **kwargs)

# Cell
@delegates(parallel)

should `window` have a default argument?

    return res

# Cell
+def _dcm2dict(fn, window=dicom_windows.brain, **kwargs): return fn.dcmread().as_dict(window=window, **kwargs)

# Cell
@delegates(parallel)
codereview_python_data_7871
:param destination_s3_path: URL for target S3 location
:param kwargs: Keyword arguments are passed to the boto function `put_object`
"""
-    self._check_deprecated_argument(kwargs)

# put the file
self.put_multipart(local_path, destination_s3_path, **kwargs)

it should already be clear by the method name, but could we `:param content: Data str` or something. `Object data` just makes me hesitate.

:param destination_s3_path: URL for target S3 location
:param kwargs: Keyword arguments are passed to the boto function `put_object`
"""
+    self._check_deprecated_argument(**kwargs)

# put the file
self.put_multipart(local_path, destination_s3_path, **kwargs)
codereview_python_data_7872
        in_channels=[128, 256, 512, 1024],
        out_channels=256,
        num_outs=5))
-
-find_unused_parameters = True

This should be unnecessary as only the backbone and neck are changed. Otherwise, we should clean the code of implementation.

        in_channels=[128, 256, 512, 1024],
        out_channels=256,
        num_outs=5))