[Dataset viewer header: column `id` (string lengths 24–28), column `content` (string lengths 121–2.08k).]
codereview_python_data_916
total_epochs = 12 dist_params = dict(backend='nccl') log_level = 'INFO' -work_dir = './work_dirs/faster_rcnn_r50_fpn_1x' load_from = None resume_from = None workflow = [('train', 1)] Use the same name as the config file. total_epochs = 12 dist_params = dict(backend='nccl') log_level = 'INFO' +work_dir = './work_dirs/faster_rcnn_r50_fpn_carafe_1x' load_from = None resume_from = None workflow = [('train', 1)]
codereview_python_data_917
spotipy_call = getattr(spotipy_client, endpoint) recently_played = spotipy_call(**kwargs) break - except (AttributeError, TypeError) as err: - current_app.logger.critical("Invalid spotipy endpoint or arguments:", err, exc_info=True) return None except SpotifyException as e: retries -= 1 Arguments to logger functions don't work like print. If you want to give an argument, you need a %-format placeholder in the string. It should be something like: > current_app.logger.critical("Invalid spotipy endpoint or arguments: %s", err, exc_info=True) However, do we even need to pass the exception if we also pass `exc_info=True`? spotipy_call = getattr(spotipy_client, endpoint) recently_played = spotipy_call(**kwargs) break + except (AttributeError, TypeError): + current_app.logger.critical("Invalid spotipy endpoint or arguments:", exc_info=True) return None except SpotifyException as e: retries -= 1
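A minimal sketch of the logging behaviour the reviewer describes; the standard-library `logging` module stands in for Flask's `current_app.logger`, which shares the same API:

```python
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("demo")

err = TypeError("unexpected keyword argument")

# Print-style extra arguments are treated as %-format values; with no
# placeholder in the message, logging reports an internal formatting error
# instead of the message you wanted:
#   logger.critical("Invalid spotipy endpoint or arguments:", err)

# %-placeholder style formats lazily, only when the record is emitted:
logger.critical("Invalid spotipy endpoint or arguments: %s", err)

# Inside an except block, exc_info=True appends the full traceback,
# which makes passing the exception object redundant:
try:
    raise err
except TypeError:
    logger.critical("Invalid spotipy endpoint or arguments:", exc_info=True)
```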
codereview_python_data_926
def declare_var(self, name, type, pos, cname = None, visibility = 'private', - api = 0, in_pxd = 0, is_cdef = True, - walrus_target = False): - if walrus_target: - # should be declared in the parent scope instead - entry = self.parent_scope.declare_var(name, type, pos, - cname=cname, visibility=visibility, - api=api, in_pxd=in_pxd, is_cdef=is_cdef, - walrus_target=walrus_target) - entry.is_walrus_assigned_in_genexpr = True - return entry if type is unspecified_type: # if the outer scope defines a type for this variable, inherit it outer_entry = self.outer_scope.lookup(name) Probably worth looking at how `nonlocal` is implemented, might be similar enough to benefit from. def declare_var(self, name, type, pos, cname = None, visibility = 'private', + api = 0, in_pxd = 0, is_cdef = True): if type is unspecified_type: # if the outer scope defines a type for this variable, inherit it outer_entry = self.outer_scope.lookup(name)
codereview_python_data_927
self.props_file = None self.class_path = [] self._tools = [] - self.hamcrest_path = "~/.bzt/selenium-taurus/tools/junit/hamcrest-core.jar" - self.json_jar_path = "~/.bzt/selenium-taurus/tools/junit/json.jar" - self.selenium_server_path = "~/.bzt/selenium-taurus/selenium-server.jar" def install_required_tools(self): self._check_tools(self._tools) def prepare(self): self.script = self.get_script_path(required=True) I would argue for `self.tools` for consistency (all other attributes are not private). self.props_file = None self.class_path = [] self._tools = [] def install_required_tools(self): self._check_tools(self._tools) + def _add_jar_tool(self, req_tool): + self._tools.append(req_tool) + self.class_path.append(req_tool.tool_path) + def prepare(self): self.script = self.get_script_path(required=True)
codereview_python_data_935
# pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. stack = deque(self.tasks) - i = 0 while stack: task = maybe_signature(stack.popleft(), app=self._app).clone() if isinstance(task, group): Perhaps minor, but I think we could also name the variable `group_index` instead of `i`. # pylint: disable=redefined-outer-name # XXX chord is also a class in outer scope. stack = deque(self.tasks) + group_index = 0 while stack: task = maybe_signature(stack.popleft(), app=self._app).clone() if isinstance(task, group):
codereview_python_data_937
model.eval() print("start eval") embed = model(test_graph, test_node_id, test_rel, test_norm) - mrr = utils.calc_filtered_mrr(embed, model.w_relation, torch.LongTensor(train_data), - valid_data, test_data, hits=[1, 3, 10]) # save best model if mrr < best_mrr: if epoch >= args.n_epochs: I suggest you output both mrr and filtered_mrr here. model.eval() print("start eval") embed = model(test_graph, test_node_id, test_rel, test_norm) + mrr = utils.calc_mrr(embed, model.w_relation, torch.FloatTensor(train_data), + valid_data, test_data, hits=[1, 3, 10], eval_bz=args.eval_batch_size, + eval_p=args.eval_protocol) # save best model if mrr < best_mrr: if epoch >= args.n_epochs:
codereview_python_data_940
values = data_handler.get_additional_values_for_variable( name, testcase.job_type, testcase.fuzzer_name) - # TODO(ochang): Find a less hacky way of concatenating multiple values. return values + data_handler.get_additional_values_for_variable( name + '_1', testcase.job_type, testcase.fuzzer_name) Should we use ADDITIONAL_*? At least that gets rid of this TODO and roughly matches the style of ADDITIONAL_ASAN_OPTIONS. values = data_handler.get_additional_values_for_variable( name, testcase.job_type, testcase.fuzzer_name) return values + data_handler.get_additional_values_for_variable( name + '_1', testcase.job_type, testcase.fuzzer_name)
codereview_python_data_942
If the graph is homogeneous, one can directly pass the above formats. Otherwise, the argument must be a dictionary with keys being node types and values being the nodes. - store_raw_ids : bool, optional If True, it will store the raw IDs of the extracted nodes and edges in the ``ndata`` and ``edata`` of the resulting graph under name ``dgl.NID`` and ``dgl.EID``, respectively. `store_raw_ids` or `store_ids`? If the graph is homogeneous, one can directly pass the above formats. Otherwise, the argument must be a dictionary with keys being node types and values being the nodes. + store_ids : bool, optional If True, it will store the raw IDs of the extracted nodes and edges in the ``ndata`` and ``edata`` of the resulting graph under name ``dgl.NID`` and ``dgl.EID``, respectively.
codereview_python_data_944
image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) - pos_inds (Tensor): Inds of postive anchor with shape (num_pos,). - neg_inds (Tensor): Inds of negative anchor with shape (num_neg,). """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags, Inds -> Indices image with shape (N, 4). bbox_weights (Tensor): BBox weights of all anchors in the image with shape (N, 4) + pos_inds (Tensor): Indices of positive anchor with shape (num_pos,). + neg_inds (Tensor): Indices of negative anchor with shape (num_neg,). """ inside_flags = anchor_inside_flags(flat_anchors, valid_flags,
codereview_python_data_945
self.dedup_names() return True - # This is a noop operation for the python data def shallow_copy(self): self.df_shallow_copy = self.df.copy() self.df_deep_copy = copy.deepcopy(self.df) Does `copy.deepcopy` even work on a Frame? self.dedup_names() return True + # This is a noop for the python data def shallow_copy(self): self.df_shallow_copy = self.df.copy() self.df_deep_copy = copy.deepcopy(self.df)
codereview_python_data_953
class TestResidueGroup(object): # Legacy tests from before 363 - # def setUp(self): - # """Set up the standard AdK system in implicit solvent.""" - # self.universe = mda.Universe(PSF, DCD) - # self.rg = self.universe.residues - @pytest.fixture() def universe(self): return mda.Universe(PSF, DCD) this should be deleted class TestResidueGroup(object): # Legacy tests from before 363 @pytest.fixture() def universe(self): return mda.Universe(PSF, DCD)
codereview_python_data_956
lock. metadata (dict): Metadata to be stored along with the Transaction. """ if operation not in Transaction.ALLOWED_OPERATIONS: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS) This should not be removed but we could change the type of version to `str` lock. metadata (dict): Metadata to be stored along with the Transaction. + version (string): Defines the version number of a Transaction. """ if operation not in Transaction.ALLOWED_OPERATIONS: allowed_ops = ', '.join(self.__class__.ALLOWED_OPERATIONS)
codereview_python_data_962
return no_js = config.val.hints.find_implementation != 'javascript' rect = self.elem.rect_on_view(no_js=no_js) - if rect.y()<0 and (rect.y()+rect.height()-15>0): - self.move(rect.x(), rect.y()+rect.height()-15) - else: - self.move(rect.x(), rect.y()) def cleanup(self): """Clean up this element and hide it.""" I don't think hard-coding 15 here is a good idea. We can get the current height of the label with `self.height()`; however, at this point, the label doesn't seem to have been scaled to its contents yet, and seems to have the default size. One option here is to call `self.adjustSize()` to size the element before calling the height function. Note that there will be no text at this point, so the hint's width will be incorrect, but I think the height should be right. When calling `adjustSize` before `height`, I get a value of 16 on my computer (which may vary depending on the fonts used, for example). return no_js = config.val.hints.find_implementation != 'javascript' rect = self.elem.rect_on_view(no_js=no_js) + self.move(max(rect.x(),0),max(rect.y(),0)) def cleanup(self): """Clean up this element and hide it."""
codereview_python_data_967
h[:], edges[:] = np.histogramdd(coord, bins=bins, range=arange, normed=False) grid += h # accumulate average histogram - start, stop, step = u.trajectory.check_slice_indices(start, stop, step) n_frames = len(range(start, stop, step)) grid /= float(n_frames) The slice check should come before the loop over frames - we (I?) forgot it. h[:], edges[:] = np.histogramdd(coord, bins=bins, range=arange, normed=False) grid += h # accumulate average histogram + n_frames = len(range(start, stop, step)) grid /= float(n_frames)
codereview_python_data_978
# explicit relative cimport # error of going beyond top-level is handled in cimport node relative_to = self - while relative_level > 0 and relative_to and self._is_package_scope_or_module(): relative_to = relative_to.parent_module relative_level -= 1 elif relative_level != 0: This will probably be reworked, but it seems wasteful to do something as expensive as this as part of a loop, even though it's obvious that the result will never change. Which, BTW, seems problematic. My guess is that you'd want to check parent packages as well, not just the package directory itself. # explicit relative cimport # error of going beyond top-level is handled in cimport node relative_to = self + while relative_level > 0 and relative_to and not self.is_package: relative_to = relative_to.parent_module relative_level -= 1 elif relative_level != 0:
codereview_python_data_982
tags = params.get("Tags") or [] topic_name = params.get("TopicName") if dedup is not None: - attributes["ContentBasedDeduplication"] = ( - "true" if str(dedup).lower() == "true" else "false" - ) if display_name: attributes["DisplayName"] = display_name if fifo_topic is not None: - attributes["FifoTopic"] = "true" if str(fifo_topic).lower() == "true" else "false" if kms_master_key: attributes["KmsMasterKeyId"] = kms_master_key result = {"Name": topic_name, "Attributes": attributes, "Tags": tags} nit: Looks like this could be a useful utility that could be used in different places. We could extract it into a small function in `common.py` (e.g. something like `canonicalize_bool_str(...)`). tags = params.get("Tags") or [] topic_name = params.get("TopicName") if dedup is not None: + attributes["ContentBasedDeduplication"] = canonicalize_bool_to_str(dedup) if display_name: attributes["DisplayName"] = display_name if fifo_topic is not None: + attributes["FifoTopic"] = canonicalize_bool_to_str(fifo_topic) if kms_master_key: attributes["KmsMasterKeyId"] = kms_master_key result = {"Name": topic_name, "Attributes": attributes, "Tags": tags}
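A sketch of the helper the reviewer has in mind; the name `canonicalize_bool_to_str` is taken from the revised snippet, and the exact semantics (anything other than a case-insensitive "true" maps to "false") are an assumption based on the original inline expressions:

```python
def canonicalize_bool_to_str(value) -> str:
    """Map True/"true"/"True" to "true" and everything else to "false"."""
    return "true" if str(value).lower() == "true" else "false"


assert canonicalize_bool_to_str(True) == "true"
assert canonicalize_bool_to_str("True") == "true"
assert canonicalize_bool_to_str(False) == "false"
assert canonicalize_bool_to_str("yes") == "false"  # matches the original ternaries
```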
codereview_python_data_984
return [obj['comment']['content']] if obj['comment'] else [] def parse_entity(obj, section_comment=[]): - translation = FTLSerializer().dumpEntity(obj).split(' = ', 1)[1] self.entities[obj['id']['name']] = L20NEntity( obj['id']['name'], translation, The string is coming from the serializer so we control it right now, but it might be prudent to consider FTL's whitespace semantics here in case the serializer changes in the future. There might not be whitespace around `=` or there might be more than one after it. The following examples result in the exact same value of "Foo": ``` foo=Foo foo = Foo foo = Foo ``` For leading whitespace to be significant and considered part of the translation, it needs to be surrounded by quotes: ``` foo = " Foo" ``` Perhaps something like the following will be more future-proof? ``` translation = FTLSerializer().dumpEntity(obj).split('=', 1)[1].lstrip() ``` return [obj['comment']['content']] if obj['comment'] else [] def parse_entity(obj, section_comment=[]): + translation = FTLSerializer().dumpEntity(obj).split('=', 1)[1].lstrip(' ') self.entities[obj['id']['name']] = L20NEntity( obj['id']['name'], translation,
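A quick check of the whitespace semantics described above, using plain Python string operations independent of the FTL serializer (the committed code uses `.lstrip(' ')` rather than `.lstrip()`, which differs only for tabs and other non-space whitespace):

```python
# All three serializations carry the same value, "Foo":
for line in ["foo=Foo", "foo = Foo", "foo  =   Foo"]:
    assert line.split("=", 1)[1].lstrip() == "Foo"

# Significant leading whitespace must be quoted in FTL, so it survives lstrip():
assert 'foo = " Foo"'.split("=", 1)[1].lstrip() == '" Foo"'
```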
codereview_python_data_985
if action_data is not None: groupdata.append(action_data) groups.append('\n'.join(groupdata)) - # pylint: enable=protected-access options = '\n'.join(groups) # epilog if parser.epilog is not None: I think you can move the disable/enable around the `for group in parser._action_groups:` line here. if action_data is not None: groupdata.append(action_data) groups.append('\n'.join(groupdata)) options = '\n'.join(groups) # epilog if parser.epilog is not None:
codereview_python_data_989
can handle negative edge weights. If a negative cycle is detected, you can use :func:`find_negative_cycle` - to return the cycle and examine it. Shoftest paths are not defined when a negative cycle exists because once reached, the path can cycle forever to build up arbitrarily low weights. ```suggestion to return the cycle and examine it. Shortest paths are not defined when ``` can handle negative edge weights. If a negative cycle is detected, you can use :func:`find_negative_cycle` + to return the cycle and examine it. Shortest paths are not defined when a negative cycle exists because once reached, the path can cycle forever to build up arbitrarily low weights.
codereview_python_data_993
selected_scale = ProtParamData.gravy_scales.get(scale, -1) if selected_scale == -1: - raise ValueError("scale: {} not know".format(scale)) total_gravy = sum(selected_scale[aa] for aa in self.sequence) Typo: know -> known. Style: Please use an f-string rather than the format method. Ideally there would be a simple test for the unknown scale, using ``.assertRaises``. selected_scale = ProtParamData.gravy_scales.get(scale, -1) if selected_scale == -1: + raise ValueError(f"scale: {scale} not known") total_gravy = sum(selected_scale[aa] for aa in self.sequence)
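A sketch of the kind of test the reviewer asks for; `lookup_scale` is a hypothetical stand-in for the Biopython method, reduced to just the lookup-and-raise logic:

```python
import unittest


def lookup_scale(scales: dict, scale: str) -> dict:
    selected = scales.get(scale)  # None default instead of the -1 sentinel
    if selected is None:
        raise ValueError(f"scale: {scale} not known")
    return selected


class TestUnknownScale(unittest.TestCase):
    def test_unknown_scale_raises(self):
        with self.assertRaises(ValueError):
            lookup_scale({"KyteDoolittle": {"A": 1.8}}, "NoSuchScale")


if __name__ == "__main__":
    unittest.main()
```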
codereview_python_data_994
def __init__(self, name, cname, typedef_flag, namespace=None, doc=None): self.name = name - if doc is None: - self.doc = "An enumeration." - else: - self.doc = doc self.cname = cname self.values = [] self.typedef_flag = typedef_flag The string constant would need to be a `StringEncoding.EncodedString()`, but rather, I think we should inherit the docstring from the superclass here and not set a default. ```suggestion self.doc = doc ``` I understand that this makes the test a little more difficult, but I'd rather not make a test rely on the docstring of Python's `IntEnum` class at all, so I'm fine with a test that the docstrings set for other enum classes do not leak into the enums that have no docstring, and not testing for a specific string value. def __init__(self, name, cname, typedef_flag, namespace=None, doc=None): self.name = name + self.doc = doc self.cname = cname self.values = [] self.typedef_flag = typedef_flag
codereview_python_data_997
} ] ) - time.sleep(3) resp = cfn.describe_stacks(StackName=stack_name) stack_outputs = [stack['Outputs'] for stack in resp['Stacks'] if stack['StackName'] == stack_name] Let's use this here instead of hardcoding a sleep time: ``` _await_stack_completion(stack_name) ``` } ] ) + _await_stack_completion(stack_name) resp = cfn.describe_stacks(StackName=stack_name) stack_outputs = [stack['Outputs'] for stack in resp['Stacks'] if stack['StackName'] == stack_name]
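The `_await_stack_completion` helper itself is not shown in this record; a plausible sketch using boto3's built-in CloudFormation waiter (the helper's actual implementation may well differ):

```python
import boto3


def _await_stack_completion(stack_name: str) -> None:
    """Poll until the stack reaches CREATE_COMPLETE instead of sleeping
    for a hard-coded number of seconds."""
    cfn = boto3.client("cloudformation")
    waiter = cfn.get_waiter("stack_create_complete")
    waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 3, "MaxAttempts": 40})
```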
codereview_python_data_1001
mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) if (result.cmdline[0] != 'repeat-command' and - result.cmd.mode_allowed(mode_manager.mode)): last_command[mode_manager.mode] = ( self._parse_count(text)[1], count if count is not None else result.count) Is this check still needed? If I'm not missing anything, when the command isn't allowed in the current mode, `result.cmd.run` above will already bail out with a `cmdexc.PrerequisitesError`. That means you should be able to get rid of this and `mode_allowed` above. mode_manager = objreg.get('mode-manager', scope='window', window=self._win_id) if (result.cmdline[0] != 'repeat-command' and + result.cmdline[0] != 'prompt-accept'): last_command[mode_manager.mode] = ( self._parse_count(text)[1], count if count is not None else result.count)
codereview_python_data_1003
return result, log_output - def port(self): - """ Return a randomly container port""" - return self.port - class ContainerInfo: """ I think we should remove that method; it looks like it is not being used. (Also, it potentially clashes with the attribute `self.port` of the same name.) return result, log_output class ContainerInfo: """
codereview_python_data_1004
wrapper = os.path.join(get_full_path(__file__, step_up=2), "resources", "locustio-taurus-wrapper.py") self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)}) self.log_file = self.engine.create_artifact("locust", ".log") args = [sys.executable, wrapper, '-f', self.script] Both of these are important. Without them, locust won't find my libraries in the cloud. wrapper = os.path.join(get_full_path(__file__, step_up=2), "resources", "locustio-taurus-wrapper.py") self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)}) + self.env.add_path({"PYTHONPATH": get_full_path(__file__, step_up=3)}) self.log_file = self.engine.create_artifact("locust", ".log") args = [sys.executable, wrapper, '-f', self.script]
codereview_python_data_1009
output, _ = docker_client.exec_in_container( container_info.container_id, env_vars=env, command=["env"] ) - print(output) output = output.decode(config.DEFAULT_ENCODING) assert "MYVAR" not in output nit: could be removed output, _ = docker_client.exec_in_container( container_info.container_id, env_vars=env, command=["env"] ) output = output.decode(config.DEFAULT_ENCODING) assert "MYVAR" not in output
codereview_python_data_1013
ex_info.match("can have one and only one SCHEMA with name GVT and version 1.0'") -def test_submit_schema_without_role(looper, public_repo_for_client, - schema): with pytest.raises(OperationError) as ex_info: looper.run( public_repo_for_client.submitSchema(schema) It's better to name it `test_can_not_submit_schema_by_identity_owner`. ex_info.match("can have one and only one SCHEMA with name GVT and version 1.0'") +def test_can_not_submit_schema_by_identity_owner(looper, + public_repo_for_client, + schema): with pytest.raises(OperationError) as ex_info: looper.run( public_repo_for_client.submitSchema(schema)
codereview_python_data_1015
__all__ = [ 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net', - 'Hourglass' ] HourglassNet might be more appropriate. __all__ = [ 'RegNet', 'ResNet', 'ResNetV1d', 'ResNeXt', 'SSDVGG', 'HRNet', 'Res2Net', + 'HourglassNet' ]
codereview_python_data_1018
method = lambda x: None # noqa: E731 if f: p = os.path.join(tutorial_base, f) - method.__doc__ = "%s\n\n>>> import os\n>>> os.chdir(%r)\n%s\n" % ( - n, - p, - d, - ) else: method.__doc__ = "%s\n\n%s\n" % (n, d) method._folder = f That looks horrible. How about an f-string here? ```python method.__doc__ = f"{n}\n\n>>> import os\n>>> os.chdir({p!r})\n{d}\n" ``` method = lambda x: None # noqa: E731 if f: p = os.path.join(tutorial_base, f) + method.__doc__ = f"{n}\n\n>>> import os\n>>> os.chdir({p!r})\n{d}\n" else: method.__doc__ = "%s\n\n%s\n" % (n, d) method._folder = f
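The two spellings are equivalent; `!r` is the f-string counterpart of the `%r` specifier (a quick self-contained check, with made-up values for `n`, `f`, and `d`):

```python
import os

n, f, d = "tutorial_name", "examples", "Tutorial body text."
p = os.path.join("/tmp/tutorial_base", f)

old = "%s\n\n>>> import os\n>>> os.chdir(%r)\n%s\n" % (n, p, d)
new = f"{n}\n\n>>> import os\n>>> os.chdir({p!r})\n{d}\n"
assert old == new
```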
codereview_python_data_1028
def _adjust_cbar(self, cbar, label, dim): noalpha = math.floor(self.style[self.cyclic_index].get('alpha', 1)) == 1 - for label in ['clabel', 'labels']: - labelsize = self._fontsize(label, common=False).get('fontsize') if labelsize is not None: break This loop variable shadows the ``label`` argument used below. def _adjust_cbar(self, cbar, label, dim): noalpha = math.floor(self.style[self.cyclic_index].get('alpha', 1)) == 1 + for lb in ['clabel', 'labels']: + labelsize = self._fontsize(lb, common=False).get('fontsize') if labelsize is not None: break
codereview_python_data_1029
.. deprecated:: 2.6 Use `partition_quality` instead. - The *performance* of a partition is the ratio of the number of intra-community edges plus inter-community non-edges divided by the total number of potential edges. The original wording had "the ratio of" which implies the division of the two quantities. I personally think your wording choice is better, but it should also include removing "the ratio of" from the previous line. .. deprecated:: 2.6 Use `partition_quality` instead. + The *performance* of a partition is the number of intra-community edges plus inter-community non-edges divided by the total number of potential edges.
codereview_python_data_1034
def get_domain_arn(domain_name: str, region: str = None, account_id: str = None) -> str: - region = region or aws_stack.get_region() - account_id = account_id or TEST_AWS_ACCOUNT_ID - return "arn:aws:es:%s:%s:domain/%s" % (region, account_id, domain_name) def parse_domain_arn(arn: str): nit: could also reuse `aws_stack.elasticsearch_domain_arn(..)` here. (note to self: ideally we should extract all the ARN util functions out of `aws_stack.py` into a separate file..). def get_domain_arn(domain_name: str, region: str = None, account_id: str = None) -> str: + return aws_stack.elasticsearch_domain_arn( + domain_name=domain_name, account_id=account_id, region_name=region + ) def parse_domain_arn(arn: str):
codereview_python_data_1036
return bool(data.draw_bits(1)) -def biased_coin(data, p, forced=None): """Return True with probability p (assuming a uniform generator), shrinking towards False. If ``forced`` is set to a non-None value, this will always return that value but will write choices appropriate to having I'd consider making `forced` a keyword-only argument, for much the same reason as we use them in the public API. return bool(data.draw_bits(1)) +def biased_coin(data, p, *, forced=None): """Return True with probability p (assuming a uniform generator), shrinking towards False. If ``forced`` is set to a non-None value, this will always return that value but will write choices appropriate to having
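A toy illustration of the bare-`*` marker the suggestion uses; the real `biased_coin` body is omitted and replaced with a trivial stand-in:

```python
def biased_coin_sketch(data, p, *, forced=None):
    """Parameters after the bare * can only be passed by keyword."""
    return forced if forced is not None else data < p


biased_coin_sketch(0.3, 0.5)               # ok
biased_coin_sketch(0.3, 0.5, forced=True)  # ok
try:
    biased_coin_sketch(0.3, 0.5, True)     # rejected: forced is keyword-only
except TypeError as exc:
    print(exc)
```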
codereview_python_data_1042
elif self.client_type == 'kerberos': from hdfs.ext.kerberos import KerberosClient return KerberosClient(url=self.url) - elif self.client_type == 'token': - import hdfs - return hdfs.TokenClient(url=self.url, token=self.token) else: raise ValueError("Error: Unknown client type specified in webhdfs client_type" "configuration parameter") You could skip this if you use the ChoiceParameter. elif self.client_type == 'kerberos': from hdfs.ext.kerberos import KerberosClient return KerberosClient(url=self.url) else: raise ValueError("Error: Unknown client type specified in webhdfs client_type" "configuration parameter")
codereview_python_data_1047
``` """ if mode not in ['multiclass', 'multilabel']: - raise TypeError('mode must be: [multiclass, multilabel]') if threshold is None: threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True) nit: mode must be: `multiclass` or `multilabel`. The current way looks as though it takes in a list. ``` """ if mode not in ['multiclass', 'multilabel']: + raise TypeError('mode must be either multiclass or multilabel') if threshold is None: threshold = tf.reduce_max(y_pred, axis=-1, keepdims=True)
codereview_python_data_1050
def test_get_wrong_n_atoms(self): with pytest.raises(ValueError, match=r"Supplied n_atoms *"): - mda.Universe(TRZ, n_atoms = 8080) class TestTRZWriter(RefTRZ): ```suggestion mda.Universe(TRZ, n_atoms=8080) ``` def test_get_wrong_n_atoms(self): with pytest.raises(ValueError, match=r"Supplied n_atoms *"): + mda.Universe(TRZ, n_atoms=8080) class TestTRZWriter(RefTRZ):
codereview_python_data_1052
@raises( RuntimeError, - glob="Assert on \"HasArgument(name)\" failed: Argument \"preprocessed_annotations_dir\" is not supported by operator \"readers__COCO\".") def test_invalid_args(): pipeline = Pipeline(batch_size=2, num_threads=4, device_id=0) with pipeline: 1. Don't include the assertion condition in the message. 2. I remember that in one meeting there was a conclusion that putting entire error messages verbatim is excessive. Here, I'd suggest putting a wildcard in place of `__`, which we hope to replace with the proper module name at some point. 3. Nitpick: use single quotes, so you don't have to escape the double quotes - it keeps the pattern more readable. 4. Nitpick: glob can be specified with a positional argument - it keeps the line shorter ```suggestion 'Argument "preprocessed_annotations_dir" is not supported by operator *readers*COCO') ``` @raises( RuntimeError, + glob='Argument "preprocessed_annotations_dir" is not supported by operator *readers*COCO') def test_invalid_args(): pipeline = Pipeline(batch_size=2, num_threads=4, device_id=0) with pipeline:
codereview_python_data_1054
self.data_layout = data_layout if self.data_layout == "DHWC": - D, H, W = self.data_shape[0], self.data_shape[1], self.data_shape[2] elif self.data_layout == "CDHW": - D, H, W = self.data_shape[1], self.data_shape[2], self.data_shape[3] elif self.data_layout == "FHWC" and crop_seq_as_depth: - D, H, W = self.data_shape[0], self.data_shape[1], self.data_shape[2] else: assert(False) Is it possible to have "FCHW" as well? self.data_layout = data_layout if self.data_layout == "DHWC": + D, H, W, _ = self.data_shape elif self.data_layout == "CDHW": + _, D, H, W = self.data_shape elif self.data_layout == "FHWC" and crop_seq_as_depth: + D, H, W, _ = self.data_shape + elif self.data_layout == "FCHW" and crop_seq_as_depth: + D, _, H, W = self.data_shape else: assert(False)
codereview_python_data_1071
'playbook': 'playbook.yml', 'raw_ssh_args': [ '-o UserKnownHostsFile=/dev/null', - '-o IdentitiesOnly=yes', '-o ControlMaster=auto', '-o ControlPersist=60s', '-o IdentitiesOnly=yes', Aren't you duplicating this option? 'playbook': 'playbook.yml', 'raw_ssh_args': [ '-o UserKnownHostsFile=/dev/null', '-o ControlMaster=auto', '-o ControlPersist=60s', '-o IdentitiesOnly=yes',
codereview_python_data_1075
print('Done saving data into cached files.') def _get_hash(self): return abs(hash(self._hash_key)) @property I think you need to put an example here. print('Done saving data into cached files.') def _get_hash(self): + """Compute the hash of the input tuple + + Example + ------- + >>> hash_value = self._get_hash((10, False, True)) + >>> hash_value + 6299899980521991026 + """ return abs(hash(self._hash_key)) @property
codereview_python_data_1076
input_ = InputCell(1) output = ComputeCell([input_], lambda inputs: inputs[0] + 1) - def callback1(value): - return value output.add_callback(callback1) input_.value = 3 - self.assertEqual(output.expect_callback_values(callback1), [4]) def test_callbacks_only_fire_on_change(self): input_ = InputCell(1) I'm not sure that I like `expect_callback_values` being a member of `ComputeCell`... What do you think about something like this? ```Python callback_buffer = [] output.add_callback(callback_buffer.append) input_.value = 3 self.assertEqual(callback_buffer, [4]) ``` input_ = InputCell(1) output = ComputeCell([input_], lambda inputs: inputs[0] + 1) + observer = [] + callback1 = self.callback_factory(observer) output.add_callback(callback1) input_.value = 3 + self.assertEqual(observer[-1], 4) def test_callbacks_only_fire_on_change(self): input_ = InputCell(1)
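The reviewer's point is that a plain list is a sufficient test double for callbacks; a self-contained sketch with a minimal stand-in for `ComputeCell` (the real exercism class is more elaborate):

```python
class Cell:
    """Minimal observable value, standing in for ComputeCell."""

    def __init__(self, value):
        self._value = value
        self._callbacks = []

    def add_callback(self, cb):
        self._callbacks.append(cb)

    def set(self, value):
        if value != self._value:
            self._value = value
            for cb in self._callbacks:
                cb(value)


cell = Cell(2)
callback_buffer = []                      # the whole test double
cell.add_callback(callback_buffer.append)
cell.set(4)
assert callback_buffer == [4]
cell.set(4)                               # no change, so no callback fires
assert callback_buffer == [4]
```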
codereview_python_data_1079
import json import os from pokemongo_bot.base_task import BaseTask from pokemongo_bot.worker_result import WorkerResult from pokemongo_bot.tree_config_builder import ConfigException Excuse my python noobishness, but what is the difference between `class _Item:` and `class _Item(object):`? import json import os +from pokemongo_bot import inventory from pokemongo_bot.base_task import BaseTask from pokemongo_bot.worker_result import WorkerResult from pokemongo_bot.tree_config_builder import ConfigException
codereview_python_data_1094
pruner_stats['edge_coverage'], pollinator_stats['edge_coverage'], pruner_stats['feature_coverage'], pollinator_stats['feature_coverage']) - result = CorpusPruningResult( coverage_info=coverage_info, crashes=list(crashes.values()), fuzzer_binary_name=fuzzer_binary_name, revision=environment.get_value('APP_REVISION'), cross_pollination_stats=cross_pollination_stats) - return result - def _process_corpus_crashes(context, result): """Process crashes found in the corpus.""" nit: no need for the result variable now; you can just return the CorpusPruningResult directly. pruner_stats['edge_coverage'], pollinator_stats['edge_coverage'], pruner_stats['feature_coverage'], pollinator_stats['feature_coverage']) + return CorpusPruningResult( coverage_info=coverage_info, crashes=list(crashes.values()), fuzzer_binary_name=fuzzer_binary_name, revision=environment.get_value('APP_REVISION'), cross_pollination_stats=cross_pollination_stats) def _process_corpus_crashes(context, result): """Process crashes found in the corpus."""
codereview_python_data_1101
[Diff context unavailable: the code snippets for this record were replaced by a cached Varnish "Error 503 Backend is unhealthy" page.] I noticed in the IAM & Admin -> Quotas dashboard, under "All Quotas", that there doesn't seem to be a ratelimit for the Admin SDK, but just the total number of queries that can be made to it in a day. Do you happen to know what would happen if someone should exceed that quota? It looks like the default is 150,000 queries per day.
codereview_python_data_1103
"**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"] } -issues_github_path = "modin-project/modin" \ No newline at end of file ```suggestion issues_github_path = "modin-project/modin" ``` "**": ["globaltoc.html", "relations.html", "sourcelink.html", "searchbox.html"] } \ No newline at end of file +issues_github_path = "modin-project/modin"
codereview_python_data_1108
self.engine.aggregator.add_listener(self) disable = str(self.settings.get('disable', 'auto')).lower() - if (disable == 'true') or ((disable == 'auto') and (not is_tty())): self.disabled = True return In the case of "true" you'll get the boolean value of the option. This condition can be simplified into something like `self.disable=(disable==auto and not is_tty()) or disable` self.engine.aggregator.add_listener(self) disable = str(self.settings.get('disable', 'auto')).lower() + if (disable == 'true') or ((disable == 'auto') and (not sys.stdout.isatty())): self.disabled = True return
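A sketch of the simplified condition; note the reviewer's one-liner is pseudocode, and a literal translation still needs the string comparisons spelled out:

```python
import sys


def should_disable(setting) -> bool:
    disable = str(setting).lower()
    # "true" disables unconditionally; "auto" disables only when stdout
    # is not an interactive terminal (e.g. piped output or CI logs).
    return disable == "true" or (disable == "auto" and not sys.stdout.isatty())


print(should_disable("true"))   # True
print(should_disable("false"))  # False
print(should_disable("auto"))   # depends on whether stdout is a TTY
```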
codereview_python_data_1118
as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. n_communities: int - desired number of communities, defaults to 1 and falls back to 1 if - the given number is larger than the initial amount of communities Returns ------- ```suggestion n_communities: int Desired number of communities: the community merging process is terminated once this number of communities is reached. Must be between 1 and the total number of nodes in `G`. Default is ``1``, meaning the community merging process runs to completion. ``` Just some wording suggestion(s) to try to illustrate exactly what this parameter does. I'm not sure the wording I proposed is great, so feel free to improve --- my goal was to try to really highlight what this parameter does. as a weight. If None, then each edge has weight 1. The degree is the sum of the edge weights adjacent to the node. n_communities: int + Desired number of communities: the community merging process is + terminated once this number of communities is reached, or until + modularity can not be further increased. Must be between 1 and the + total number of nodes in `G`. Default is ``1``, meaning the community + merging process continues until all nodes are in the same community + or until the best community structure is found. Returns -------
codereview_python_data_1127
self.coords = coords n = reference_coords.shape m = coords.shape - if n != m or not n[1] == m[1] == 3: raise Exception("Coordinate number/dimension mismatch.") self.n = n[0] Is this definitely equivalent? The comparison of three terms is in itself perhaps not the best style... self.coords = coords n = reference_coords.shape m = coords.shape + if n != m or n[1] != 3 or m[1] != 3: raise Exception("Coordinate number/dimension mismatch.") self.n = n[0]
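The rewrite is equivalent by De Morgan's law, since `a == b == 3` chains to `a == 3 and b == 3`; an exhaustive check over the relevant values:

```python
from itertools import product

for a, b in product([2, 3, 4], repeat=2):
    assert (not (a == b == 3)) == (a != 3 or b != 3)
```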
codereview_python_data_1135
return Layout([self, obj]) def __radd__(self, other): - if isinstance(other, list): # Hack for Annotators? - return NotImplemented if isinstance(other, int): raise TypeError("unsupported operand type(s) for +: 'int' and 'Overlay'. " "If you are using `sum(elements)` to combine a list of elements," ```suggestion "If you are trying to use a reduction like `sum(elements)` to combine a list of elements," ``` return Layout([self, obj]) def __radd__(self, other): if isinstance(other, int): raise TypeError("unsupported operand type(s) for +: 'int' and 'Overlay'. " "If you are using `sum(elements)` to combine a list of elements,"
codereview_python_data_1136
ds_layout = None -class graph_redim(redim): """ Extension for the redim utility that allows re-dimensioning Graph objects including their nodes and edgepaths. """ def __call__(self, specs=None, **dimensions): - redimmed = super(graph_redim, self).__call__(specs, **dimensions) new_data = (redimmed.data,) if self.parent.nodes: new_data = new_data + (self.parent.nodes.redim(specs, **dimensions),) Minor point but I would call this ``redim_graph`` instead. ds_layout = None +class redim_graph(redim): """ Extension for the redim utility that allows re-dimensioning Graph objects including their nodes and edgepaths. """ def __call__(self, specs=None, **dimensions): + redimmed = super(redim_graph, self).__call__(specs, **dimensions) new_data = (redimmed.data,) if self.parent.nodes: new_data = new_data + (self.parent.nodes.redim(specs, **dimensions),)
codereview_python_data_1141
""" # ensure we get a 200 - success, service_error, resp = self.get_metadata('instance/compute', is_health=False) - if not success: - raise ValueError(resp) - data = json.loads(ustr(resp, encoding="utf-8")) compute_info = ComputeInfo() set_properties('compute', compute_info, data) Why is it a value error here? """ # ensure we get a 200 + result = self.get_metadata('instance/compute', is_health=False) + if not result.success: + raise HttpError(result.response) + data = json.loads(ustr(result.response, encoding="utf-8")) compute_info = ComputeInfo() set_properties('compute', compute_info, data)
codereview_python_data_1143
forest.union(u, v) -def _kruskal_mst_partition_edges( G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, - partition="partition", ): """ Iterate over edge of a Kruskal's algorithm min/max spanning tree with Add a "Yields" section to the docstring? forest.union(u, v) +def kruskal_mst_edges_partition( G, minimum, weight="weight", keys=True, data=True, ignore_nan=False, + partition=None, ): """ Iterate over edge of a Kruskal's algorithm min/max spanning tree with
codereview_python_data_1155
return worker_ip_to_port -def _find_random_open_port(): """Find a random open port on the machine. Returns we've been trying to use type hints everywhere in the Dask module, in a step towards #3756 can you please add this return hint? ```suggestion def _find_random_open_port() -> int: ``` return worker_ip_to_port +def _find_random_open_port() -> int: """Find a random open port on the machine. Returns
codereview_python_data_1158
def test_parse_upstream_auth(): tutils.raises("Invalid upstream auth specification", cmdline.parse_upstream_auth, "") assert cmdline.parse_upstream_auth( "test:test") == "Basic" + " " + base64.b64encode("test:test") def test_parse_setheaders(): I'm not sure what the RFC says, but are these allowed? - `test:` (empty password, maybe valid) - `:test` (empty username, probably invalid) - `:` (both empty - is it then a username by default, with empty password? or the other way around) We should make sure to cover these cases: in the tests as well as the cli-parsing part. def test_parse_upstream_auth(): tutils.raises("Invalid upstream auth specification", cmdline.parse_upstream_auth, "") + tutils.raises("Invalid upstream auth specification", cmdline.parse_upstream_auth, ":") + tutils.raises("Invalid upstream auth specification", cmdline.parse_upstream_auth, ":test") assert cmdline.parse_upstream_auth( "test:test") == "Basic" + " " + base64.b64encode("test:test") + assert cmdline.parse_upstream_auth( + "test:") == "Basic" + " " + base64.b64encode("test:") def test_parse_setheaders():
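A hedged sketch of a parser enforcing the policy the tests above settle on (empty password accepted, empty or missing username rejected); written for Python 3, where `b64encode` needs bytes, unlike the Python 2 code in the record:

```python
import base64


def parse_upstream_auth_sketch(auth: str) -> str:
    if ":" not in auth:
        raise ValueError("Invalid upstream auth specification: %r" % auth)
    username, password = auth.split(":", 1)  # passwords may contain colons
    if not username:
        raise ValueError("Invalid upstream auth specification: %r" % auth)
    token = base64.b64encode(f"{username}:{password}".encode()).decode()
    return "Basic " + token


assert parse_upstream_auth_sketch("test:test") == "Basic dGVzdDp0ZXN0"
assert parse_upstream_auth_sketch("test:")     # empty password is allowed
for bad in ("", ":", ":test"):
    try:
        parse_upstream_auth_sketch(bad)
        raise AssertionError("should have raised")
    except ValueError:
        pass
```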
codereview_python_data_1164
config_obj.changed.connect(self.set_colors) QTimer.singleShot(0, self.autohide) config_obj.changed.connect(self.autohide) config_obj.changed.connect(self.on_tab_colors_changed) def __repr__(self): This won't get called when `hide-always` is updated because of the `@config.change_filter`, so the UI isn't updated automatically when the user uses `:set tabs hide-always true`. Unfortunately `@config.change_filter` doesn't support multiple options, so I suggest writing a helper method: ``` python def on_hide_config_changed(self, section, option): """Hide tabbar if needed when the config is changed.""" if section == 'tabs' and option in ('hide-auto', 'hide-always'): self.autohide() ``` Then you can remove the `@config.change_filter(...)` decorator here, and adjust the `config_obj.changed.connect(...)` call above. config_obj.changed.connect(self.set_colors) QTimer.singleShot(0, self.autohide) config_obj.changed.connect(self.autohide) + config_obj.changed.connect(self.alwayshide) config_obj.changed.connect(self.on_tab_colors_changed) def __repr__(self):
codereview_python_data_1180
except OSError: self.log.debug("JMeter check failed.") return False - except ValueError: - raise def install(self): dest = os.path.dirname(os.path.dirname(os.path.expanduser(self.tool_path))) Why have it? Just let it propagate :) except OSError: self.log.debug("JMeter check failed.") return False def install(self): dest = os.path.dirname(os.path.dirname(os.path.expanduser(self.tool_path)))
codereview_python_data_1185
"""Test if the CLI hits specific client methods.""" tmp_config = os.path.join(self.test_dir, '.forseti') with mock.patch.dict( - os.environ, {'FORSETI_CLIENT_CONFIG': tmp_config}) as mock_config: for commandline, client_func, func_args,\ func_kwargs, config_string, config_expect\ in test_cases: nit: you don't need to use 'as var' if you don't need to access the var in the context. """Test if the CLI hits specific client methods.""" tmp_config = os.path.join(self.test_dir, '.forseti') with mock.patch.dict( + os.environ, {'FORSETI_CLIENT_CONFIG': tmp_config}): for commandline, client_func, func_args,\ func_kwargs, config_string, config_expect\ in test_cases:
codereview_python_data_1187
env = BetterDict() env.merge(dict(os.environ)) - java_opts = "".join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala]) - java_opts += " " + env.get("JAVA_OPTS", "") + " " + properties.get("java-opts", "") env.merge({"JAVA_OPTS": java_opts}) it should be "settings" env = BetterDict() env.merge(dict(os.environ)) + java_opts = ''.join([" -D%s=%s" % (key, params_for_scala[key]) for key in params_for_scala]) + java_opts += ' ' + env.get('JAVA_OPTS', '') + ' ' + self.settings.get('java-opts', '') env.merge({"JAVA_OPTS": java_opts})
codereview_python_data_1193
except Exception: yield None finally: - devnull.close() def array_to_bytes(buff): # Python 3.9 removed the tostring() method on arrays, the new alias is tobytes() If open raises, devnull will be None, so devnull.close() would raise. except Exception: yield None finally: + if devnull is not None: + devnull.close() def array_to_bytes(buff): # Python 3.9 removed the tostring() method on arrays, the new alias is tobytes()
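The failure mode and the guard in isolation, as a minimal sketch:

```python
import os

devnull = None
try:
    devnull = open(os.devnull, "wb")
    # ... work that may raise ...
finally:
    # If open() itself raised, devnull is still None; an unguarded
    # devnull.close() would mask the original error with an AttributeError.
    if devnull is not None:
        devnull.close()
```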
codereview_python_data_1209
last_required_arg = max_positional_args-1 if max_positional_args > num_posonly_args: code.putln('switch (pos_args) {') - for i, arg in enumerate(all_args[:last_required_arg+1]): - if i < num_posonly_args: - continue if max_positional_args > num_posonly_args and i <= max_positional_args: if i != num_posonly_args: code.putln('CYTHON_FALLTHROUGH;') Maybe like this? `for i, arg in enumerate(all_args[num_posonly_args:last_required_arg+1], num_posonly_args):` last_required_arg = max_positional_args-1 if max_positional_args > num_posonly_args: code.putln('switch (pos_args) {') + for i, arg in enumerate(all_args[num_posonly_args:last_required_arg+1], num_posonly_args): if max_positional_args > num_posonly_args and i <= max_positional_args: if i != num_posonly_args: code.putln('CYTHON_FALLTHROUGH;')
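The suggestion uses `enumerate`'s `start` parameter so the sliced list keeps its original indices, replacing the loop-and-`continue` pattern (toy values for illustration):

```python
all_args = ["pos_only_a", "pos_only_b", "pos_only_c", "normal_d", "normal_e"]
num_posonly_args = 3

for i, arg in enumerate(all_args[num_posonly_args:], num_posonly_args):
    print(i, arg)   # 3 normal_d, then 4 normal_e
```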
codereview_python_data_1214
'DISTINCT len(ObjectType.name) failed to filter out dupplicates') async def test_edgeql_expr_setop_12(self): await self.assert_query_result( r'''SELECT DISTINCT (SELECT ({1,2,3}, ()) FILTER .0 > 1).1;''', [[]], Let's also add the explicit set literal case `SELECT DISTINCT {(), ()}` just in case set literals get some special handling. 'DISTINCT len(ObjectType.name) failed to filter out dupplicates') async def test_edgeql_expr_setop_12(self): + await self.assert_query_result( + r'''SELECT DISTINCT {(), ()};''', + [[]], + ) + await self.assert_query_result( r'''SELECT DISTINCT (SELECT ({1,2,3}, ()) FILTER .0 > 1).1;''', [[]],
codereview_python_data_1215
# The METIS runs on the symmetric graph to generate the node assignment to partitions. start = time.time() sym_gidx = _CAPI_DGLMakeSymmetric_Hetero(g._graph) - sym_g = DGLHeteroGraph(gidx=sym_gidx, ntypes=['_N'], etypes=['_E']) print('Convert a graph into a bidirected graph: {:.3f} seconds'.format( time.time() - start)) vwgt = [] ntypes=['_N'] and etypes=['_E'] are the default values. We can omit them. # The METIS runs on the symmetric graph to generate the node assignment to partitions. start = time.time() sym_gidx = _CAPI_DGLMakeSymmetric_Hetero(g._graph) + sym_g = DGLHeteroGraph(gidx=sym_gidx) print('Convert a graph into a bidirected graph: {:.3f} seconds'.format( time.time() - start)) vwgt = []
codereview_python_data_1223
-import sys - import dask.array as da from distributed import Client, LocalCluster from sklearn.datasets import make_blobs If the examples were working on Mac, can we remove this check until something breaks? These are supposed to be examples that teach users how to use `lightgbm.dask`, and I'm really uncomfortable with including details only necessary for CI in them. import dask.array as da from distributed import Client, LocalCluster from sklearn.datasets import make_blobs
codereview_python_data_1231
def clear(self): "Clears the file archive" - return self._files.clear() Don't think `dict.clear()` returns anything ```suggestion self._files.clear() ``` def clear(self): "Clears the file archive" + self._files.clear()
codereview_python_data_1233
d = self.__dict__ for field_name, factory in self._field_factories: - value = factory() - d[field_name] = value d.update(kwargs) I'm not sure if unconditionally calling `factory()` is better than checking if `field_name not in kwargs` first. I mean it can be slightly faster until someone adds a more expensive factory. d = self.__dict__ for field_name, factory in self._field_factories: + if field_name not in kwargs: + d[field_name] = factory() d.update(kwargs)
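The trade-off in runnable form, using a toy `Record` class rather than the library's actual code: skipping the factory call when the caller supplies a value avoids constructing an object that `d.update(kwargs)` would immediately overwrite.

```python
from collections import defaultdict


class Record:
    _field_factories = [("tags", list), ("counts", lambda: defaultdict(int))]

    def __init__(self, **kwargs):
        d = self.__dict__
        for field_name, factory in self._field_factories:
            if field_name not in kwargs:  # only pay for the factory if needed
                d[field_name] = factory()
        d.update(kwargs)


r = Record(tags=["a"])
assert r.tags == ["a"] and r.counts == {}
```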
codereview_python_data_1236
""" if self.metadata or self._try_cache(): - if self.metadata_expire != -1 \ and self._check_config_file_age \ and self.repofile \ and dnf.util.file_age(self.repofile) < self.metadata._age: hmm, what about `< 0`? """ if self.metadata or self._try_cache(): + if self.metadata_expire < 0 \ and self._check_config_file_age \ and self.repofile \ and dnf.util.file_age(self.repofile) < self.metadata._age:
codereview_python_data_1243
url(r"^pontoon\.js$", pontoon_js_view), url(r"^static/js/pontoon\.js$", pontoon_js_view), # Include URL configurations from installed apps url(r"^translations/", include("pontoon.translations.urls")), url(r"", include("pontoon.teams.urls")), url(r"", include("pontoon.tour.urls")), - url(r"", include("pontoon.terminology.urls")), url(r"", include("pontoon.tags.urls")), url(r"", include("pontoon.sync.urls")), url(r"", include("pontoon.projects.urls")), Nit: We haven't done that much so far but I'm a fan of namespaces in URLs. So I'd advocate for putting this in a `^terminology/` namespace, thus making the get terms view called via `/terminology/get-terms/`. I think it makes things clearer, and also reduces the chances of conflicts in URL paths. url(r"^pontoon\.js$", pontoon_js_view), url(r"^static/js/pontoon\.js$", pontoon_js_view), # Include URL configurations from installed apps + url(r"^terminology/", include("pontoon.terminology.urls")), url(r"^translations/", include("pontoon.translations.urls")), url(r"", include("pontoon.teams.urls")), url(r"", include("pontoon.tour.urls")), url(r"", include("pontoon.tags.urls")), url(r"", include("pontoon.sync.urls")), url(r"", include("pontoon.projects.urls")),
codereview_python_data_1244
sampled = param.Boolean(default=False, doc=""" Allows defining a DynamicMap in closed mode without defining the - dimension bounds or values. Useful for allowing to let a HoloMap - in a composite plot to define the dimension sampling. """) def __init__(self, callback, initial_items=None, **params): stray 'to let' in this docstring. sampled = param.Boolean(default=False, doc=""" Allows defining a DynamicMap in closed mode without defining the + dimension bounds or values. The DynamicMap may then be explicitly + sampled via getitem or the sampling is determined during plotting + by a HoloMap with fixed sampling. """) def __init__(self, callback, initial_items=None, **params):
codereview_python_data_1256
if n < 0: n = max(0, len(self.index) + n) if n == 0: - index = self.index[:0] else: index = self.index[-n:] if self._is_transposed: shouldn't this be `pandas.Index([])`? This can return a `RangeIndex` where `start` and `stop` are 0, which isn't what pandas does. if n < 0: n = max(0, len(self.index) + n) if n == 0: + index = pandas.Index([]) else: index = self.index[-n:] if self._is_transposed:
codereview_python_data_1257
y_true: TensorLike, y_pred: TensorLike, margin: FloatTensorLike = 1.0, - angular: bool = False, soft: bool = False, ) -> tf.Tensor: """Computes the triplet loss with hard negative and hard positive mining. please move this to the last arg. y_true: TensorLike, y_pred: TensorLike, margin: FloatTensorLike = 1.0, soft: bool = False, + angular: bool = False, ) -> tf.Tensor: """Computes the triplet loss with hard negative and hard positive mining.
codereview_python_data_1261
], ) def test_update_conda_requirements(setup_commands_source): - major = sys.version_info.major - minor = sys.version_info.minor - micro = sys.version_info.micro - python_version_lower = f"python>{major}.{minor}" - python_version_higher = f"python<={major}.{minor}.{micro}" with mock.patch( "modin.experimental.cloud.rayscale._bootstrap_config", lambda config: config maybe we should mock `sys.version_info` to some fixed bogus value like `7.12.45` and check for that string? ], ) def test_update_conda_requirements(setup_commands_source): + major, minor, micro = 7, 12, 45 + version_info = {"major": major, "minor": minor, "micro": micro} with mock.patch( "modin.experimental.cloud.rayscale._bootstrap_config", lambda config: config
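A sketch of mocking `sys.version_info` to the bogus value suggested above; `sys.version_info` is a struct sequence, so building the fake from a 5-tuple keeps both indexing and attribute access working:

```python
import sys
from unittest import mock

fake = type(sys.version_info)((7, 12, 45, "final", 0))

with mock.patch.object(sys, "version_info", fake):
    major, minor, micro = sys.version_info[:3]
    assert f"python>{major}.{minor}" == "python>7.12"
    assert f"python<={major}.{minor}.{micro}" == "python<=7.12.45"
```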
codereview_python_data_1262
log.error("MICROSOFT_TRANSLATOR_API_KEY not set") return JsonResponse({ 'status': False, - 'message': 'Bad Request: {error}'.format(error='Missing api key.'), }, status=400) # Validate if locale exists in the database to avoid any potential XSS attacks. Hehe, I guess this can be simplified to a simple string. :wink: log.error("MICROSOFT_TRANSLATOR_API_KEY not set") return JsonResponse({ 'status': False, + 'message': 'Bad Request: Missing api key.', }, status=400) # Validate if locale exists in the database to avoid any potential XSS attacks.
codereview_python_data_1263
pass model = _init_model(miscmodels.BookmarkCompletionModel, 'url') _instances[usertypes.Completion.bookmark_by_url] = model - model = _init_model(miscmodels.BookmarkCompletionModel, 'title') @pyqtSlot() This seems unneeded, as the "bookmark by title" model isn't used anywhere. pass model = _init_model(miscmodels.BookmarkCompletionModel, 'url') _instances[usertypes.Completion.bookmark_by_url] = model @pyqtSlot()
codereview_python_data_1268
Raises: InvalidHash: If the block's id is not corresponding to its data. - InvalidSignature: If the block's signature is not corresponding - to it's data or `node_pubkey`. """ # Validate block id block = block_body['block'] I guess the `from_dict()` method won't raise `InvalidSignature` any more. Raises: InvalidHash: If the block's id is not corresponding to its data. """ # Validate block id block = block_body['block']
codereview_python_data_1269
assert crashes[0].security_flag == c.security_flag self.crashes = crashes - fully_qualified_fuzzer_name = context.fuzzer_name if context.fuzz_target: fully_qualified_fuzzer_name = context.fuzz_target.fully_qualified_name() self.main_crash, self.one_time_crasher_flag = find_main_crash( crashes, fully_qualified_fuzzer_name, context.test_timeout) nit: why not ``` else: fully_qualified_fuzzer_name = context.fuzzer_name ``` IMO that'd be a bit more readable, even though it's one line more :) assert crashes[0].security_flag == c.security_flag self.crashes = crashes if context.fuzz_target: fully_qualified_fuzzer_name = context.fuzz_target.fully_qualified_name() + else: + fully_qualified_fuzzer_name = context.fuzzer_name self.main_crash, self.one_time_crasher_flag = find_main_crash( crashes, fully_qualified_fuzzer_name, context.test_timeout)
codereview_python_data_1271
Return only the value for nodes u distance : edge attribute key, optional (default=None) Use the specified edge attribute as the edge distance in shortest - path calculations Returns ------- What happens when `distance=None`? The docs should tell the user what to expect. Return only the value for nodes u distance : edge attribute key, optional (default=None) Use the specified edge attribute as the edge distance in shortest + path calculations. If `None`, then each edge will have distance equal to 1. + Returns -------
codereview_python_data_1273
shutil.rmtree(self.tmp_dir, ignore_errors=True) def _get_backups(self): - files = [os.path.join(self.backup_dir, file) - for file in os.listdir(self.backup_dir)] - files = [file for file in files if os.path.isfile( - file) and self.backup_name_prefix in file] return sorted(files, key=os.path.getmtime, reverse=True) def _remove_old_backups(self): `file` is a built-in function; it's better to use another name for the variable. shutil.rmtree(self.tmp_dir, ignore_errors=True) def _get_backups(self): + files = [os.path.join(self.backup_dir, bk_file) + for bk_file in os.listdir(self.backup_dir)] + files = [bk_file for bk_file in files if os.path.isfile( + bk_file) and self.backup_name_prefix in bk_file] return sorted(files, key=os.path.getmtime, reverse=True) def _remove_old_backups(self):
codereview_python_data_1275
functools.partial(self._on_title_changed, tab)) tab.icon_changed.connect( functools.partial(self._on_icon_changed, tab)) - tab.search_match_changed.connect( - functools.partial(self._on_search_match_changed, tab)) tab.pinned_changed.connect( functools.partial(self._on_pinned_changed, tab)) tab.load_progress.connect( This should go through `self._filter` like above, so that it ensures that only search match changes from the current tab are processed. Also, the signal should be named `cur_search_match_changed` then. functools.partial(self._on_title_changed, tab)) tab.icon_changed.connect( functools.partial(self._on_icon_changed, tab)) tab.pinned_changed.connect( functools.partial(self._on_pinned_changed, tab)) tab.load_progress.connect(
codereview_python_data_1285
async def test_constraints_ddl_07(self): await self.con.execute(""" SET MODULE test; - CREATE TYPE Test { CREATE PROPERTY first_name -> str; CREATE PROPERTY last_name -> str; CREATE CONSTRAINT exclusive on (__subject__.first_name); Make sure that only `SINGLE` pointers are allowed, and test constraints on links too. async def test_constraints_ddl_07(self): await self.con.execute(""" SET MODULE test; + CREATE TYPE ObjCnstr { CREATE PROPERTY first_name -> str; CREATE PROPERTY last_name -> str; CREATE CONSTRAINT exclusive on (__subject__.first_name);
codereview_python_data_1287
n_tags = potentials.shape[-1] transition_params = tf.random.normal([n_tags, n_tags]) - backpointers, last_score = text.crf_decode_forward( inputs, initial_state, transition_params, sequence_length_less_one ) Since the variable `last_score` is not used in this context, the standard way to handle it is to name it `_`. n_tags = potentials.shape[-1] transition_params = tf.random.normal([n_tags, n_tags]) + backpointers, _ = text.crf_decode_forward( inputs, initial_state, transition_params, sequence_length_less_one )
codereview_python_data_1296
def asnumpy(a): return a.cpu().numpy() - -def reduce_sum(a): - if isinstance(a, list): - return sum(a) - elif isinstance(a, Tensor): - return torch.sum(a, 0, keepdim=True) - else: - raise Exception("reduce_sum only supports input of type Tensor or list of Tensor") These functions are only intended for system usage. Where do we need `reduce_sum`? def asnumpy(a): return a.cpu().numpy()
codereview_python_data_1298
f'revision {last_tested_revision}.') return fuzz_target = testcase.get_fuzz_target() if fuzz_target: fuzz_target_name = fuzz_target.binary Do we need to refetch testcase to avoid racy updates? The same applies to some branches, like lines 81 and 88. f'revision {last_tested_revision}.') return + # TODO(ochang): Record fuzz target. fuzz_target = testcase.get_fuzz_target() if fuzz_target: fuzz_target_name = fuzz_target.binary
codereview_python_data_1306
booster.best_iteration = earlyStopException.best_iteration + 1 evaluation_result_list = earlyStopException.best_score break - booster._reverse_update_params() booster.best_score = collections.defaultdict(dict) for dataset_name, eval_name, score, _ in evaluation_result_list: booster.best_score[dataset_name][eval_name] = score This does not seem to solve the problem. For example, when constructing the Booster fails, this line will not be called, so the parameters cannot be reverted. booster.best_iteration = earlyStopException.best_iteration + 1 evaluation_result_list = earlyStopException.best_score break booster.best_score = collections.defaultdict(dict) for dataset_name, eval_name, score, _ in evaluation_result_list: booster.best_score[dataset_name][eval_name] = score
codereview_python_data_1312
[Diff context unavailable: the code snippets for this record were replaced by a cached Varnish "Error 503 Backend is unhealthy" page.] Can you please rewrite this `return` in a different way so that it's easier for our IDE debugger to step through? ``` if (scan_time - created_time).days > self.key_max_age: return True return False ```
codereview_python_data_1314
"""Check whether the anchors are inside the border Args: - flat_anchors (torch.Tensor): Flatten anchors - valid_flags (torch.Tensor): An existing valid flags of anchors - img_shape (tuple(int)): Shape of current image allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. For tensors, it is better to illustrate the shape. """Check whether the anchors are inside the border Args: + flat_anchors (torch.Tensor): Flatten anchors, shape (n, 4). + valid_flags (torch.Tensor): An existing valid flags of anchors. + img_shape (tuple(int)): Shape of current image. allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0.
codereview_python_data_1340
gi.from_networkx(graph_data)
    return gi

-def _map_to_subgraph_nid(gi, v):
-    return _CAPI_DGLGraphSubgraphMapVFromParent(gi._handle, v)
-
_init_api("dgl.graph_index")
You can put this directly in `DGLSubGraph`. It is not necessary that all the CAPI calls be in GraphIndex.
gi.from_networkx(graph_data)
    return gi

_init_api("dgl.graph_index")
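A rough sketch of where that method could live instead, assuming `DGLSubGraph` holds a reference to its graph index (the class hierarchy and attribute names here are guesses):

```python
class DGLSubGraph(DGLGraph):  # assumed base class
    def map_to_subgraph_nid(self, v):
        # Issue the CAPI call from the subgraph itself instead of
        # routing it through GraphIndex.
        return _CAPI_DGLGraphSubgraphMapVFromParent(self._graph._handle, v)
```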
codereview_python_data_1341
class Brabex(ExchangeBase): async def get_rates(self, ccy): - json = await self.get_json('exchange.brabex.com.br', 'api/v1/BRL/ticker?crypto_currency=BTC') return {'BRL': Decimal(json['last'])} ```suggestion json = await self.get_json('exchange.brabex.com.br', '/api/v1/BRL/ticker?crypto_currency=BTC') ``` class Brabex(ExchangeBase): async def get_rates(self, ccy): + json = await self.get_json('exchange.brabex.com.br', '/api/v1/BRL/ticker?crypto_currency=BTC') return {'BRL': Decimal(json['last'])}
codereview_python_data_1347
if (((len(plot) == 1 and not plot.dynamic) or (len(plot) > 1 and self.holomap is None) or (plot.dynamic and len(plot.keys[0]) == 0)) or - not unbound_dimensions(plot.streams, plot.dimensions, False)): fmt = fig_formats[0] if self.fig=='auto' else self.fig else: fmt = holomap_formats[0] if self.holomap=='auto' else self.holomap Again, `no_duplicates=False` would be clearer here... if (((len(plot) == 1 and not plot.dynamic) or (len(plot) > 1 and self.holomap is None) or (plot.dynamic and len(plot.keys[0]) == 0)) or + not unbound_dimensions(plot.streams, plot.dimensions, no_duplicates=False)): fmt = fig_formats[0] if self.fig=='auto' else self.fig else: fmt = holomap_formats[0] if self.holomap=='auto' else self.holomap
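The readability point in miniature, using the call from this record:

```python
# A bare positional boolean is opaque at the call site...
unbound_dimensions(plot.streams, plot.dimensions, False)

# ...while the keyword form documents itself.
unbound_dimensions(plot.streams, plot.dimensions, no_duplicates=False)
```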
codereview_python_data_1348
label = create_method_name(request.label[:40])
        elif isinstance(request, SetVariables):
            body = self._gen_set_vars(request)
-            label = "set_variables"
        else:
            return
If the user sets a label themselves, can we use it?
label = create_method_name(request.label[:40])
        elif isinstance(request, SetVariables):
            body = self._gen_set_vars(request)
+            label = request.config.get("label", "set_variables")
        else:
            return
codereview_python_data_1354
code.putln('{') all_args = tuple(positional_args) + tuple(kw_only_args) non_posonly_args = [arg for arg in all_args if not arg.pos_only] - do_generate_kw_unpacking = bool(non_posonly_args) or self.starstar_arg - if do_generate_kw_unpacking: - code.putln("static PyObject **%s[] = {%s};" % ( - Naming.pykwdlist_cname, - ','.join(['&%s' % code.intern_identifier(arg.name) - for arg in non_posonly_args] + ['0']))) # Before being converted and assigned to the target variables, # borrowed references to all unpacked argument values are If you care about a `bool` here, then the whole expression on the right side should be wrapped in `bool( )`. code.putln('{') all_args = tuple(positional_args) + tuple(kw_only_args) non_posonly_args = [arg for arg in all_args if not arg.pos_only] + code.putln("static PyObject **%s[] = {%s};" % ( + Naming.pykwdlist_cname, + ','.join(['&%s' % code.intern_identifier(arg.name) + for arg in non_posonly_args] + ['0']))) # Before being converted and assigned to the target variables, # borrowed references to all unpacked argument values are
codereview_python_data_1355
from google.cloud.forseti.services.utils import to_full_resource_name from google.cloud.forseti.services import db from google.cloud.forseti.services.utils import get_sql_dialect -from google.cloud.forseti.common.util import log_util -LOGGER = log_util.get_logger(__name__) POOL_RECYCLE_SECONDS = 300 PER_YIELD = 1024 Please add a newline before `Returns` from google.cloud.forseti.services.utils import to_full_resource_name from google.cloud.forseti.services import db from google.cloud.forseti.services.utils import get_sql_dialect +from google.cloud.forseti.common.util import logger + +LOGGER = logger.get_logger(__name__) POOL_RECYCLE_SECONDS = 300 PER_YIELD = 1024
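What the reviewer is asking for is the standard spacing in a Google-style docstring, for example:

```python
def example(value):
    """One-line summary.

    Args:
        value (int): Input value.

    Returns:
        int: The value unchanged.
    """
    return value
```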
codereview_python_data_1365
def gen_parameter_code( config_hpp: Path, config_out_cpp: Path -) -> Tuple[List, List]: """Generate auto config file. Parameters ```suggestion ) -> Tuple[List[Tuple[str, int]], List[List[Dict[str, Any]]]]: ``` def gen_parameter_code( config_hpp: Path, config_out_cpp: Path +) -> Tuple[List[Tuple[str, int]], List[List[Dict[str, Any]]]]: """Generate auto config file. Parameters
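Note that the suggested signature also needs those typing names in scope; a self-contained version might read:

```python
from pathlib import Path
from typing import Any, Dict, List, Tuple

def gen_parameter_code(
    config_hpp: Path, config_out_cpp: Path
) -> Tuple[List[Tuple[str, int]], List[List[Dict[str, Any]]]]:
    """Generate auto config file."""
    ...
```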
codereview_python_data_1367
add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True,
                  message=msg, log_event=False)
-
-        logger.periodic_info(logger.EVERY_HALF_DAY, "[PERIODIC] Incarnation: {0}; ContainerId: {2}",
-                             incarnation, self.counter, get_container_id_from_env())
        self.counter += 1
-
        io_errors = IOErrorCounter.get_and_reset()
        hostplugin_errors = io_errors.get("hostplugin")
        protocol_errors = io_errors.get("protocol")
Let's add more context to the message (what is it?). Let's not include the container ID (I'm even tempted to remove the incarnation from the heartbeat if we are not using it); we are still working on those calls to update_goal_state. If we do the heartbeat on the main thread (extension handling), then I do not have concerns about this.
add_event(name=AGENT_NAME, version=CURRENT_VERSION, op=WALAEventOperation.HeartBeat, is_success=True,
                  message=msg, log_event=False)
        self.counter += 1
        io_errors = IOErrorCounter.get_and_reset()
        hostplugin_errors = io_errors.get("hostplugin")
        protocol_errors = io_errors.get("protocol")
codereview_python_data_1369
axis = self._dataframe._get_axis_number(axis)
        # FIXME: this should be converted into a dict to ensure simplicity
        # of handling resample parameters at the query compiler level.
-        self.resample_args = {
            "rule": rule,
            "axis": axis,
            "closed": closed,
Please rename the attribute to `self.resample_kwargs` for consistency.
axis = self._dataframe._get_axis_number(axis)
        # FIXME: this should be converted into a dict to ensure simplicity
        # of handling resample parameters at the query compiler level.
+        self.resample_kwargs = {
            "rule": rule,
            "axis": axis,
            "closed": closed,
codereview_python_data_1371
msg = "File %s exceeds maximum size quota of %s and won't be included into upload" self.log.warning(msg, filename, max_file_size) - for filename in logs: zfh.write(filename, os.path.basename(filename)) return mfile def __upload_artifacts(self): """ - If token provided, upload artifacts folder contents and jmeter_log - else: jmeter_log only :return: """ if self.client.token: remove this todo, it's irrelevant now msg = "File %s exceeds maximum size quota of %s and won't be included into upload" self.log.warning(msg, filename, max_file_size) + for filename in logs: # upload logs unconditionally zfh.write(filename, os.path.basename(filename)) return mfile def __upload_artifacts(self): """ + If token provided, upload artifacts folder contents and bzt.log + :return: """ if self.client.token:
codereview_python_data_1373
"It seems as though you've passed an incompable object type!" "Please check the type being passed again" ) except ValueError: six.raise_from( There is a missing `)` here, this is why tests fail. "It seems as though you've passed an incompable object type!" "Please check the type being passed again" ) + ) except ValueError: six.raise_from(
codereview_python_data_1375
inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 - bin_label_weights = label_weights.view(-1, 1).expand( - label_weights.size(0), label_channels) return bin_labels, bin_label_weights This function cannot be deleted since it is used by other methods. inds = torch.nonzero(labels >= 1).squeeze() if inds.numel() > 0: bin_labels[inds, labels[inds] - 1] = 1 + bin_label_weights = label_weights.view(-1, + 1).expand(label_weights.size(0), + label_channels) return bin_labels, bin_label_weights
codereview_python_data_1376
super(NdWidget, self).__init__(**params) self.id = plot.comm.target if plot.comm else uuid.uuid4().hex self.plot = plot - dims, keys = drop_streams(plot.streams, plot.keys, plot.dimensions) - self.dimensions, self.keys = dims, keys self.json_data = {} if self.plot.dynamic: self.embed = False if renderer is None: As the variables `dims` and `keys` don't seem to be used in this method for anything other than setting `self.dimensions` and `self.keys`, I would consider using: ``` python self.dimensions, self.keys = drop_streams(plot.streams, plot.keys, plot.dimensions) ``` super(NdWidget, self).__init__(**params) self.id = plot.comm.target if plot.comm else uuid.uuid4().hex self.plot = plot + self.dimensions, self.keys = drop_streams(plot.streams, + plot.dimensions, + plot.keys) + self.json_data = {} if self.plot.dynamic: self.embed = False if renderer is None:
codereview_python_data_1381
Parameters ---------- nbunch : single node, container, or all nodes (default= all nodes) - The view will only report edges from these nodes (outgoing if directed). data : string or bool, optional (default=False) The edge attribute returned in 3-tuple (u, v, ddict[data]). If True, return edge attribute dict in 3-tuple (u, v, ddict). The "(outgoing if directed)" seems unnecessary here, since this is in the docstring for the DiGraph class (so "if directed" will always be true). Parameters ---------- nbunch : single node, container, or all nodes (default= all nodes) + The view will only report edges from these nodes. data : string or bool, optional (default=False) The edge attribute returned in 3-tuple (u, v, ddict[data]). If True, return edge attribute dict in 3-tuple (u, v, ddict).
codereview_python_data_1382
"""Initialize sids and other state variables. :Arguments: - granularity : str (daily, hourly or minutely) The duration of the bars. annualizer : int <optional> Which constant to use for annualizing risk metrics. - If not provided, will extract from granularity. capital_base : float <default: 1.0e5> How much capital to start with. """ Just making sure I'm reading this correctly, the annualizer is in an integer in the units of the granularity. """Initialize sids and other state variables. :Arguments: + data_frequency : str (daily, hourly or minutely) The duration of the bars. annualizer : int <optional> Which constant to use for annualizing risk metrics. + If not provided, will extract from data_frequency. capital_base : float <default: 1.0e5> How much capital to start with. """