id: string (length 24–28)
content: string (length 121–2.08k)
codereview_python_data_12671
'service': 'admin.googleapis.com'}, {'name': 'AppEngine Admin', 'service': 'appengine.googleapis.com'}, {'name': 'Cloud Resource Manager', 'service': 'cloudresourcemanager.googleapis.com'}, {'name': 'Cloud SQL', bigquery API needs to be added. bigquery-json.googleapis.com. 'service': 'admin.googleapis.com'}, {'name': 'AppEngine Admin', 'service': 'appengine.googleapis.com'}, + {'name': 'BigQuery', + 'service': 'bigquery-json.googleapis.com'}, {'name': 'Cloud Resource Manager', 'service': 'cloudresourcemanager.googleapis.com'}, {'name': 'Cloud SQL',
codereview_python_data_12675
def can_double_down(card_one, card_two): """Determine if a blackjack player can place a double down bet. - :param card_one, card_two: str - first and second cards dealt. :return: bool - if the hand can be doubled down (i.e. totals 9, 10 or 11 points). """ ```suggestion :param card_one, card_two: str - first and second cards in hand. ``` def can_double_down(card_one, card_two): """Determine if a blackjack player can place a double down bet. + :param card_one, card_two: str - first and second cards in hand. :return: bool - if the hand can be doubled down (i.e. totals 9, 10 or 11 points). """
codereview_python_data_12680
-_base_ = [ - '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py' -] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskFormer', Can we make this config inherit from the config of r50? +_base_ = ['./maskformer_r50_mstrain_64x1_300e_coco.py'] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskFormer',
codereview_python_data_12687
# mode: run # ticket: 5 -# Some more tests for the same sort of thing are in "methodmangling_T1382" class CyTest(object): """ " and the reason why there are two test files is " (It's nice to not have to think too much about where a new test should go.) # mode: run # ticket: 5 +# A small number of extra tests checking: +# 1) this works correctly with pure-Python-mode decorators - methodmangling_pure.py. +# 2) this works correctly with cdef classes - methodmangling_cdef.pyx class CyTest(object): """
codereview_python_data_12688
warn(message) print(final_message) -def load_data(path:PathOrStr, file:PathLikeOrBinaryStream= 'data_save.pkl', bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False, **kwargs)->DataBunch: - """Load from `path/file` a saved `DataBunch`. - If `file` is a binary stream (file or buffer), read from it (`path` is still required to set the working directory).""" source = Path(path)/file if is_pathlike(file) else file ll = torch.load(source, map_location='cpu') if defaults.device == torch.device('cpu') else torch.load(source) return ll.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, dl_tfms=dl_tfms, device=device, fastai style is only one shortish line "about" docstring with the rest of the details inside the doc notebooks. warn(message) print(final_message) +def load_data(path:PathOrStr, file:PathLikeOrBinaryStream='data_save.pkl', bs:int=64, val_bs:int=None, num_workers:int=defaults.cpus, dl_tfms:Optional[Collection[Callable]]=None, device:torch.device=None, collate_fn:Callable=data_collate, no_check:bool=False, **kwargs)->DataBunch: + "Load a saved `DataBunch` from `path/file`. `file` can be file-like (file or buffer)" source = Path(path)/file if is_pathlike(file) else file ll = torch.load(source, map_location='cpu') if defaults.device == torch.device('cpu') else torch.load(source) return ll.databunch(path=path, bs=bs, val_bs=val_bs, num_workers=num_workers, dl_tfms=dl_tfms, device=device,
codereview_python_data_12694
.. autofunction:: Merge """ import errno import numpy as np import logging import copy import uuid -import os -import six import MDAnalysis import sys Could you move the import of `six` as the first import, and skip a line after it? I like having the compatibility import first to easily now what behaviors are being changed. .. autofunction:: Merge """ +import six + import errno import numpy as np import logging import copy import uuid import MDAnalysis import sys
codereview_python_data_12698
assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero' -@pytest.mark.parametrize(['num_sample', 'num_batch'], [[2, 2], [2, 0], [2, 0]]) def test_bbox_head_get_bboxes(num_sample, num_batch): self = BBoxHead(reg_class_agnostic=True) add a num_sample is 0 and num_sample, num_batch both are 0. assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero' +@pytest.mark.parametrize(['num_sample', 'num_batch'], [[2, 2], [0, 2], [0, 0]]) def test_bbox_head_get_bboxes(num_sample, num_batch): self = BBoxHead(reg_class_agnostic=True)
codereview_python_data_12705
['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] """ # alternative implementation that calculates the matrix exponential - import numpy import scipy.linalg - nodelist = list(G) # ordering of nodes in array - A = nx.to_numpy_recarray(G,nodelist,dtype=numpy.dtype(int)) - # convert to 0-1 array A[A!=0.0] = 1 - expA = scipy.linalg.expm(A) # convert diagonal to dictionary keyed by node sc = dict(zip(nodelist,map(float,expA.diagonal()))) return sc A simpler fix is to just use `scipy.linalg.expm(A.A)` or `scipy.linalg.expm(scipy.asarray(A))` ['1 3.90', '2 3.90', '3 3.64', '4 3.71', '5 3.64', '6 3.71', '7 3.64', '8 3.90'] """ # alternative implementation that calculates the matrix exponential import scipy.linalg + nodelist = list(G) # ordering of nodes in matrix + A = nx.to_numpy_matrix(G,nodelist) + # convert to 0-1 matrix A[A!=0.0] = 1 + expA = scipy.linalg.expm(A.A) # convert diagonal to dictionary keyed by node sc = dict(zip(nodelist,map(float,expA.diagonal()))) return sc
codereview_python_data_12709
# See the License for the specific language governing permissions and # limitations under the License. -"""Email utility module.""" # The pre-commit linter will complain about useless disable of no-member, but # this is needed because quiet the Sendgrid no-member error on Travis. Please make this more clear that this is the mailjet module, rather than utility module. # See the License for the specific language governing permissions and # limitations under the License. +"""Mailjet email connector module.""" # The pre-commit linter will complain about useless disable of no-member, but # this is needed because quiet the Sendgrid no-member error on Travis.
codereview_python_data_12711
is_internal=False, log_event=True): - if not is_success and log_event: _log_event(name, op, message, duration, is_success=is_success) self._add_event(duration, evt_type, is_internal, is_success, message, name, op, version, eventId=1) Consider adding parentheses to make precedence clear without requiring the reader to look up whether `not` or `and` binds more tightly. is_internal=False, log_event=True): + if (not is_success) and log_event: _log_event(name, op, message, duration, is_success=is_success) self._add_event(duration, evt_type, is_internal, is_success, message, name, op, version, eventId=1)
codereview_python_data_12716
from plaso.parsers import amcache from plaso.parsers import android_app_usage from plaso.parsers import apache_access -from plaso.parsers import apthistory from plaso.parsers import bash_history from plaso.parsers import bencode_parser from plaso.parsers import bsm Please rename to "apt_history" - and associated files. See "bash_history" for a similarly named parser/formatter etc. from plaso.parsers import amcache from plaso.parsers import android_app_usage from plaso.parsers import apache_access +from plaso.parsers import apt_history from plaso.parsers import bash_history from plaso.parsers import bencode_parser from plaso.parsers import bsm
codereview_python_data_12717
return 'Removed %s host group' % group[0]['name'] async def delete_agent(self, id): - agent = await self.dao.get('core_agent', dict(id=id)) await self.dao.delete('core_agent', data=dict(id=id)) await self.dao.delete('core_group_map', data=dict(agent_id=id)) - return 'Removed %s agent' % agent i don't think we need to get the agent here; we're only using the ID, which is passed in, which we should have delete ability on. i could see an argument for doing it for validity-sake, but i don't see much of a value here-- if anything, i would have the delete always check for validity before allowing a delete (but again, don't think it's worth it) return 'Removed %s host group' % group[0]['name'] async def delete_agent(self, id): await self.dao.delete('core_agent', data=dict(id=id)) await self.dao.delete('core_group_map', data=dict(agent_id=id)) + return 'Removed agent id: %s' % id
codereview_python_data_12718
a = M.sum(axis=0) b = M.sum(axis=1) vara = (a[idx] * x ** 2).sum() - ((a[idx] * x).sum()) ** 2 - varb = (b[idx] * x ** 2).sum() - ((b[idx] * x).sum()) ** 2 xy = np.outer(x, y) ab = np.outer(a[idx], b[idx]) return (xy * (M - ab)).sum() / np.sqrt(vara * varb) I know the values are the same, but could you change "x" to "y" in "varb"? This also makes me wonder if the mixing matrix for a directed graph ought to have in_degree for rows and out_degree for columns. But I think that is beyond this issue. It might be a topic of a whole paper on mixing over graphs (if it hasn't been written already). a = M.sum(axis=0) b = M.sum(axis=1) vara = (a[idx] * x ** 2).sum() - ((a[idx] * x).sum()) ** 2 + varb = (b[idx] * y ** 2).sum() - ((b[idx] * y).sum()) ** 2 xy = np.outer(x, y) ab = np.outer(a[idx], b[idx]) return (xy * (M - ab)).sum() / np.sqrt(vara * varb)
codereview_python_data_12719
return start_moto_server('acm', port, name='ACM', asynchronous=asynchronous) -def start_ses(port=None, asynchronous=False, update_listener=None): - port = port or config.PORT_SES - return start_moto_server('ses', port, name='SES', asynchronous=asynchronous, update_listener=update_listener) - - # TODO move to es_starter.py? def start_elasticsearch_service(port=None, asynchronous=False): port = port or config.PORT_ES I think we can remove this function, right? (duplicate with `ses_starter.py`) return start_moto_server('acm', port, name='ACM', asynchronous=asynchronous) # TODO move to es_starter.py? def start_elasticsearch_service(port=None, asynchronous=False): port = port or config.PORT_ES
codereview_python_data_12721
self.annotations[name] = value def ast_ignore_ownership(self) -> bool: - """Whether to force something into the AST even though it is owned""" return False Did you mean "not owned" here? self.annotations[name] = value def ast_ignore_ownership(self) -> bool: + """Whether to force generating an AST even though it isn't owned""" return False
codereview_python_data_12722
class GroupDao(dao.Dao): - """Data access object (DAO) for Organizations.""" def __init__(self): super(GroupDao, self).__init__() DAO for Groups class GroupDao(dao.Dao): + """Data access object (DAO) for Groups.""" def __init__(self): super(GroupDao, self).__init__()
codereview_python_data_12723
f"'{text_non_ascii_str}' != '{ref_text_non_ascii_literal[idx]}'" -alias_batch_size=64 -@pipeline_def(batch_size=alias_batch_size, device_id=0, num_threads=4) def nemo_pipe(nemo_op, path, read_text, read_sample_rate, dtype, downmix): if read_sample_rate: audio, sr = nemo_op(manifest_filepaths=path, read_sample_rate=read_sample_rate, Nitpick: this `alias` doesn't look right here... f"'{text_non_ascii_str}' != '{ref_text_non_ascii_literal[idx]}'" +batch_size_alias_test=64 +@pipeline_def(batch_size=batch_size_alias_test, device_id=0, num_threads=4) def nemo_pipe(nemo_op, path, read_text, read_sample_rate, dtype, downmix): if read_sample_rate: audio, sr = nemo_op(manifest_filepaths=path, read_sample_rate=read_sample_rate,
codereview_python_data_12724
json_data = [] with open(os.path.join( - self._destination_path, 'hashes.json'), 'w') as write_file: - for sha256, paths in self._report_output.items(): json_data.append({"sha256": sha256, "paths": paths}) json.dump(json_data, write_file) Please move the filename out to a class constant - _HASHES_FILENAME or similar. json_data = [] with open(os.path.join( + self._destination_path, self._HASHES_FILENAME), 'w') as write_file: + for sha256, paths in self._paths_by_hash.items(): json_data.append({"sha256": sha256, "paths": paths}) json.dump(json_data, write_file)
codereview_python_data_12725
fields_desc = [ ShortEnumField("type", 0x8002, sctpchunkparamtypes), FieldLenField("len", None, length_of="random", adjust = lambda pkt,x:x+4), - PadField(StrLenField("random", os.urandom(32), length_from=lambda pkt: pkt.len-4), 4, padwith=b"\x00"),] You probably want to replace `os.urandom(32)` with `RandBin(32)`, for at least two reasons: - `RandBin()` and other volatile values exist in Scapy for this purpose. - With your code, the random string would be generated when the module is loaded and will be the same for every packet within the same Scapy session or program execution. You would have to import `RandBin` from `scapy.volatile`, and remove the `import os`. What do you think? fields_desc = [ ShortEnumField("type", 0x8002, sctpchunkparamtypes), FieldLenField("len", None, length_of="random", adjust = lambda pkt,x:x+4), + PadField(StrLenField("random", RandBin(32), length_from=lambda pkt: pkt.len-4), 4, padwith=b"\x00"),]
codereview_python_data_12726
def is_lpm_fuzz_target(fuzzer_path): """Returns True if |fuzzer_path| is a libprotobuf-mutator based fuzz target.""" - with open(fuzzer_path) as fuzzer_handle: - return utils.search_string_in_file('TestOneProtoInput', fuzzer_handle) def get_issue_owners(fuzz_target_path): nit: maybe `file_handle` instead of `fuzzer_handle`? def is_lpm_fuzz_target(fuzzer_path): """Returns True if |fuzzer_path| is a libprotobuf-mutator based fuzz target.""" + with open(fuzzer_path) as file_handle: + return utils.search_string_in_file('TestOneProtoInput', file_handle) def get_issue_owners(fuzz_target_path):
codereview_python_data_12727
def get_names( - infos: List -) -> List: """Get names of all parameters. Parameters ```suggestion infos: List[List[Dict[str, Any]]] ) -> List[str]: ``` def get_names( + infos: List[List[Dict[str, Any]]] +) -> List[str]: """Get names of all parameters. Parameters
codereview_python_data_12728
from binary_search import binary_search -# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0 class BinarySearchTests(unittest.TestCase): def test_finds_value_in_array_with_one_element(self): Looks like the version number is wrong - the canonical data is version `1.0.0`. This like also need to have an extra blank line before it in order to pass the `flake8` tests for compliance with PEP8. from binary_search import binary_search +# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0 class BinarySearchTests(unittest.TestCase): def test_finds_value_in_array_with_one_element(self):
codereview_python_data_12732
violations_as_dict, notifier_configs.get('violation').get('findings').get('gcs_path')) - inv_summary_notify(inv_index_id, service_config) log_message = 'Notification completed!' progress_queue.put(log_message) progress_queue.put(None) The name of this helper method would be much better to start with a verb: `run_inv_summary()` or `send_inv_summary()` violations_as_dict, notifier_configs.get('violation').get('findings').get('gcs_path')) + run_inv_summary(inv_index_id, service_config) log_message = 'Notification completed!' progress_queue.put(log_message) progress_queue.put(None)
codereview_python_data_12741
-def is_paired(value): pass I think that `input_string` is more descriptive. To me, `value` is too generic. I would prefer that we leave this as-is. +def is_paired(input_string): pass
codereview_python_data_12742
'force': 'kcal/(mol*Angstrom)'} _Timestep = Timestep - def __init__(self, filename, n_atoms=None, **kwargs): - self._mmap = kwargs.pop('mmap', None) super(NCDFReader, self).__init__(filename, **kwargs) please be explicit about the `mmap` keyword in the function instead of poping from kwargs. This will also make it easier to spot the default value in an interpreter setting. 'force': 'kcal/(mol*Angstrom)'} _Timestep = Timestep + def __init__(self, filename, n_atoms=None, mmap=None, **kwargs): + self._mmap = mmap super(NCDFReader, self).__init__(filename, **kwargs)
codereview_python_data_12743
kwds['name'] = module_name - sources = [file] - sources += [m for m in template.sources if m != filepattern] if 'sources' in kwds: # allow users to add .c files etc. for source in kwds['sources']: Just put this into the line above. kwds['name'] = module_name + sources = [file] + [m for m in template.sources if m != filepattern] if 'sources' in kwds: # allow users to add .c files etc. for source in kwds['sources']:
codereview_python_data_12745
"""Retrieve terms for given source string and Locale.""" try: source_string = request.GET["source_string"] - locale = request.GET["locale"] except MultiValueDictKeyError as e: return JsonResponse( {"status": False, "message": "Bad Request: {error}".format(error=e)}, status=400, ) - locale = get_object_or_404(Locale, code=locale) payload = [] for term in Term.objects.for_string(source_string): Nit: I'd rename this to `locale_code` to avoid having the `locale` variable contain two wildly different types of values. *(Gosh look at me I'm becoming a TypeScript developer! :scream: )* """Retrieve terms for given source string and Locale.""" try: source_string = request.GET["source_string"] + locale_code = request.GET["locale] except MultiValueDictKeyError as e: return JsonResponse( {"status": False, "message": "Bad Request: {error}".format(error=e)}, status=400, ) + locale = get_object_or_404(Locale, code=locale_code) payload = [] for term in Term.objects.for_string(source_string):
codereview_python_data_12751
valid_values=self.valid_values, minlen=self.minlen, maxlen=self.maxlen, forbidden=self.forbidden, - _completions=self._completions, encoding=self.encoding) This should probably be `completions` - while it's saved as `self._completions`, it's passed to the constructor as simply `completions`. valid_values=self.valid_values, minlen=self.minlen, maxlen=self.maxlen, forbidden=self.forbidden, + completions=self._completions, encoding=self.encoding)
codereview_python_data_12753
p = p[:1] + struct.pack(">B", res[1]) + p[2:] if res[0] != 0: p = p[:-res[0]] - print(repr(p)) return p + pay That's probably a debug print(). You can remove it. p = p[:1] + struct.pack(">B", res[1]) + p[2:] if res[0] != 0: p = p[:-res[0]] return p + pay
codereview_python_data_12756
if 'Range' not in headers: return - if response.status_code == 404: return s3_client = aws_stack.connect_to_service('s3') nitpick: Should we return here for any type of error status code? ``` if response.status_code >= 400: ``` if 'Range' not in headers: return + if response.status_code >= 400: return s3_client = aws_stack.connect_to_service('s3')
codereview_python_data_12760
I think this could become `min(extract(year from to_timestamp(listened_at))::INT) = %s` and then the array_agg in select clause could become just a `count(*)`.
codereview_python_data_12769
to=settings.PROJECT_MANAGERS, cc=locale.managers_group.user_set.exclude(pk=user.pk) .values_list('email', flat=True) if locale else '', ).send() else: raise ImproperlyConfigured( What's the purpose of this change? to=settings.PROJECT_MANAGERS, cc=locale.managers_group.user_set.exclude(pk=user.pk) .values_list('email', flat=True) if locale else '', + reply_to=[user.email], ).send() else: raise ImproperlyConfigured(
codereview_python_data_12771
origin = req.identifier op = req.operation - s, reason = self.canNymRequestBeProcessed(origin, op) - if not s: - raise InvalidClientRequest(origin, req.reqId, reason) - try: originRole = self.idrCache.getRole( origin, isCommitted=False) or None Do we need this call at all? Doesn't `_validateExistingNym` already do similar validation based on `Authoriser`? origin = req.identifier op = req.operation try: originRole = self.idrCache.getRole( origin, isCommitted=False) or None
codereview_python_data_12786
code.putln("new((void*)&(p->%s)) %s();" % ( entry.cname, decl_code)) - if scope.is_internal == 2 and py_attrs: # create a hybrid "optional initialization" where the kwds # argument is used to signal that initialization happens. # This is needed for pickleable closures, where the attributes I have a feeling that `is_x` should only be `True` or `False` and if we want to represent more states then it should not be in `is_x` pattern code.putln("new((void*)&(p->%s)) %s();" % ( entry.cname, decl_code)) + if scope.internal_mode == 2 and py_attrs: # create a hybrid "optional initialization" where the kwds # argument is used to signal that initialization happens. # This is needed for pickleable closures, where the attributes
codereview_python_data_12792
# coding: utf-8 # pylint: disable = C0103 """Plotting library.""" -from __future__ import absolute_import -from __future__ import division import warnings from copy import deepcopy Please make it one-line. ``` from __future__ import absolute_import, division ``` # coding: utf-8 # pylint: disable = C0103 """Plotting library.""" +from __future__ import absolute_import, division import warnings from copy import deepcopy
codereview_python_data_12794
else: cmd = compose_cmd(['dpkg', '--compare-versions', v1, 'gt', v2]) try: - NodeControlUtil.run_shell_script(cmd) except ShellError as exc: if exc.stderr: raise Why do we need this? else: cmd = compose_cmd(['dpkg', '--compare-versions', v1, 'gt', v2]) try: + NodeControlUtil.run_shell_script_extended(cmd) except ShellError as exc: if exc.stderr: raise
codereview_python_data_12798
np.random.seed(0) image1 = np.random.randint(0, 255, (3, 5, 5), np.uint8) image2 = np.random.randint(0, 255, (3, 5, 5), np.uint8) factor = tf.random.uniform(shape=[], maxval=1, dtype=tf.dtypes.float32, seed=0) blended = compose_ops.blend( tf.convert_to_tensor(image1), tf.convert_to_tensor(image2), factor Need to seed `tf` as well. np.random.seed(0) image1 = np.random.randint(0, 255, (3, 5, 5), np.uint8) image2 = np.random.randint(0, 255, (3, 5, 5), np.uint8) + tf.random.set_seed(0) factor = tf.random.uniform(shape=[], maxval=1, dtype=tf.dtypes.float32, seed=0) blended = compose_ops.blend( tf.convert_to_tensor(image1), tf.convert_to_tensor(image2), factor
codereview_python_data_12804
if self.add_extra_convs == 'on_input': orig = inputs[self.backbone_end_level - 1] outs.append(self.fpn_convs[used_backbone_levels](orig)) elif self.add_extra_convs == 'on_output': outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) else: `on_lateral` can also be implemented if self.add_extra_convs == 'on_input': orig = inputs[self.backbone_end_level - 1] outs.append(self.fpn_convs[used_backbone_levels](orig)) + elif self.add_extra_convs == 'on_lateral': + outs.append(self.fpn_convs[used_backbone_levels](laterals[-1])) elif self.add_extra_convs == 'on_output': outs.append(self.fpn_convs[used_backbone_levels](outs[-1])) else:
codereview_python_data_12810
check_parameters_default_constructible(name, estimator) check_no_fit_attributes_set_in_init(name, estimator) # we cannot leave default params (see https://github.com/Microsoft/LightGBM/issues/833) - estimator = estimator(min_data=1, min_data_in_bin=1) for check in _yield_all_checks(name, estimator): if check.__name__ == 'check_estimators_nan_inf': continue # skip test because LightGBM deals with nan Can you try`min_child_samples=1`, not `min_data=1`? check_parameters_default_constructible(name, estimator) check_no_fit_attributes_set_in_init(name, estimator) # we cannot leave default params (see https://github.com/Microsoft/LightGBM/issues/833) + estimator = estimator(min_child_samples=1, min_data_in_bin=1) for check in _yield_all_checks(name, estimator): if check.__name__ == 'check_estimators_nan_inf': continue # skip test because LightGBM deals with nan
codereview_python_data_12819
def get_ids(obj): """ - Returns a list of all ids in the supplied object. - Useful for determining if a json representation contains - references to other objects. """ ids = [] if isinstance(obj, list): Might be worth mentioning why this knowing if the JSON has references is useful information (if it can be summarized in a simple way). def get_ids(obj): """ + Returns a list of all ids in the supplied object. Useful for + determining if a json representation contains references to other + objects. Since only the references between objects are required + this allows determining whether a particular branch of the json + representation is required. """ ids = [] if isinstance(obj, list):
codereview_python_data_12821
`gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and `gt_masks_ignore`. - - If the crop is not located in any bbox and `allow_negative_crop` is - set to False, skip this image. """ def __init__(self, crop_size, allow_negative_crop=False): The description `the crop is not located in any bbox` is unclear. `gt_bboxes` corresponds to `gt_labels` and `gt_masks`, and `gt_bboxes_ignore` corresponds to `gt_labels_ignore` and `gt_masks_ignore`. + - If the crop does not contain any gt-bbox region and + `allow_negative_crop` is set to False, skip this image. """ def __init__(self, crop_size, allow_negative_crop=False):
codereview_python_data_12830
fuzzer_path)): fuzzing_strategies.append(strategy.RECOMMENDED_DICTIONARY_STRATEGY.name) - # Entropic isn't compatible with focus function. - if (strategy_pool.do_strategy(strategy.ENTROPIC_STRATEGY) and not any( - [arg.startswith(constants.FOCUS_FUNCTION_FLAG) for arg in arguments])): - arguments.append(constants.ENTROPIC_ARGUMENT) - fuzzing_strategies.append(strategy.ENTROPIC_STRATEGY.name) - if strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY): arguments.append(constants.VALUE_PROFILE_ARGUMENT) fuzzing_strategies.append(strategy.VALUE_PROFILE_STRATEGY.name) Should we prioritize dataflow over entropic or entropic over dataflow. i think entropic over dataflow, so move the block up and then check entropic not in any arguments in dataflow check ? if does not matter, skip. fuzzer_path)): fuzzing_strategies.append(strategy.RECOMMENDED_DICTIONARY_STRATEGY.name) if strategy_pool.do_strategy(strategy.VALUE_PROFILE_STRATEGY): arguments.append(constants.VALUE_PROFILE_ARGUMENT) fuzzing_strategies.append(strategy.VALUE_PROFILE_STRATEGY.name)
codereview_python_data_12835
for rel in rel_names }) - if weight: - self.basis = dglnn.WeightBasis(in_feat, out_feat, num_bases, len(self.rel_names)) - else: - self.basis = None # bias if bias: I don't like this. This is more like a hack. for rel in rel_names }) + self.use_weight = weight + self.use_basis = num_bases >= len(self.rel_names) and weight + if self.use_weight: + if self.use_basis: + self.basis = dglnn.WeightBasis(in_feat, out_feat, num_bases, len(self.rel_names)) + else: + self.weight = nn.Parameter(th.Tensor(len(self.rel_names), in_feat * out_feat)) # bias if bias:
codereview_python_data_12838
import time import base64 import requests don't rename this import import time import base64 +from typing import Sequence import requests
codereview_python_data_12841
# Always record the affected head version. start_revision, end_revision = get_start_and_end_revision( testcase.regression, testcase.job_type) - build_revision_mappings = revisions.get_build_to_revision_mappings(None) impacts.head = get_head_impact(build_revision_mappings, start_revision, end_revision) no need to pass `None` explicitly here since that's the default. # Always record the affected head version. start_revision, end_revision = get_start_and_end_revision( testcase.regression, testcase.job_type) + build_revision_mappings = revisions.get_build_to_revision_mappings() impacts.head = get_head_impact(build_revision_mappings, start_revision, end_revision)
codereview_python_data_12852
Returns Helper function which handles potential transpose. """ - if self._is_transposed: - - def helper(df, internal_indices=[]): - return pandas_func(df, **kwargs) - - else: - - def helper(df, internal_indices=[]): - return pandas_func(df, **kwargs) return helper you can remove the `if...else` here Returns Helper function which handles potential transpose. """ + def helper(df, internal_indices=[]): + return pandas_func(df, **kwargs) return helper
codereview_python_data_12853
for cname, type, manage_ref in code.funcstate.temps_in_use(): save_cname = code.funcstate.closure_temps.allocate_temp(type) saved.append((cname, save_cname, type)) - code.put_xgiveref(cname, type) if type.is_cpp_class: cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % cname code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname)) code.put_xgiveref(Naming.retval_cname, py_object_type) These two cases are exclusive. ```suggestion if type.is_cpp_class: cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % cname else: code.put_xgiveref(cname, type) ``` for cname, type, manage_ref in code.funcstate.temps_in_use(): save_cname = code.funcstate.closure_temps.allocate_temp(type) saved.append((cname, save_cname, type)) if type.is_cpp_class: + code.globalstate.use_utility_code( + UtilityCode.load_cached("MoveIfSupported", "CppSupport.cpp")) cname = "__PYX_STD_MOVE_IF_SUPPORTED(%s)" % cname + else: + code.put_xgiveref(cname, type) code.putln('%s->%s = %s;' % (Naming.cur_scope_cname, save_cname, cname)) code.put_xgiveref(Naming.retval_cname, py_object_type)
codereview_python_data_12857
class MemoryCgroup(CGroup): - def __init__(self, extension_name, cgroup_path, controller="memory"): """ - Initialize data collection for the Memory hierarchy :param CGroupsTelemetry cgt: The telemetry object for which memory metrics should be collected :return: """ - super(MemoryCgroup, self).__init__(extension_name, cgroup_path, controller) def _get_memory_usage(self): """ Is there a reason you are not using `self.controller` instead of `'memory'` like in L224? class MemoryCgroup(CGroup): + def __init__(self, name, cgroup_path, controller="memory"): """ + Initialize _data collection for the Memory hierarchy :param CGroupsTelemetry cgt: The telemetry object for which memory metrics should be collected :return: """ + super(MemoryCgroup, self).__init__(name, cgroup_path, controller) def _get_memory_usage(self): """
codereview_python_data_12858
elif irtyputils.is_collection(styperef) and not expr.ser_safe: val = coll_as_json_object(expr, styperef=styperef, env=env) elif ( irtyputils.is_bytes(styperef) and not expr.ser_safe I'd add a TODO: note here to remind us to find a way to generalize this, since `bytes` wouldn't be the last scalar with a custom JSON cast. elif irtyputils.is_collection(styperef) and not expr.ser_safe: val = coll_as_json_object(expr, styperef=styperef, env=env) + # TODO: We'll probably want to generalize this to other custom JSON + # casts once they exist. elif ( irtyputils.is_bytes(styperef) and not expr.ser_safe
codereview_python_data_12870
"userscripts", cmd) log.misc.debug("Userscript to run: {}".format(cmd_path)) - runner.run(cmd, *args, env=env, verbose=verbose) runner.finished.connect(commandrunner.deleteLater) runner.finished.connect(runner.deleteLater) Shouldn't this be `cmd_path`? "userscripts", cmd) log.misc.debug("Userscript to run: {}".format(cmd_path)) + runner.run(cmd_path, *args, env=env, verbose=verbose) runner.finished.connect(commandrunner.deleteLater) runner.finished.connect(runner.deleteLater)
codereview_python_data_12875
for iter in range(args.iters): for pipe in pipes: - image, labels, \ image_ssd_crop, image_decode_crop, \ image_slice_cpu, image_slice_gpu, \ boxes_ssd_crop, boxes_random_crop, \ Do you use this `image` for anything? for iter in range(args.iters): for pipe in pipes: + labels, \ image_ssd_crop, image_decode_crop, \ image_slice_cpu, image_slice_gpu, \ boxes_ssd_crop, boxes_random_crop, \
codereview_python_data_12882
def run(self): connection = self.output().connect() - if self.autocommit: - connection.autocommit=self.autocommit cursor = connection.cursor() sql = self.query You'll want to initialize this in `rdbms.Query`. Set its default to `False`? def run(self): connection = self.output().connect() + connection.autocommit=self.autocommit cursor = connection.cursor() sql = self.query
codereview_python_data_12887
if tfm_y is not None: self.tfm_y=tfm_y self.tfms_y=tfms return self def transform_labels(self, tfms:TfmList=None, **kwargs): Don't we need to pass the kwargs to self.tfmargs_y here? I think this is going to make camvid and object detect fail. if tfm_y is not None: self.tfm_y=tfm_y self.tfms_y=tfms + self.tfmargs_y = kwargs return self def transform_labels(self, tfms:TfmList=None, **kwargs):
codereview_python_data_12893
display_name=None, parent=None, lifecycle_state=BucketLifecycleState.UNSPECIFIED, - lifecycle=None): """Initialize. Args: Perhaps a name like `lifecycle_configuration` or `lifecycle_rules` will help (a little bit) to distinguish between this and lifecycle_state. display_name=None, parent=None, lifecycle_state=BucketLifecycleState.UNSPECIFIED, + retentions=None): """Initialize. Args:
codereview_python_data_12894
assert F.allclose(h_0, h_1) assert h_0.shape[-1] == 10 def test_gin_conv(): for aggregator_type in ['mean', 'max', 'sum']: Try using sed to replace all the occurance of torch with tf. assert F.allclose(h_0, h_1) assert h_0.shape[-1] == 10 +def test_appnp_conv(): + g = dgl.DGLGraph(sp.sparse.random(100, 100, density=0.1), readonly=True) + appnp = nn.APPNPConv(10, 0.1) + feat = F.randn((100, 5)) + + h = appnp(g, feat) + assert h.shape[-1] == 5 def test_gin_conv(): for aggregator_type in ['mean', 'max', 'sum']:
codereview_python_data_12899
"priority", "contact_person", "pk", - "enabled", - "sync_disabled", "system_project", - "pretranslation_enabled", "visibility", ) ordering = ("disabled",) Nit: let's change the order of these columns: ``` "system_project", "visibility", "pretranslation_enabled", "sync_disabled", "enabled, ``` "priority", "contact_person", "pk", "system_project", "visibility", + "pretranslation_enabled", + "sync_disabled", + "enabled", ) ordering = ("disabled",)
codereview_python_data_12901
return results def __repr__(self): - repr_str = self.__class__.__name__ - repr_str += f'(transforms={self.transforms})' return repr_str `repr_str = self.__class__.__name__ + f'(transforms={self.transforms})'` return results def __repr__(self): + repr_str = self.__class__.__name__ + f'(transforms={self.transforms})' return repr_str
codereview_python_data_12907
DALIDatasetWithInputs.__doc__ = _experimental_dataset_docstring _insert_experimental_member(DALIDatasetWithInputs, "DALIDatasetWithInputs") - class input: """Wrapper for input passed to DALIDataset. Allows to pass additional options. Parameters why not `Input`? I personally got confused when I saw `experimental.input` earlier, as I thought it was an instance DALIDatasetWithInputs.__doc__ = _experimental_dataset_docstring _insert_experimental_member(DALIDatasetWithInputs, "DALIDatasetWithInputs") + class Input: """Wrapper for input passed to DALIDataset. Allows to pass additional options. Parameters
codereview_python_data_12912
class Coin(object): def __init__(self, p): - self.probabibility = p assert 0 < p < 1 n_bits = 1 What is `force` in context of this function? class Coin(object): def __init__(self, p): + self.probability = p assert 0 < p < 1 n_bits = 1
codereview_python_data_12925
self.after_conv3_plugin_names = self.make_block_plugins( planes * self.expansion, self.after_conv3_plugins) - self.rfp_inplanes = rfp_inplanes - if self.rfp_inplanes: - self.rfp_conv = build_conv_layer( - None, - self.rfp_inplanes, - planes * self.expansion, - 1, - stride=1, - bias=True) - constant_init(self.rfp_conv, 0) - def make_block_plugins(self, in_channels, plugins): """ make plugins for block We may move it into `init_weights()`. self.after_conv3_plugin_names = self.make_block_plugins( planes * self.expansion, self.after_conv3_plugins) def make_block_plugins(self, in_channels, plugins): """ make plugins for block
codereview_python_data_12927
# sort the results ordered_results = [] for res in itertools.zip_longest(*part_list): - ordered_results.extend(filter(lambda x: x is not None, list(res))) # the dataloader may pad some samples ordered_results = ordered_results[:size] # remove tmp dir `list(res)` is no longer needed, `res` is enough. # sort the results ordered_results = [] for res in itertools.zip_longest(*part_list): + ordered_results.extend(filter(lambda x: x is not None, res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] # remove tmp dir
codereview_python_data_12932
# Now we prepare the requested data if requestOptions['graphType'] == 'pie': - targets = [target for target in requestOptions['targets'] if target.find(':') < 0] if settings.REMOTE_PREFETCH_DATA and not requestOptions.get('localOnly'): log.rendering("Prefetching remote data") pathExpressions = extractPathExpressions(targets) prefetchRemoteData(STORE.remote_stores, requestContext, pathExpressions) Looking good, let's just move this inside the `if` to avoid doing the extra work if we don't need it. # Now we prepare the requested data if requestOptions['graphType'] == 'pie': if settings.REMOTE_PREFETCH_DATA and not requestOptions.get('localOnly'): + targets = [target for target in requestOptions['targets'] if target.find(':') < 0] log.rendering("Prefetching remote data") pathExpressions = extractPathExpressions(targets) prefetchRemoteData(STORE.remote_stores, requestContext, pathExpressions)
codereview_python_data_12933
if _find_unsafe(s) is None: return s - if not is_win32: - # use single quotes, and put single quotes into double quotes - # the string $'b is then quoted as '$'"'"'b' - return "'" + s.replace("'", "'\"'\"'") + "'" - else: - # use double quote, windows shell does not support single quotes - return '"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"' - - __all__ = ["is_py2", "is_py3", "is_win32", "input", "stdout", "file", "shlex_quote", "get_terminal_size"] Can revert this as it's not being used any more. if _find_unsafe(s) is None: return s __all__ = ["is_py2", "is_py3", "is_win32", "input", "stdout", "file", "shlex_quote", "get_terminal_size"]
codereview_python_data_12938
self.update_id = update.update_id+1 if update.message: self.bot.logger.info("message from {} ({}): {}".format(update.message.from_user.username, update.message.from_user.id, update.message.text)) - if self.master and str(self.master) != str(update.message.from_user.id): continue if update.message.text == "/info": stats = self._get_player_stats() Original code has two way to config the master: username or chat_id, you changed to only chat_id. self.update_id = update.update_id+1 if update.message: self.bot.logger.info("message from {} ({}): {}".format(update.message.from_user.username, update.message.from_user.id, update.message.text)) + if self.master and self.master not in [update.message.from_user.id, "@{}".format(update.message.from_user.username)]: continue if update.message.text == "/info": stats = self._get_player_stats()
codereview_python_data_12942
self._session = Session(self._user, {'id': sess_id}) self._session['userId'] = self.parameters.get("user-id", None) self._session['testId'] = self.parameters.get("test-id", None) - data_address = self.settings.get("data-address", None) - if data_address: - self._session['data-address'] = data_address self._test = Test(self._user, {'id': self._session['testId']}) exc = TaurusConfigError("Need signature for session") self._session.data_signature = self.parameters.get("signature", exc) Why do we need this change? self._session = Session(self._user, {'id': sess_id}) self._session['userId'] = self.parameters.get("user-id", None) self._session['testId'] = self.parameters.get("test-id", None) self._test = Test(self._user, {'id': self._session['testId']}) exc = TaurusConfigError("Need signature for session") self._session.data_signature = self.parameters.get("signature", exc)
codereview_python_data_12943
def add_to_cgroup(): self.cgroups_api.add_process_to_extension_cgroup(extension_name, os.getpid()) - self._invoke_cgroup_operation(add_to_cgroup, "Failed add extension '{0}' to its cgroup; resource usage will not be tracked".format(extension_name)) process = subprocess.Popen( command, nit: "Failed add extension" -> "Failed to add extension" def add_to_cgroup(): self.cgroups_api.add_process_to_extension_cgroup(extension_name, os.getpid()) + self._invoke_cgroup_operation(add_to_cgroup, "Failed to add extension '{0}' to its cgroup; resource usage will not be tracked".format(extension_name)) process = subprocess.Popen( command,
codereview_python_data_12949
triclinic and must be provided in the same format as returned by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx, ly, lz, alpha, beta, gamma]``. - method : {'bruteforce', 'pkdtree'}, optional Keyword to override the automatic guessing of the employed search method. return_distances : bool, optional ```suggestion method : {'bruteforce', 'nsgrid', 'pkdtree'}, optional ``` as above triclinic and must be provided in the same format as returned by :attr:`MDAnalysis.coordinates.base.Timestep.dimensions`: ``[lx, ly, lz, alpha, beta, gamma]``. + method : {'bruteforce', 'nsgrid', 'pkdtree'}, optional Keyword to override the automatic guessing of the employed search method. return_distances : bool, optional
codereview_python_data_12953
""" Generate int data with caching. Parameters ---------- nrows: int what does "caching" mean here? """ Generate int data with caching. + The generated data are saved in the dictionary and on a subsequent call, + if the keys match, saved data will be returned. Therefore, we need + to carefully monitor the changing of saved data and make its copy if needed. + Parameters ---------- nrows: int
codereview_python_data_12954
Examples -------- >>> G = nx.star_graph(10) - >>> soc = second_order_centrality(G) >>> print(sorted(soc.items(), key=lambda x:x[1])[0][0]) # pick first id 0 This call should use ```nx.second_order_centrality``` instead of ```second_order_centrality```. It works fine as is for nosetests, but once the docstring is process by sphinx to buid the html pages, the module context is lost so we have to refer to it through the ```nx.``` syntax. This also tests the namespace to make sure ```second_order_centrality``` is at the top level. Examples -------- >>> G = nx.star_graph(10) + >>> soc = nx.second_order_centrality(G) >>> print(sorted(soc.items(), key=lambda x:x[1])[0][0]) # pick first id 0
codereview_python_data_12955
@classmethod def _process_comm_msg(cls, msg): """ - Processes global comm messages to process deletions. """ if msg['event_type'] == 'delete': Renderer._delete_plot(msg['id']) Maybe this method could have a more specific name like ``_delete_plot_by_id``? @classmethod def _process_comm_msg(cls, msg): """ + Processes comm messages to handle global actions such as + cleaning up plots. """ if msg['event_type'] == 'delete': Renderer._delete_plot(msg['id'])
codereview_python_data_12957
class _EmptyExtensionsGoalState(ExtensionsGoalState): def get_redacted_text(self): - return None class _ExtensionsGoalStateFromExtensionsConfig(ExtensionsGoalState): NIT: Maybe make these values an Enum? class _EmptyExtensionsGoalState(ExtensionsGoalState): def get_redacted_text(self): + return '' class _ExtensionsGoalStateFromExtensionsConfig(ExtensionsGoalState):
codereview_python_data_12958
'sort_series' : 0, } -class RenderConfig(object): def __init__(self): self.last_read = 0 self.schemes = [defaultScheme] Use new style classes please. For new code please inherit all classes from object. We'll fix the existing stuff slowly as it is changed. 'sort_series' : 0, } +class RenderConfig: def __init__(self): self.last_read = 0 self.schemes = [defaultScheme]
codereview_python_data_12960
-def on_square(integer): pass -def total_after(integer): pass `integer_number` sounds more natural, don't you agree? +def on_square(integer_number): pass +def total_after(integer_number): pass
codereview_python_data_12961
bindings = {} defaults = config.val.bindings.default modes = set(defaults.keys()).union(config.val.bindings.commands) for mode in modes: bindings[mode] = config.key_instance.get_bindings_for(mode) I haven't thought about this before, but having a set here actually makes them show up in a random order. I think it'd be good to have them sorted, and force `normal` to be the first. Something like this would probably work: ```python modes.remove('normal') modes = ['normal'] + sorted(list(modes)) ``` bindings = {} defaults = config.val.bindings.default modes = set(defaults.keys()).union(config.val.bindings.commands) + modes.remove('normal') + modes = ['normal'] + sorted(list(modes)) for mode in modes: bindings[mode] = config.key_instance.get_bindings_for(mode)
codereview_python_data_12962
self.distributed_servers = self.execution.get('distributed', self.distributed_servers) scenario = self.get_scenario() self.resource_files() - system_props = self.settings.get("system-properties") if Scenario.SCRIPT in scenario: self.original_jmx = self.__get_script() self.engine.existing_artifact(self.original_jmx) elif "requests" in scenario: - if system_props: - if not system_props.get("sun.net.inetaddr.ttl"): - system_props["sun.net.inetaddr.ttl"] = 0 self.original_jmx = self.__jmx_from_requests() else: raise ValueError("There must be a JMX file to run JMeter") Why do we set proprty here? If no prop has been set - then we should not set it. self.distributed_servers = self.execution.get('distributed', self.distributed_servers) scenario = self.get_scenario() self.resource_files() + if Scenario.SCRIPT in scenario: self.original_jmx = self.__get_script() self.engine.existing_artifact(self.original_jmx) elif "requests" in scenario: self.original_jmx = self.__jmx_from_requests() else: raise ValueError("There must be a JMX file to run JMeter")
codereview_python_data_12965
:Author: Zhiyi Wu :Year: 2017-2018 :Copyright: GNU Public License v3 -:Maintainer: Zhiyi Wu <zhiyi.wu@gtc.ox.ac.uk>, `@xiki-tempula`_ on GitHub .. _`@xiki-tempula`: https://github.com/xiki-tempula to ensure proper backwards compatibility we'll need six.move's range and zip too (worth having a look for other deprecated py2 things, but I _think_ that's it?). Probably worth adding absolute_import here too. :Author: Zhiyi Wu :Year: 2017-2018 :Copyright: GNU Public License v3 +:Maintainer: Zhiyi Wu <zhiyi.wu@bioch.ox.ac.uk>, `@xiki-tempula`_ on GitHub .. _`@xiki-tempula`: https://github.com/xiki-tempula
codereview_python_data_12968
from plenum.test.helper import sdk_send_and_check def test_send_get_revoc_reg_def(looper, txnPoolNodeSet, sdk_wallet_steward, Please check all data, not just ID from plenum.test.helper import sdk_send_and_check +def compare_request_reply(req, reply): + assert req['operation'][CRED_DEF_ID] == reply['result']['data'][CRED_DEF_ID] + assert req['operation'][ID] == reply['result']['data'][ID] + assert req['operation'][TYPE] == reply['result']['data'][TYPE] + assert req['operation'][TAG] == reply['result']['data'][TAG] + assert req['operation'][VALUE] == reply['result']['data'][VALUE] + + def test_send_get_revoc_reg_def(looper, txnPoolNodeSet, sdk_wallet_steward,
codereview_python_data_12983
) except Exception, e: current_app.logger.error("Redis rpush playing_now write error: " + str(e)) - raise InternalServerError("Cannot record playing_now at this time.") else: submit.append(listen) Will this error message be in json format? ) except Exception, e: current_app.logger.error("Redis rpush playing_now write error: " + str(e)) + raise ServiceUnavailable("Cannot record playing_now at this time.") else: submit.append(listen)
codereview_python_data_13001
root = etree.Element("FinalStatus") report_info = get_bza_report_info(self.engine, self.log) if report_info: - link, text = report_info[0] - report_element = etree.Element("BlazeMeterReport", link=link, name=text) root.append(report_element) if self.last_sec: for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]): Let's be neutral with tag names. Let's just have "ReportURL" root = etree.Element("FinalStatus") report_info = get_bza_report_info(self.engine, self.log) if report_info: + link, _ = report_info[0] + report_element = etree.Element("ReportURL") + report_element.text = link root.append(report_element) if self.last_sec: for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
codereview_python_data_13002
files_to_create.append((vmextensions_slice, _VMEXTENSIONS_SLICE_CONTENTS)) if not os.path.exists(logcollector_slice): - from azurelinuxagent.ga.collect_logs import CollectLogsHandler - cpu_quota, memory_limit = CollectLogsHandler.get_resource_limits() - slice_contents = _LOGCOLLECTOR_SLICE_CONTENTS_FMT.format(cpu_quota=cpu_quota, memory_limit=memory_limit) files_to_create.append((logcollector_slice, slice_contents)) if fileutil.findre_in_file(agent_unit_file, r"Slice=") is not None: Update: this indeed does cause a cyclical import, so I'm inclined to leave it as is. files_to_create.append((vmextensions_slice, _VMEXTENSIONS_SLICE_CONTENTS)) if not os.path.exists(logcollector_slice): + slice_contents = _LOGCOLLECTOR_SLICE_CONTENTS_FMT.format(cpu_quota=_LOGCOLLECTOR_CPU_QUOTA, + memory_limit=_LOGCOLLECTOR_MEMORY_LIMIT) + files_to_create.append((logcollector_slice, slice_contents)) if fileutil.findre_in_file(agent_unit_file, r"Slice=") is not None:
codereview_python_data_13007
return (w*dpi, h*dpi) - def patch(self, plot): data = None if self.mode != 'nbagg': if self.mode == 'mpld3': I think `diff` would be a clearer name. return (w*dpi, h*dpi) + def diff(self, plot): + """ + Returns the latest plot data to update an existing plot. + """ data = None if self.mode != 'nbagg': if self.mode == 'mpld3':
codereview_python_data_13010
# operand ExprNode # arg_type ExprNode # is_variable boolean - # mangle_cname string type = PyrexTypes.error_type Can't we use a temp variable for this instead of an explicitly declared one? (Look for usages of allocate_temp() in this module.) # operand ExprNode # arg_type ExprNode # is_variable boolean type = PyrexTypes.error_type
codereview_python_data_13011
Just like above, how about this? ``` KEY_AGE_LESS_THAN_MAX_AGE = 99 KEY_DATETIME_LESS_THAN_MAX_AGE = ... KEY_TIME_LESS_THAN_MAX_AGE = ... ```
codereview_python_data_13012
return "Multipart form", self._format(v) def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float: - if content_type and content_type.startswith("multipart/form-data"): - return 1 - else: - return 0 see above - this is only used to select the correct view, we don't need to handle the boundary information here. return "Multipart form", self._format(v) def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float: + return float(content_type == "multipart/form-data")
codereview_python_data_13018
'Characters %r are present in both whitelist_characters=%r, and ' 'blacklist_characters=%r' % ( sorted(overlap), whitelist_characters, blacklist_characters)) - blacklist_categories = validate_categories(blacklist_categories) - whitelist_categories = validate_categories(whitelist_categories) both_cats = set( blacklist_categories or ()).intersection(whitelist_categories or ()) if both_cats: I find the "e.g. longer unicode strings" kinda confusing even knowing what it means! (also it's technically wrong: The unicode string could be one character). Maybe "this includes unicode strings" instead? 'Characters %r are present in both whitelist_characters=%r, and ' 'blacklist_characters=%r' % ( sorted(overlap), whitelist_characters, blacklist_characters)) + blacklist_categories = as_general_categories( + blacklist_categories, 'blacklist_categories') + whitelist_categories = as_general_categories( + whitelist_categories, 'whitelist_categories') both_cats = set( blacklist_categories or ()).intersection(whitelist_categories or ()) if both_cats:
codereview_python_data_13022
import os import os.path as osp -import imageio import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np def parse_args(): parser = argparse.ArgumentParser(description='Create GIF for demo') Is this a new dependency? can we use API in cv2? import os import os.path as osp import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np +try: + import imageio +except ImportError: + imageio = None + def parse_args(): parser = argparse.ArgumentParser(description='Create GIF for demo')
codereview_python_data_13036
'Compose' ] class BaseTransform: r""" I feel we only need `__call__` and `__repr__`. Could you explain why we need the other methods? 'Compose' ] +# pylint: disable=E0001 class BaseTransform: r"""
codereview_python_data_13040
RESOURCE = 'resource' IAM_POLICY = 'iam_policy' GCS_POLICY = 'gcs_policy' - Supported_TypeClass = [RESOURCE, IAM_POLICY, GCS_POLICY] class InventoryIndex(BASE): either SUPPORTED_TYPECLASS or supported_typeclass. Camelcase only for class names. RESOURCE = 'resource' IAM_POLICY = 'iam_policy' GCS_POLICY = 'gcs_policy' + SUPPORTED_TYPECLASS = [RESOURCE, IAM_POLICY, GCS_POLICY] class InventoryIndex(BASE):
codereview_python_data_13046
>>> idx = torch.Tensor([0,1,2]).to('cuda') >>> output = feats[idx] - For the multi-GPU operation, one can simply think UnifiedTensor like a GPU - tensor. For example, to copy a tensor ``feats`` to multiple GPUs, one - should do the following: - - >>> feats = torch.rand((128,128)) - >>> feats_gpu0 = feats.to('cuda:0') - >>> feats_gpu1 = feats.to('cuda:1') - >>> feats_gpu2 = feats.to('cuda:2') - - Similar to that, to allow multiple GPUs to access the original CPU tensor ``feats`` using UnifiedTensor, one can do the following: >>> feats = torch.rand((128,128)) ```suggestion For the multi-GPU operation, to allow multiple GPUs to access the original CPU tensor ``feats`` using UnifiedTensor, one can do the following: >>> feats = torch.rand((128,128)) >>> feats_gpu0 = dgl.contrib.UnifiedTensor(feats, device=torch.device('cuda:0')) >>> feats_gpu1 = dgl.contrib.UnifiedTensor(feats, device=torch.device('cuda:1')) >>> feats_gpu2 = dgl.contrib.UnifiedTensor(feats, device=torch.device('cuda:2')) Now, the ``cuda:0``, ``cuda:1``, and ``cuda:2`` devices will be able to access the identical tensor located in the CPU memory using ``feats_gpu0``, ``feats_gpu1``, and ``feats_gpu2`` tensors, respectively. One can simply use following operations to slice the sub tensors into different GPU devices directly. >>> feats_idx_gpu0 = torch.randint(128, 16, device='cuda:0') >>> feats_idx_gpu1 = torch.randint(128, 16, device='cuda:1') >>> feats_idx_gpu2 = torch.randint(128, 16, device='cuda:2') >>> sub_feat_gpu0 = feats_gpu0[feats_idx_gpu0] >>> sub_feat_gpu1 = feats_gpu1[feats_idx_gpu1] >>> sub_feat_gpu2 = feats_gpu2[feats_idx_gpu2] ``` >>> idx = torch.Tensor([0,1,2]).to('cuda') >>> output = feats[idx] + For the multi-GPU operation, to allow multiple GPUs to access the original CPU tensor ``feats`` using UnifiedTensor, one can do the following: >>> feats = torch.rand((128,128))
codereview_python_data_13050
return AzureBlobTarget("luigi-test", "movie-cheesy.txt", client, download_when_reading=False) def run(self): - print(client.connection.create_container("luigi-test")) with self.output().open("w") as op: op.write("I'm going to make him an offer he can't refuse.\n") op.write("Toto, I've got a feeling we're not in Kansas anymore.\n") Is there anyway to mock this for Travis testing? return AzureBlobTarget("luigi-test", "movie-cheesy.txt", client, download_when_reading=False) def run(self): + client.connection.create_container("luigi-test") with self.output().open("w") as op: op.write("I'm going to make him an offer he can't refuse.\n") op.write("Toto, I've got a feeling we're not in Kansas anymore.\n")
codereview_python_data_13051
args.strict_rel_part = args.mix_cpu_gpu and (train_data.cross_part == False) args.soft_rel_part = args.mix_cpu_gpu and args.soft_rel_part and train_data.cross_part train_samplers = [] for i in range(args.num_client): train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', - num_workers=1, shuffle=True, exclude_positive=False, rank=i) is `num_workers=1` always the best option? args.strict_rel_part = args.mix_cpu_gpu and (train_data.cross_part == False) args.soft_rel_part = args.mix_cpu_gpu and args.soft_rel_part and train_data.cross_part + args.num_workers = 8 # fix num_workers to 8 train_samplers = [] for i in range(args.num_client): train_sampler_head = train_data.create_sampler(args.batch_size, args.neg_sample_size, args.neg_sample_size, mode='head', + num_workers=args.num_workers, shuffle=True, exclude_positive=False, rank=i)
codereview_python_data_13053
LOGGER.info('SQLite used, disabling parallel threads.') parallel = False - client = _api_client_factory(storage, config, parallel, tracer) crawler_impl = _crawler_factory(storage, progresser, client, parallel) resource = _root_resource_factory(config, client) progresser = crawler_impl.run(resource) No need to pass `tracer` explicitly here, since this method call is in the same context as the calling method. The tracer will be inferred from context and set to the `tracer` argument of the method automatically. LOGGER.info('SQLite used, disabling parallel threads.') parallel = False + client = _api_client_factory(storage, config, parallel) crawler_impl = _crawler_factory(storage, progresser, client, parallel) resource = _root_resource_factory(config, client) progresser = crawler_impl.run(resource)
codereview_python_data_13056
config, root_id, content_type)) - gcs_paths = [] for future in concurrent.futures.as_completed(futures): - gcs_paths.append(future.result()) - return gcs_paths def load_cloudasset_data(engine, config): Make this yield the gcs path instead of return a list, then when the first file is complete the download can begin, rather than having to wait for both exports to complete. config, root_id, content_type)) for future in concurrent.futures.as_completed(futures): + yield future.result() def load_cloudasset_data(engine, config):
codereview_python_data_13063
return None revision_pattern = revision_pattern_from_build_bucket_path(bucket_path) - for build_url_item in build_url_list: - match = re.match(revision_pattern, build_url_item) if not match: continue current_revision = convert_revision_to_integer(match.group(1)) if current_revision == revision: - return build_url_item return None Why not just build_url ? return None revision_pattern = revision_pattern_from_build_bucket_path(bucket_path) + for build_url in build_url_list: + match = re.match(revision_pattern, build_url) if not match: continue current_revision = convert_revision_to_integer(match.group(1)) if current_revision == revision: + return build_url return None
codereview_python_data_13066
# Calculate locs against a tz-naive cal, as the ex_dates are tz- # naive. tz_naive_calendar = calendar.tz_localize(None) day_locs = tz_naive_calendar.get_indexer(ex_dates, method='bfill') Rather than localizing the calendar to naive, can we localize the ex_dates to the calendar's time zone? They should be semantically identical, but using the same calendar prevents allocation of a new hash table. # Calculate locs against a tz-naive cal, as the ex_dates are tz- # naive. + # + # TODO: A better approach here would be to localize ex_date to + # the tz of the calendar, but currently get_indexer does not + # preserve tz of the target, which throws of the comparison. tz_naive_calendar = calendar.tz_localize(None) day_locs = tz_naive_calendar.get_indexer(ex_dates, method='bfill')
codereview_python_data_13075
'--firebase-api-key', type=str, required=True, - help='Firebase web API key.') parser_create_config.add_argument( '--oauth-client-secrets-path', type=str, nit: maybe add "(for authentication)" in help ? '--firebase-api-key', type=str, required=True, + help='Firebase web API key (for authentication).') parser_create_config.add_argument( '--oauth-client-secrets-path', type=str,
codereview_python_data_13082
def icon(self) -> None: raise NotImplementedError - def current_search_match(self) -> (int, int): - raise NotImplementedError - def set_html(self, html: str, base_url: QUrl = QUrl()) -> None: raise NotImplementedError This shouldn't be needed - the caller can just use the two attributes directly. def icon(self) -> None: raise NotImplementedError def set_html(self, html: str, base_url: QUrl = QUrl()) -> None: raise NotImplementedError
codereview_python_data_13084
result : Dict[str, int] Dictionary where keys are worker addresses and values are an open port for LightGBM to use. """ - lightgbm_ports : Set[Client] = set() worker_ip_to_port = {} for worker_address in worker_addresses: port = client.submit( ```suggestion lightgbm_ports: Set[int] = set() ``` This should be `Set[int]`. Each step in the for loop below runs a function that returns an `int`. result : Dict[str, int] Dictionary where keys are worker addresses and values are an open port for LightGBM to use. """ + lightgbm_ports: Set[int] = set() worker_ip_to_port = {} for worker_address in worker_addresses: port = client.submit(
codereview_python_data_13101
return False # Optimization that does not require pulling in issue's actions. - if any([label.lower() == l.lower() for l in issue.labels]): return True for action in issue.actions: nit: no need for surrounding []. It'll make this faster too. return False # Optimization that does not require pulling in issue's actions. + if any(label.lower() == l.lower() for l in issue.labels): return True for action in issue.actions:
codereview_python_data_13102
"-r", "--record", metavar="FILENAME", help=""" - Write stream data to FILENAME while also playing it. You will be prompted if the file already exists. """ same description as `-r` both should have more details. "-r", "--record", metavar="FILENAME", help=""" + Open the stream in the player, while at the same time writing it to FILENAME. You will be prompted if the file already exists. """