id | content |
---|---|
codereview_python_data_9173 | import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils.resource_loader import LazySO
-from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
-from typing import Optional
_image_so = LazySO("custom_ops/image/_image_ops.so")
dtype should be a tensorflow type here. Not sure what's the base class though.
import tensorflow as tf
from tensorflow_addons.image import utils as img_utils
from tensorflow_addons.utils.resource_loader import LazySO
+from tensorflow_addons.utils.types import TensorLike
+from typing import Optional, Type
_image_so = LazySO("custom_ops/image/_image_ops.so") |
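An aside on the `dtype` annotation question raised in codereview_python_data_9173 above: TensorFlow dtypes such as `tf.float32` are instances of `tf.dtypes.DType`, so that class can serve as the type hint the reviewer was unsure about. A minimal sketch (the function name and default below are illustrative, not part of tensorflow_addons):

```python
import tensorflow as tf
from typing import Optional

def cast_or_default(x: tf.Tensor, dtype: Optional[tf.dtypes.DType] = None) -> tf.Tensor:
    # tf.dtypes.DType is the common base class of tf.float32, tf.int64, etc.,
    # so it works as the annotation for an optional dtype argument.
    return tf.cast(x, dtype if dtype is not None else tf.float32)
```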
codereview_python_data_9185 | self.excl_tax = excl_tax
if incl_tax is not None:
self.incl_tax = incl_tax
self.is_tax_known = True
elif tax is not None:
self.incl_tax = excl_tax + tax
self.is_tax_known = True
else:
self.incl_tax = None
I feel like it's better to check both `self.incl_tax == other.incl_tax` and `self.excl_tax == other.excl_tax` instead of looking at `is_tax_known`. What do you think?
self.excl_tax = excl_tax
if incl_tax is not None:
self.incl_tax = incl_tax
+ self.tax = incl_tax - excl_tax
self.is_tax_known = True
elif tax is not None:
self.incl_tax = excl_tax + tax
+ self.tax = tax
self.is_tax_known = True
else:
self.incl_tax = None |
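A minimal sketch of the equality check the reviewer proposes in codereview_python_data_9185, assuming a simplified price object with `excl_tax`/`incl_tax` attributes (the class below is illustrative, not the project's actual `Price` API):

```python
class SimplePrice:
    def __init__(self, excl_tax, incl_tax=None):
        self.excl_tax = excl_tax
        self.incl_tax = incl_tax  # None means the tax component is unknown

    def __eq__(self, other):
        # Compare both components directly rather than relying on a
        # separate is_tax_known flag, as the review comment suggests.
        return (self.excl_tax == other.excl_tax
                and self.incl_tax == other.incl_tax)

assert SimplePrice(10, 12) == SimplePrice(10, 12)
assert SimplePrice(10) != SimplePrice(10, 12)
```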
codereview_python_data_9186 | assert str(r1.seq).replace(".", "-") == str(r2.seq), \
"Seq does not match %s vs %s (%s vs %s)" \
% (r1.seq, r2.seq, r1.id, r2.id)
- else:
- assert str(r1.seq) == str(r2.seq), \
- "Seq does not match %s vs %s (%s vs %s)" \
- % (r1.seq, r2.seq, r1.id, r2.id)
return True
This code block is not needed anymore, slightly further up we have: ``` python assert str(r1.seq) == str(r2.seq) ```
assert str(r1.seq).replace(".", "-") == str(r2.seq), \
"Seq does not match %s vs %s (%s vs %s)" \
% (r1.seq, r2.seq, r1.id, r2.id)
return True |
codereview_python_data_9194 | # check that slicing is possible
try:
self.u.trajectory[0]
- except:
- raise ValueError("Trajectory must support slicing")
self.h = hydrogens
self.a = acceptors
The HBondAnalysis class doesn't replicate the definition of hbonds in this class (eg here you can pass AtomGroups). I think we can eventually deprecate this, but it will have to be after PRs like #2237 which modernise the hbond detection code a little.
# check that slicing is possible
try:
self.u.trajectory[0]
+ except Exception:
+ raise_from(ValueError("Trajectory must support slicing"), None)
self.h = hydrogens
self.a = acceptors |
codereview_python_data_9201 | ... id="NP_418483.1", name="b4059",
... description="ssDNA-binding protein",
... dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"])
- >>> rec
SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF'), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
At the python prompt you can also use this shorthand:
Is it worth leaving this one since it is in the ``__repr__`` docstring, otherwise I think the text above needs a slight rewording.
... id="NP_418483.1", name="b4059",
... description="ssDNA-binding protein",
... dbxrefs=["ASAP:13298", "GI:16131885", "GeneID:948570"])
+ >>> print(repr(rec))
SeqRecord(seq=Seq('MASRGVNKVILVGNLGQDPEVRYMPNGGAVANITLATSESWRDKATGEMKEQTE...IPF'), id='NP_418483.1', name='b4059', description='ssDNA-binding protein', dbxrefs=['ASAP:13298', 'GI:16131885', 'GeneID:948570'])
At the python prompt you can also use this shorthand: |
codereview_python_data_9203 | ThreeBytesField("seq", RandShort()),
ConditionalField(BitField("msg_priority", 0, 4),
lambda pkt:pkt.MP == 1),
- ConditionalField(BitField("SPARE3_MP1", 0, 4),
- lambda pkt:pkt.MP == 1),
- ConditionalField(ByteField("SPARE3_MP0", 0),
- lambda pkt:pkt.MP == 0)
]
I think that the proper way of fixing this would be something like ```python ConditionalField( MultipleTypeField( [(BitField("SPARE3", 0, 4), lambda pkt: pkt.MP == 1)], ByteField("SPARE3", 0) ), lambda pkt: pkt.MP in [0, 1] ) ```
ThreeBytesField("seq", RandShort()),
ConditionalField(BitField("msg_priority", 0, 4),
lambda pkt:pkt.MP == 1),
+ ConditionalField(
+ MultipleTypeField(
+ [(BitField("SPARE3", 0, 4), lambda pkt: pkt.MP == 1)],
+ ByteField("SPARE3", 0)
+ ), lambda pkt: pkt.MP in [0, 1]
+ )
] |
codereview_python_data_9211 | for line in data:
for team, result in parse_game(line):
- print(team)
- print(result)
- print(table)
table[team][result] += 1
return format_table(table)
Please remove all `print()` statements.
for line in data:
for team, result in parse_game(line):
table[team][result] += 1
return format_table(table) |
codereview_python_data_9212 | super(MultichannelPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
- self.reader = ops.readers.File(files = multichannel_tiff_files)
decoder_device = 'mixed' if self.device == 'gpu' else 'cpu'
self.decoder = ops.decoders.Image(device = decoder_device, output_type = types.ANY_DATA)
Nitpick: Shouldn't named parameters go without spaces around assignment?
super(MultichannelPipeline, self).__init__(batch_size, num_threads, device_id)
self.device = device
+ self.reader = ops.readers.File(files=multichannel_tiff_files)
decoder_device = 'mixed' if self.device == 'gpu' else 'cpu'
self.decoder = ops.decoders.Image(device = decoder_device, output_type = types.ANY_DATA) |
codereview_python_data_9230 | if os.path.isdir(source):
copy_tree(source, destination)
else:
- if not os.path.exists(destination) or \
- os.stat(src).st_mtime - os.stat(
- dst).st_mtime > 1:
shutil.copy2(source, destination)
Are we sure about this indentation? (Not a major issue, just looks weirder than the original one:) )
if os.path.isdir(source):
copy_tree(source, destination)
else:
+ delta = os.stat(src).st_mtime - os.stat(dst).st_mtime
+ if not os.path.exists(destination) or delta > 0:
shutil.copy2(source, destination) |
codereview_python_data_9232 | fields = self.get_tab_fields(idx)
fields['current_title'] = fields['current_title'].replace('&', '&&')
- fields['index'] = str(idx + 1).rjust(2)
title = '' if fmt is None else fmt.format(**fields)
tabbar = self.tabBar()
I don't think this will actually align the indexes, if you have > 100 tabs, the tabs over 100 will be misaligned. In addition with less than 10 tabs, there will be a pointless space.
fields = self.get_tab_fields(idx)
fields['current_title'] = fields['current_title'].replace('&', '&&')
+ fields['index'] = idx + 1
+ fields['aligned_index'] = str(idx + 1).rjust(len(str(self.count())))
title = '' if fmt is None else fmt.format(**fields)
tabbar = self.tabBar() |
codereview_python_data_9236 | from system import environment
LIST_FILE_BASENAME = 'file_list.txt'
-TESTCASES_PER_DAY = 5000
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory,
Let's change it to `2000` for now, or even leave `1000`? `gs://clusterfuzz-fuzzer-testcases/2019-06-17/` for example takes 35GB. I'm checking the yesterday's directory right now, as it may be even bigger.
from system import environment
LIST_FILE_BASENAME = 'file_list.txt'
+TESTCASES_PER_DAY = 1000
def upload_testcases_if_needed(fuzzer_name, testcase_list, testcase_directory, |
codereview_python_data_9238 | import MDAnalysis as mda
class GROReadBench(object):
-
def time_read_GRO_coordinates(self):
"""Benchmark reading of standard testsuite GRO file."""
GROReader(GRO)
def time_parse_GRO_file(self):
- with GROParser(GRO) as p:
- top = p.parse()
def time_create_GRO_universe(self):
"""Time to create MDA Universe of GRO"""
are you ok with having this timing report the context manager initialization on top of the `parse()` method?
import MDAnalysis as mda
class GROReadBench(object):
def time_read_GRO_coordinates(self):
"""Benchmark reading of standard testsuite GRO file."""
GROReader(GRO)
def time_parse_GRO_file(self):
+ """Time to create topology from GRO file"""
+ p = GROParser(GRO)
+ top = p.parse()
def time_create_GRO_universe(self):
"""Time to create MDA Universe of GRO""" |
codereview_python_data_9240 | class MasterProvisioning(Provisioning):
def get_rfiles(self):
- jmeter_var_pattern = re.compile("\${.+\}")
rfiles = []
additional_files = []
for executor in self.executors:
Maybe definition of it as constant of JMeterExecuter looks better as we have a lot places to reuse it (at least follow test)
class MasterProvisioning(Provisioning):
def get_rfiles(self):
rfiles = []
additional_files = []
for executor in self.executors: |
codereview_python_data_9258 | constraint_spec: qlast.Expr,
else_branch: Optional[qlast.Expr],
*, ctx: context.ContextLevel,
-) -> Tuple[Optional[irast.ConstraintRef],
- Optional[Tuple[irast.Set, irast.Set]]]:
with ctx.new() as constraint_ctx:
constraint_ctx.partial_path_prefix = subject
I'd make this a `NamedTuple` for clarity.
constraint_spec: qlast.Expr,
else_branch: Optional[qlast.Expr],
*, ctx: context.ContextLevel,
+) -> irast.OnConflictClause:
with ctx.new() as constraint_ctx:
constraint_ctx.partial_path_prefix = subject |
codereview_python_data_9271 | self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=False)
if bias:
self.bias = nn.parameter.Parameter(torch.zeros(self._out_feats))
self.reset_parameters()
def reset_parameters(self):
While mathematically this is equivalent to the previous implementation, it may bring issues if a user wants to load a model checkpoint.
self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=False)
if bias:
self.bias = nn.parameter.Parameter(torch.zeros(self._out_feats))
+ else:
+ self.register_buffer('bias', None)
self.reset_parameters()
def reset_parameters(self): |
codereview_python_data_9276 | is a minimal subset of the PDB format. Hetero supports a 3 alphanumeric code.
The NDB web interface is located at http://ndbserver.rutgers.edu
-There is no implementation of the classes defined on Bio.Crystal on Biopython,
so the module will be removed.
Bio.Crystal.Hetero substitute is Bio.PDB.Atom
This sentence does not flow well.
is a minimal subset of the PDB format. Hetero supports a 3 alphanumeric code.
The NDB web interface is located at http://ndbserver.rutgers.edu
+There is no use of the classes defined on Bio.Crystal on Biopython,
so the module will be removed.
Bio.Crystal.Hetero substitute is Bio.PDB.Atom |
codereview_python_data_9277 | bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
-
- # bbox_feats.shape[0] > 0 is mean the number of proposal is not 0.
- if self.with_semantic and 'bbox' in self.semantic_fusion and \
- bbox_feats.shape[0] > 0:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
- bbox_semantic_feat = F.adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats)
\`bbox_feats.shape[0] > 0\` requires the number of proposal is not 0.
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
+ if self.with_semantic and 'bbox' in self.semantic_fusion:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
+ bbox_semantic_feat = adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats += bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats) |
codereview_python_data_9287 | [code snippets not recovered; the original content was an HTTP 503 error page] Add a newline here, to separate the `Args` and `Returns` blocks. Please fix this everywhere. |
codereview_python_data_9289 | -from typing import List
from data.model.external_service import ExternalServiceType
from listenbrainz import db, utils
if we have types in the function definition then they're not required in the docstrings
+from typing import List, Union
from data.model.external_service import ExternalServiceType
from listenbrainz import db, utils |
codereview_python_data_9299 | ir_set: irast.Set,
orig_stype: s_types.Type,
new_stype: s_types.Type,
- cardinality_mod: Optional[qlast.CardinalityModifier],
*,
ctx: context.ContextLevel) -> irast.Set:
orig_typeref = typegen.type_to_typeref(orig_stype, env=ctx.env)
Let's make `cardinality_mod` a keyword-only (like it is in `compile_cast()`)
ir_set: irast.Set,
orig_stype: s_types.Type,
new_stype: s_types.Type,
*,
+ cardinality_mod: Optional[qlast.CardinalityModifier],
ctx: context.ContextLevel) -> irast.Set:
orig_typeref = typegen.type_to_typeref(orig_stype, env=ctx.env) |
codereview_python_data_9303 | # This file can have a UTF-8 byte-order-marker at the beginning of
# the first row.
# TODO: Find out all the code pages this can have. Asked McAfee 10/31.
- byte_order_mark = codecs.decode(b'\xef\xbb\xbf', parser_mediator.codepage)
- byte_order_mark_length = len(byte_order_mark)
- if row['date'].startswith(byte_order_mark):
- row['date'] = row['date'][byte_order_mark_length:]
self._encoding = 'utf-8'
# Check the date format!
please do not change b'\xef\xbb\xbf' is a sequence specific to UTF-8 (UTF-8 BOM, which technically does not need a BOM ;), this is the binary sequence and should not be translated into a string. Seeing that the string u'\ufeff' can be used for UTF-16 and UTF-32 as well.
# This file can have a UTF-8 byte-order-marker at the beginning of
# the first row.
# TODO: Find out all the code pages this can have. Asked McAfee 10/31.
+ row_bytes = codecs.encode(row['date'], parser_mediator.codepage)
+ if row_bytes.startswith(b'\xef\xbb\xbf'):
+ row['date'] = row['date'][3:]
self._encoding = 'utf-8'
# Check the date format! |
codereview_python_data_9309 | self.property_list = property_list
def _translate_id(self, entity_id):
- """Return entity identifier."""
return entity_id
def __contains__(self, id):
Minor, but can you make this: ```python """Return entity identifier (PRIVATE).""" ```
self.property_list = property_list
def _translate_id(self, entity_id):
+ """Return entity identifier (PRIVATE)."""
return entity_id
def __contains__(self, id): |
codereview_python_data_9310 | width_coef: float = 0.8,
xlim: Optional[Tuple[int, int]] = None,
ylim: Optional[Tuple[int, int]] = None,
- title: str]= 'Split value histogram for feature with @index/name@ @feature@',
xlabel: str = 'Feature split value',
ylabel: str = 'Count',
figsize: Optional[Tuple[int, int]] = None,
```suggestion title: str = 'Split value histogram for feature with @index/name@ @feature@', ```
width_coef: float = 0.8,
xlim: Optional[Tuple[int, int]] = None,
ylim: Optional[Tuple[int, int]] = None,
+ title: str = 'Split value histogram for feature with @index/name@ @feature@',
xlabel: str = 'Feature split value',
ylabel: str = 'Count',
figsize: Optional[Tuple[int, int]] = None, |
codereview_python_data_9311 | def __repr__(self):
return self.reprcall()
- def items(self):
- for k, v in dict.items(self):
- yield k.decode() if isinstance(k, bytes) else k, v
@property
def name(self):
looks like here is missing yield the `v` if is instance of bytes.
def __repr__(self):
return self.reprcall()
+ if JSON_NEEDS_UNICODE_KEYS: # pragma: no cover
+ def items(self):
+ for k, v in dict.items(self):
+ yield k.decode() if isinstance(k, bytes) else k, v
@property
def name(self): |
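The reviewer in codereview_python_data_9311 points out that only the key is decoded while a bytes value is passed through unchanged. A small, self-contained sketch of the behaviour being asked for (the dict subclass is a stand-in, not Celery's actual class):

```python
class DecodingDict(dict):
    def items(self):
        # Decode the value as well as the key when either arrives as bytes.
        for k, v in dict.items(self):
            k = k.decode() if isinstance(k, bytes) else k
            v = v.decode() if isinstance(v, bytes) else v
            yield k, v

assert list(DecodingDict({b"task": b"add"}).items()) == [("task", "add")]
```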
codereview_python_data_9312 | iou_thr=0.5,
dataset=None,
logger=None,
- tpfp_func=None,
nproc=4):
"""Evaluate mAP of a dataset.
Similar to `collate_fn`, we may rename it to `tpfp_fn`.
iou_thr=0.5,
dataset=None,
logger=None,
+ tpfp_fn=None,
nproc=4):
"""Evaluate mAP of a dataset. |
codereview_python_data_9315 | else:
self.get_special(event).update(new_set)
- def get_epsilon(self, none=None):
"""
Return the mapping for epsilon, or None.
"""
- return self.special.get('', none)
- def iteritems(self, len=len):
"""
Return the mapping as an iterable of ((code1, code2), state_set) and
(special_event, state_set) pairs.
`None` is a constant now, so this is a useless (de-)optimisation. Actually, in case `self.special` is an actual `dict`, it is even entirely unnecessary to pass the argument into `.get()` at all.
else:
self.get_special(event).update(new_set)
+ def get_epsilon(self):
"""
Return the mapping for epsilon, or None.
"""
+ return self.special.get('')
+ def iteritems(self,
+ len=len):
"""
Return the mapping as an iterable of ((code1, code2), state_set) and
(special_event, state_set) pairs. |
codereview_python_data_9321 | return coords
-def minimizing_vector(reference_point, ctrpos, box, backend="serial"):
dx = reference_point - ctrpos
if len(dx) == 0:
return dx
I am not sure the name is explicit enough as many things are vectors and minimizing may mean various things. `minimize_periodic_vector` perhaps?
return coords
+def minimize_periodic_vector(reference_point, ctrpos, box, backend="serial"):
dx = reference_point - ctrpos
if len(dx) == 0:
return dx |
codereview_python_data_9324 | for i, r in enumerate(rest_results): # user-added return values
rest_results[i] = images_to_levels(r, num_level_anchors)
- return (labels_list, label_weights_list, bbox_targets_list,
- bbox_weights_list, num_total_pos, num_total_neg) \
- + tuple(rest_results)
def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples):
res is not used later.
for i, r in enumerate(rest_results): # user-added return values
rest_results[i] = images_to_levels(r, num_level_anchors)
+ return res + tuple(rest_results)
def loss_single(self, cls_score, bbox_pred, anchors, labels, label_weights,
bbox_targets, bbox_weights, num_total_samples): |
codereview_python_data_9325 | else:
LOG.info('Unsupported Events rule target ARN "%s"' % arn)
- return
def create_sqs_queue(queue_name, env=None):
nitpick: `return` technically not required
else:
LOG.info('Unsupported Events rule target ARN "%s"' % arn)
def create_sqs_queue(queue_name, env=None): |
codereview_python_data_9328 | from celery.utils.functional import noop
-from case import skip
-
-@skip.if_pypy()
class test_thread_TaskPool:
def test_on_apply(self):
Why do we need to skip if PyPy is used. PyPy 2 should be able to use ThreadPoolExecutor and PyPy 3 comes with it.
from celery.utils.functional import noop
class test_thread_TaskPool:
def test_on_apply(self): |
codereview_python_data_9333 | extras = ', ' + extras if extras else ''
apply_args= 'hv.{class_name}{extras}'.format(class_name=class_name,
extras=extras)
- msg = "Cannot construct a {class_name} from the supplied object of type DynamicMap. Implicitly creating a DynamicMap of {class_name} objects, but instead please explicitly call .apply({apply_args}) on the supplied DynamicMap."
- cls.warning(cls, msg.format(class_name=class_name, apply_args=apply_args))
return data.apply(cls, per_element=True, kdims=kdims, vdims=vdims, **kwargs)
else:
return super(Dataset, cls).__new__(cls)
```suggestion msg = "Cannot construct a {class_name} from the supplied object of type DynamicMap. Implicitly creating a DynamicMap of {class_name} objects, but instead please explicitly call .apply({apply_args}) on the supplied DynamicMap." ```
extras = ', ' + extras if extras else ''
apply_args= 'hv.{class_name}{extras}'.format(class_name=class_name,
extras=extras)
+ msg = "Cannot construct a {class_name} from the supplied object of type DynamicMap. Implicitly creating a DynamicMap of {class_name} objects, but instead please explicitly call .apply({apply_args}) on the supplied DynamicMap."
+ cls.param.warning(cls, msg.format(class_name=class_name, apply_args=apply_args))
return data.apply(cls, per_element=True, kdims=kdims, vdims=vdims, **kwargs)
else:
return super(Dataset, cls).__new__(cls) |
codereview_python_data_9340 | if count == 0 and not self._zero_count:
raise cmdexc.PrerequisitesError(
- "{}: Argument zero_count not been set to true!".format(
- self.name))
if self.deprecated:
message.warning(win_id, '{} is deprecated - {}'.format(
This is the error displayed to the user, so this would be a confusing output. I think something like "{}: A zero count is not allowed for this command!" would be better.
if count == 0 and not self._zero_count:
raise cmdexc.PrerequisitesError(
+ "{}: A zero count is not allowed for this command!"
+ .format(self.name))
if self.deprecated:
message.warning(win_id, '{} is deprecated - {}'.format( |
codereview_python_data_9343 | if subfield_len != 2:
raise ValueError("Wrong BC payload length")
if block_size is not None:
- raise AttributeError("Two BC subfields?")
block_size = struct.unpack("<H", subfield_data)[0] + 1 # uint16_t
assert x_len == extra_len, (x_len, extra_len)
if block_size is None:
Why an AttributeError?
if subfield_len != 2:
raise ValueError("Wrong BC payload length")
if block_size is not None:
+ raise ValueError("Two BC subfields?")
block_size = struct.unpack("<H", subfield_data)[0] + 1 # uint16_t
assert x_len == extra_len, (x_len, extra_len)
if block_size is None: |
codereview_python_data_9356 | await self._load_payloads(plug)
await self._load_abilities(plug)
await self._load_objectives(plug)
- await self._load_packers(plug)
await self._load_adversaries(plug)
- await self._load_sources(plug)
await self._load_planners(plug)
await self._load_extensions()
await self._verify_data_sets()
except Exception as e:
183 is there to prevent plugin load order issues. I'm not seeing a change that would ensure that removing 183 wouldn't prevent those issues from happening again
await self._load_payloads(plug)
await self._load_abilities(plug)
await self._load_objectives(plug)
await self._load_adversaries(plug)
await self._load_planners(plug)
+ await self._load_sources(plug)
+ await self._load_packers(plug)
await self._load_extensions()
await self._verify_data_sets()
except Exception as e: |
codereview_python_data_9360 | @pytest.mark.usefixtures("maybe_run_functions_eagerly")
-@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
-def test_filter_response_normalization_save_h5(dtype, tmpdir):
input_layer = tf.keras.layers.Input(shape=(32, 32, 3))
frn = FilterResponseNormalization()(input_layer)
model = tf.keras.Model(input_layer, frn)
filepath = str(tmpdir / "test.h5")
- model.save(filepath)
Is this specific to h5?
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
+def test_filter_response_normalization_save(tmpdir):
input_layer = tf.keras.layers.Input(shape=(32, 32, 3))
frn = FilterResponseNormalization()(input_layer)
model = tf.keras.Model(input_layer, frn)
filepath = str(tmpdir / "test.h5")
+ model.save(filepath, save_format="h5")
+ filepath = str(tmpdir / "test")
+ model.save(filepath, save_format="tf") |
codereview_python_data_9367 | [code snippets not recovered; the original content was an HTTP 503 error page] what does dml stand for? |
codereview_python_data_9371 | y1 = cy - 0.5 * h
y2 = cy + 0.5 * h
if bboxes.size(1) == 5:
- bboxes_ = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
else:
- bboxes_ = torch.stack([x1, y1, x2, y2], dim=-1)
- return bboxes_
bboxes_ -> rescaled_bboxes
y1 = cy - 0.5 * h
y2 = cy + 0.5 * h
if bboxes.size(1) == 5:
+ rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
else:
+ rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
+ return rescaled_bboxes |
codereview_python_data_9374 | return plugin
return None
- def check_repeatable_abilities(self, ram):
- return any(ab.repeatable for ab_id in self.atomic_ordering for ab in ram['abilities'] if ab.ability_id == ab_id)
This is a very minor thing, but it might be more efficient and cleaner to just pass a list of abilities extracted from ram rather than the entire ram object. If the general consensus is that it doesn't really matter, that's fine, but I think filtering to abilities when calling the method and renaming ram to ability_list or the like might be cleaner.
return plugin
return None
+ def check_repeatable_abilities(self, ability_list):
+ return any(ab.repeatable for ab_id in self.atomic_ordering for ab in ability_list if ab.ability_id == ab_id) |
codereview_python_data_9378 | Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
- thresh (float, optional): If not None, threshold for prediction
- scores under this value being incorrect. Default to None.
"""
super().__init__()
self.topk = topk
Need to also modify here.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
+ thresh (float, optional): If not None, predictions with scores
+ under this threshold are considered incorrect. Default to None.
"""
super().__init__()
self.topk = topk |
codereview_python_data_9382 | [code snippets not recovered; the original content was an HTTP 503 error page] The difference between `etypes` and `canonical_etypes` is pretty subtle, and I'm concerned could be a source of errors for non DGLHeteroGraph implementations. Can we simplify this interface to only support canonical_etypes? Or have a separate set of functions for querying graph metadata? e.g.: ``` def get_src_type(self, etype): ... def get_dst_type(self, etype): ... ``` This would require every etype to be unique however. |
codereview_python_data_9389 | else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
- return make_error(msg, 407)
elif integration['type'] == 'HTTP':
function = getattr(requests, method.lower())
HTTP code `407` is "Proxy Authentication Required" - wondering what is the rationale behind this change? Perhaps `501` (Not Implemented) would be more appropriate?
else:
msg = 'API Gateway action uri "%s" not yet implemented' % uri
LOGGER.warning(msg)
+ return make_error(msg, 404)
elif integration['type'] == 'HTTP':
function = getattr(requests, method.lower()) |
codereview_python_data_9390 | import os
import copy
-from functools import partial
import pytest
Is this needed?
import os
import copy
import pytest |
codereview_python_data_9393 | name = fullname.name
parts = name.split('@', 1)
if len(parts) == 2:
- res = name_from_string(unmangle_name(parts[0]))
- if isinstance(fullname, QualName) and isinstance(res, UnqualName):
- res = QualName(module=fullname.module, name=res.name)
- return res
else:
return fullname
Hmm. Why is this upcasting to `QualName` needed? `shortname_from_fullname` should be the reverse of `derive_name`.
name = fullname.name
parts = name.split('@', 1)
if len(parts) == 2:
+ return name_from_string(unmangle_name(parts[0]))
else:
return fullname |
codereview_python_data_9394 | # the context of the script that starts the Docker container
ENV_SCRIPT_STARTING_DOCKER = "LS_SCRIPT_STARTING_DOCKER"
-# event used to synchronize shutdown signals and handlers
-SHUTDOWN_EVENT = threading.Event()
-SHUTDOWN_EVENT_LOCK = threading.RLock()
-
def log_duration(name=None, min_ms=500):
"""Function decorator to log the duration of function invocations."""
do we need the module level members here? Seems we could localize them into the method.
# the context of the script that starts the Docker container
ENV_SCRIPT_STARTING_DOCKER = "LS_SCRIPT_STARTING_DOCKER"
def log_duration(name=None, min_ms=500):
"""Function decorator to log the duration of function invocations.""" |
codereview_python_data_9402 | """NeXML Tree object."""
def __init__(self, root=None, rooted=False, id=None, name=None, weight=1.0):
- """Initialize paramters for NeXML tree object."""
BaseTree.Tree.__init__(self, root=root or Clade(),
rooted=rooted, id=id, name=name)
self.weight = weight
\*parameters, or: "Instantiate a NeXML tree object with the given parameters."
"""NeXML Tree object."""
def __init__(self, root=None, rooted=False, id=None, name=None, weight=1.0):
+ """Instantiate a NeXML tree object with the given parameters."""
BaseTree.Tree.__init__(self, root=root or Clade(),
rooted=rooted, id=id, name=name)
self.weight = weight |
codereview_python_data_9408 | pass
- @property
- def ylim(self):
- ydata = self.data[:, 1]
- return min(ydata), max(ydata)
-
-
@property
def range(self):
"""
We may need to think about what the semantics of `__getitem__` should be...
pass
@property
def range(self):
""" |
codereview_python_data_9410 | dockerfile = '''
FROM {container_image}:{container_version}
{container_environment}
- RUN bash -c 'if [ -x "$(command -v apt-get)" ]; then apt-get update && apt-get install -y python sudo which; fi'
RUN bash -c 'if [ -x "$(command -v yum)" ]; then yum makecache fast && yum update -y && yum install -y python sudo which; fi'
- RUN bash -c 'if [ -x "$(command -v zypper)" ]; then zypper refresh && zypper update -y && zypper install -y python sudo which; fi'
''' # noqa
Apt doesn't have a package named `which`. It is part of debianutils. ```debianutils: /bin/which```
dockerfile = '''
FROM {container_image}:{container_version}
{container_environment}
+ RUN bash -c 'if [ -x "$(command -v apt-get)" ]; then apt-get update && apt-get install -y python sudo; fi'
RUN bash -c 'if [ -x "$(command -v yum)" ]; then yum makecache fast && yum update -y && yum install -y python sudo which; fi'
+ RUN bash -c 'if [ -x "$(command -v zypper)" ]; then zypper refresh && zypper update -y && zypper install -y python sudo; fi'
''' # noqa |
codereview_python_data_9423 | gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
- gt_bboxes_ignore (None | list[Tensor], optional): Bounding boxes
- which can be ignored for each image.
Returns:
dict[str, Tensor]: A dictionary of loss components.
(list[Tensor], optional): xxxx. Defaults: None.
gt_labels_list (list[Tensor]): Ground truth class indices for each
image with shape (num_gts, ).
img_metas (list[dict]): List of image meta information.
+ gt_bboxes_ignore (list[Tensor], optional): Bounding boxes
+ which can be ignored for each image. Default None.
Returns:
dict[str, Tensor]: A dictionary of loss components. |
codereview_python_data_9447 | # built documents.
#
# The short X.Y version.
-version = '1.1.0'
# The full version, including alpha/beta/rc tags.
-release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
We should prob update this to use `molecule.__version__`.
# built documents.
#
# The short X.Y version.
+sys.path.insert(0, os.path.abspath('.'))
+import molecule
+version = molecule.__version__
# The full version, including alpha/beta/rc tags.
+release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages. |
codereview_python_data_9452 | yield dataset
def iter_appengineapps(self, projectid):
response = self.appengine.get_app(projectid)
- return
- yield
def iter_cloudsqlinstances(self, projectid):
result = self.cloudsql.get_instances(projectid)
This is never going to return anything
yield dataset
def iter_appengineapps(self, projectid):
+ """ TO DO: Have to verify that the customer enabled App Engine Admin
+ API before creating inventory
+ """
response = self.appengine.get_app(projectid)
+ if not response:
+ return
+ yield
+ yield response
def iter_cloudsqlinstances(self, projectid):
result = self.cloudsql.get_instances(projectid) |
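For the pattern flagged in codereview_python_data_9452, a bare `return` placed before a `yield` turns the function into a generator that produces nothing, which is why the reviewer says it will never return anything. A short illustration of both the flagged pattern and the guarded version (standalone sketch, not the project's code):

```python
def empty_generator():
    return          # exits immediately
    yield           # unreachable, but its presence makes this a generator

def real_generator(response):
    if not response:
        return      # an empty generator when there is nothing to emit
    yield response  # otherwise emit the single result

assert list(empty_generator()) == []
assert list(real_generator(None)) == []
assert list(real_generator({"app": "x"})) == [{"app": "x"}]
```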
codereview_python_data_9458 | min_levels=argmin)
num_pos = torch.cat(pos_inds, 0).sum().float()
acc = self.calculate_accuracy(cls_scores, labels_list, pos_inds)
for i in range(len(losses_cls)):
- if num_pos == 0:
- avg_factor = num_pos + float(num_total_neg)
- else:
- avg_factor = num_pos
losses_cls[i] /= avg_factor
losses_bbox[i] /= avg_factor
return dict(
The computation of `avg_factor` can be moved out of the for-loop.
min_levels=argmin)
num_pos = torch.cat(pos_inds, 0).sum().float()
acc = self.calculate_accuracy(cls_scores, labels_list, pos_inds)
+
+ if num_pos == 0: # No gt
+ avg_factor = num_pos + float(num_total_neg)
+ else:
+ avg_factor = num_pos
for i in range(len(losses_cls)):
losses_cls[i] /= avg_factor
losses_bbox[i] /= avg_factor
return dict( |
codereview_python_data_9464 | if field != " ":
if field == "H":
# The hetero field consists of H_ + the residue name (e.g. H_FUC)
- field = ("H_" + resname).replace(" ", "")
res_id = (field, resseq, icode)
if field == " ":
if self.chain.has_id(res_id):
I would rather use `str.strip` here than `replace`. More explicit. You can call it on the residue name only.
if field != " ":
if field == "H":
# The hetero field consists of H_ + the residue name (e.g. H_FUC)
+ field = "H_" + resname.strip()
res_id = (field, resseq, icode)
if field == " ":
if self.chain.has_id(res_id): |
codereview_python_data_9470 | keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training.
If False, the returned value will be converted into _InnerPredictor before returning.
- When your model is very large and cause the memory error, you can try to set this to ```True```, to avoid the model_to_string conversion.
You can still use _InnerPredictor as ``init_model`` for future continue training.
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration.
Just fix RST markdown and even more words about conversion. ```suggestion When your model is very large and cause the memory error, you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``. ```
keep_training_booster : bool, optional (default=False)
Whether the returned Booster will be used to keep training.
If False, the returned value will be converted into _InnerPredictor before returning.
+ When your model is very large and cause the memory error, you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``.
You can still use _InnerPredictor as ``init_model`` for future continue training.
callbacks : list of callables or None, optional (default=None)
List of callback functions that are applied at each iteration. |
codereview_python_data_9478 | # ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
-"""Modin benchmarks with MODIN_CPUS change"""
```suggestion """Benchmarks measuring how Modin performance scales when MODIN_CPUS are changed""" ```
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
+"""Benchmarks measuring how Modin performance scales when MODIN_CPUS are changed""" |
codereview_python_data_9482 | Parameters
----------
- edges : tensor
- The edge ID array.
- eid : Int Tensor
Each element is an ID. The tensor must have the same device type
and ID data type as the graph's.
There is no eid in the param list of find_edges.
Parameters
----------
+ edges : Int Tensor
Each element is an ID. The tensor must have the same device type
and ID data type as the graph's. |
codereview_python_data_9491 | ("upstream_missing_dependency", 'had missing external dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("upstream_scheduling_error", 'had dependencies whose scheduling failed'),
- ("unknown_reason", 'did not run successfully because of unknown reason'),
))
When the task fails `run()` we do get the reason back, as the exception, right? Was there a reason to alter wording "were left pending"?
("upstream_missing_dependency", 'had missing external dependencies'),
("upstream_run_by_other_worker", 'had dependencies that were being run by other worker'),
("upstream_scheduling_error", 'had dependencies whose scheduling failed'),
+ ("not_run", 'was not granted run permission by the scheduler'),
)) |
codereview_python_data_9492 | def dynamic_validation(self, request: Request):
# we can not add a Claim Def with existent ISSUER_DID
# sine a Claim Def needs to be identified by seqNo
- self._validate_type(request)
identifier, req_id, operation = request.identifier, request.reqId, request.operation
ref = operation[REF]
try:
Please use the latest code from master with the latest Validator approach for every txn type
def dynamic_validation(self, request: Request):
# we can not add a Claim Def with existent ISSUER_DID
# sine a Claim Def needs to be identified by seqNo
+ self._validate_request_type(request)
identifier, req_id, operation = request.identifier, request.reqId, request.operation
ref = operation[REF]
try: |
codereview_python_data_9500 | nms_thr (float): NMS IoU threshold
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
- score_cofficient (Tensor): multipily with score to help nms sorting
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels
1. `score_cofficient` -> `score_factors` 2. `The factors multiplied to scores before applying NMS`
nms_thr (float): NMS IoU threshold
max_num (int): if there are more than max_num bboxes after NMS,
only top max_num will be kept.
+ score_factors (Tensor): The factors multiplied to scores before
+ applying NMS
Returns:
tuple: (bboxes, labels), tensors of shape (k, 5) and (k, 1). Labels |
codereview_python_data_9501 | """Error indicating an event is malformed."""
class InvalidFilter(Error):
- """Error indicating an invalid filter was specifiedd."""
class MalformedPresetError(Error):
"""Raised when a parser preset definition is malformed."""
Please fix typo
"""Error indicating an event is malformed."""
class InvalidFilter(Error):
+ """Error indicating an invalid filter was specified."""
class MalformedPresetError(Error):
"""Raised when a parser preset definition is malformed.""" |
codereview_python_data_9502 | create_inventory_parser.add_argument(
'import_as',
metavar=('MODEL_NAME',),
help='Import the inventory when complete, requires a model name')
create_inventory_parser.add_argument(
'--background',
Is this what we want? shouldn't the first (positional) argument be the model name?
create_inventory_parser.add_argument(
'import_as',
metavar=('MODEL_NAME',),
+ nargs='?',
help='Import the inventory when complete, requires a model name')
create_inventory_parser.add_argument(
'--background', |
codereview_python_data_9503 | action='store_true',
help='Bypass Cloud Shell requirement')
parser.add_argument('--service-account-key-path',
- help='Absolute path for service account key file')
parser.add_argument('--advanced',
action='store_true',
help='Advanced setup mode (more options)')
Can you please add into the help, that the filename is also needed? Something like: ``` help='Absolute path and filename for service account key file' ```
action='store_true',
help='Bypass Cloud Shell requirement')
parser.add_argument('--service-account-key-path',
+ help=('Absolute path and filename for service account '
+ 'key file'))
parser.add_argument('--advanced',
action='store_true',
help='Advanced setup mode (more options)') |
codereview_python_data_9505 | @classmethod
def from_string(cls, treetext):
- """Convert File handle to StringIO obejct."""
handle = StringIO(treetext)
return cls(handle)
As above: "Instantiate the Newick Tree class from the given string."
@classmethod
def from_string(cls, treetext):
+ """Instantiate the Newick Tree class from the given string."""
handle = StringIO(treetext)
return cls(handle) |
codereview_python_data_9511 | node_pubkey (str): base58 encoded public key.
Returns:
- The last block id the node has voted on. If the node didn't cast
any vote then the genesis block id is returned.
"""
Really small detail, but it seems to me that it would be more accurate to write something like: > The id of the last block the node has voted on. Otherwise, "the last block id the node has voted on" sounds like the vote is being done on the id rather than on the block itself. @ttmc thoughts?
node_pubkey (str): base58 encoded public key.
Returns:
+ The id of the last block the node has voted on. If the node didn't cast
any vote then the genesis block id is returned.
""" |
codereview_python_data_9521 | 'job': target_job.job,
'weight': target_job.weight
}
- rows.append(big_query.Insert(row, None))
client = big_query.Client(dataset_id='main', table_id='fuzzer_weights')
client.insert(rows)
row=row, insert_id=None Also, is it ok to ignore insert_id ?
'job': target_job.job,
'weight': target_job.weight
}
+ rows.append(big_query.Insert(row=row, insert_id=None))
client = big_query.Client(dataset_id='main', table_id='fuzzer_weights')
client.insert(rows) |
codereview_python_data_9522 | for client_id, _ in client_namebook.items():
register_res = rpc.ClientRegisterResponse(client_id)
rpc.send_response(client_id, register_res)
- server_state = ServerState(kv_server)
# main service loop
while True:
req, client_id = rpc.recv_request()
i don't think you can create server state here. what should `get_server_state` returns?
for client_id, _ in client_namebook.items():
register_res = rpc.ClientRegisterResponse(client_id)
rpc.send_response(client_id, register_res)
# main service loop
while True:
req, client_id = rpc.recv_request() |
codereview_python_data_9526 | direction = direction if isinstance(direction, list) else [direction]
for d in direction:
- assert d in ['horizontal', 'vertical', 'horizontal+vertical']
self.direction = direction
assert len(self.flip_ratio) == len(self.direction)
Illustrate what it means when it is a list.
direction = direction if isinstance(direction, list) else [direction]
for d in direction:
+ assert d in ['horizontal', 'vertical', 'diagonal']
self.direction = direction
assert len(self.flip_ratio) == len(self.direction) |
codereview_python_data_9532 | if self._content_consumed and isinstance(self._content, bool):
raise RuntimeError(
'The content for this response was already consumed')
- else:
- # simulate reading small chunks of the content
- reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
No need for the `else` or the subsequent indentation, we won't get this far if we raise anyway.
if self._content_consumed and isinstance(self._content, bool):
raise RuntimeError(
'The content for this response was already consumed')
+ # simulate reading small chunks of the content
+ reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate() |
codereview_python_data_9538 | from google.cloud.forseti.services.inventory.base import crawler
from google.cloud.forseti.services.inventory.base import gcp
-from google.cloud.forseti.services.inventory.base import resources
from google.cloud.forseti.common.util import logger
LOGGER = logger.get_logger(__name__)
Please sort this import line to be alphabetical order.
from google.cloud.forseti.services.inventory.base import crawler
from google.cloud.forseti.services.inventory.base import gcp
from google.cloud.forseti.common.util import logger
+from google.cloud.forseti.services.inventory.base import resources
LOGGER = logger.get_logger(__name__) |
codereview_python_data_9545 | :return:
"""
if element.tag == "ThreadGroup":
- concurrency = self._get_option_string_with_default(element, 'ThreadGroup.num_threads', "concurrency", 1)
else:
- concurrency = self._get_option_string_with_default(element, 'TargetLevel', "concurrency", 1)
self.log.debug('Got %s for concurrency in %s (%s)', concurrency, element.tag, element.get("testname"))
return concurrency
It's better to set 'field_name' according to element.tag and set concurrency later isn't it?
:return:
"""
if element.tag == "ThreadGroup":
+ concurrency_tag_name = 'ThreadGroup.num_threads'
+ concurrency = self._get_option_string_with_default(element, concurrency_tag_name, "concurrency", 1)
else:
+ concurrency_tag_name = 'TargetLevel'
+ concurrency = self._get_option_string_with_default(element, concurrency_tag_name, "concurrency", 1)
self.log.debug('Got %s for concurrency in %s (%s)', concurrency, element.tag, element.get("testname"))
return concurrency |
codereview_python_data_9547 | with_label=True,
test_mode=False,
extra_aug=None,
- keep_ratio_rescale=True):
# load annotations (and proposals)
self.img_infos = self.load_annotations(ann_file)
if proposal_file is not None:
Maybe `resize_keep_ratio` sounds better.
with_label=True,
test_mode=False,
extra_aug=None,
+ resize_keep_ratio=True):
# load annotations (and proposals)
self.img_infos = self.load_annotations(ann_file)
if proposal_file is not None: |
codereview_python_data_9552 | mock_file_config)
-#@pytest.fixture(scope='function', autouse=True)
@pytest.fixture
-def restore_config(request, ignore_local_config_file, node_config):
from bigchaindb import config_utils
config_utils.set_config(node_config)
Do we need the `request` fixture?
mock_file_config)
@pytest.fixture
+def restore_config(ignore_local_config_file, node_config):
from bigchaindb import config_utils
config_utils.set_config(node_config) |
codereview_python_data_9554 | Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model. This layer supports
- arbitrary tensors.
Output shape
Same shape as input.
Mention the assumptions made: 1. Excepts the input tensor to be at least 2D. 2. Excepts the batch dim to be `0` in all cases.
Arbitrary. Use the keyword argument `input_shape`
(tuple of integers, does not include the samples axis)
when using this layer as the first layer in a model. This layer supports
+ arbitrary tensors with the following assumptions:
+ - Expected input tensor to be at least 2D.
+ - 0th index in tensor shape is expected to be the batch dimension.
Output shape
Same shape as input. |
codereview_python_data_9559 | # https://github.com/kennethreitz/requests/issues/1846
return
- if os.path.exists(loc) and not netrc_path:
netrc_path = loc
# Abort early if there isn't one.
if netrc_path is None:
- return netrc_path
ri = urlparse(url)
While we're changing this function, can we avoid this `and not netrc_path` test by `break`ing from this loop instead? Saves us some iterations, probably.
# https://github.com/kennethreitz/requests/issues/1846
return
+ if os.path.exists(loc):
netrc_path = loc
+ break
# Abort early if there isn't one.
if netrc_path is None:
+ return
ri = urlparse(url) |
codereview_python_data_9560 | warnings.simplefilter("ignore", UserWarning)
from Bio import MarkovModel
-try:
- from StringIO import StringIO
-except ImportError:
- from io import StringIO
class TestMarkovModel(unittest.TestCase):
Can you use just ``from Bio._py3k import StringIO`` here? It is shorter, and will make it slightly easier to remove all the Python 2 specific code one day.
warnings.simplefilter("ignore", UserWarning)
from Bio import MarkovModel
+from Bio._py3k import StringIO
class TestMarkovModel(unittest.TestCase): |
codereview_python_data_9561 | )
time.sleep(36000)
except NoPlayerPositionSetException:
- wait_time = config.reconnecting_timeout * 60
bot.event_manager.emit(
'api_error',
sender=bot,
We need to move the scope of this. We use it further down the line with different excepts.
)
time.sleep(36000)
except NoPlayerPositionSetException:
bot.event_manager.emit(
'api_error',
sender=bot, |
codereview_python_data_9564 | )
# Get the molecule type
- mol_type = self._get_annotation_str(record, "molecule_type", default="DNA")
if mol_type and len(mol_type) > 7:
# Deal with common cases from EMBL to GenBank
mol_type = mol_type.replace("unassigned ", "").replace("genomic ", "")
There is also "rc" (record count) as used in WGA and other GenBank files which have no actual sequence.
)
# Get the molecule type
+ mol_type = self._get_annotation_str(record, "molecule_type", None)
+ if mol_type is None:
+ raise ValueError("missing molecule_type in annotations")
if mol_type and len(mol_type) > 7:
# Deal with common cases from EMBL to GenBank
mol_type = mol_type.replace("unassigned ", "").replace("genomic ", "") |
codereview_python_data_9567 | A new dataframe with the updated labels.
"""
- def new_labels_mapper(x):
- return str(x) + str(suffix)
if axis == 0:
return self.rename(new_row_labels=new_labels_mapper)
```suggestion def new_labels_mapper(x, suffix=str(suffix)): return str(x) + suffix ``` again, small speedup
A new dataframe with the updated labels.
"""
+ def new_labels_mapper(x, suffix=str(suffix)):
+ return str(x) + suffix
if axis == 0:
return self.rename(new_row_labels=new_labels_mapper) |
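The "small speedup" mentioned in codereview_python_data_9567 comes from default argument values being evaluated once, at function definition time: `str(suffix)` is computed a single time and bound as a fast local instead of being looked up and re-converted on every call. A tiny standalone illustration of the two variants:

```python
suffix = "_new"

def mapper_closure(x):
    # looks up `suffix` in the enclosing scope and converts it on every call
    return str(x) + str(suffix)

def mapper_bound(x, suffix=str(suffix)):
    # str(suffix) was evaluated once, when the function was defined
    return str(x) + suffix

assert mapper_closure("a") == mapper_bound("a") == "a_new"
```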
codereview_python_data_9569 | num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
- labels = anchors.new_zeros(
- num_valid_anchors, dtype=torch.long) + background_label
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
We could change it to `labels = anchors.new_empty(num_valid_anchors, dtype=torch.long).fill_(background_label)`. Just to be consistent with `bbox_target`
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_empty(
+ num_valid_anchors, dtype=torch.long).fill_(background_label)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds |
codereview_python_data_9580 | @property
def branches(self):
- branches = [
FlatMapStrategy(strategy=strategy, expand=self.expand)
for strategy in self.flatmapped_strategy.branches
]
- return branches
Is there a reason this isn't just `return [...]`?
@property
def branches(self):
+ return [
FlatMapStrategy(strategy=strategy, expand=self.expand)
for strategy in self.flatmapped_strategy.branches
] |
codereview_python_data_9583 | ctx.env.schema, json_t)):
# Turn casts from json->array<T> into json->array<json>
# and array<json>->array<T>.
- _, json_array_typ = s_types.Array.create(
- ctx.env.schema, element_type=json_t)
json_array_ir = compile_cast(
ir_expr, json_array_typ, srcctx=srcctx, ctx=ctx)
return compile_cast(
```suggestion ctx.env.schema, json_array_typ = s_types.Array.create( ```
ctx.env.schema, json_t)):
# Turn casts from json->array<T> into json->array<json>
# and array<json>->array<T>.
+ ctx.env.schema, json_array_typ = s_types.Array.from_subtypes(
+ ctx.env.schema, [json_t])
json_array_ir = compile_cast(
ir_expr, json_array_typ, srcctx=srcctx, ctx=ctx)
return compile_cast( |
codereview_python_data_9584 | self.firewall_rules.rules)
def test_add_rules_from_api_add_rule_false(self):
- """Validate that add_rules_from_api adds no rules when callback returns false.
Setup:
* Break the mock current firewall rules into two pages to validate
Nit: Reduce so this fits on one line without wrapping. Maybe: Validate function adds no rules when callback returns false.
self.firewall_rules.rules)
def test_add_rules_from_api_add_rule_false(self):
+ """Validate function adds no rules when callback returns false.
Setup:
* Break the mock current firewall rules into two pages to validate |
codereview_python_data_9593 | event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
- sl = {} # type: Dict[Any, Tuple[int, Any]]
- el = {} # type: Dict[Any, Tuple[int, Any]]
- dl = {} # type: Dict[Any, Tuple[int, Any]]
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i)
sl, `Dict[IPField, Tuple[int, List[ShortEnumField]]]`, dl as `Dict[IPField, ShortEnumField]`, and el as `Dict[ShortEnumField, Tuple[int, List[IPField]]]`.
event = lambda x: x['IP'].dport
if dst is None:
dst = lambda x: x['IP'].dst
+ sl = {} # type: Dict[IPField, Tuple[int, List[ShortEnumField]]]
+ el = {} # type: Dict[ShortEnumField, Tuple[int, List[IPField]]]
+ dl = {} # type: Dict[IPField, ShortEnumField]
for i in self.res:
try:
s, e, d = src(i), event(i), dst(i) |
codereview_python_data_9594 | j + 1, i + 1))
print('this node has {} edgs.'.format(
nrow[1]))
-
- # Add self loops
- if self.self_loop:
- m_edges += n_nodes
- g.add_edges(F.arange(0, n_nodes), F.arange(0, n_nodes))
if nattrs != []:
nattrs = np.stack(nattrs)
Should not be introduced in this PR, please remove.
j + 1, i + 1))
print('this node has {} edgs.'.format(
nrow[1]))
if nattrs != []:
nattrs = np.stack(nattrs) |
codereview_python_data_9596 | -import torch
-
from mmdet.core import bbox2result
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
We should move this part into head rather than in the detector.
from mmdet.core import bbox2result
from ..builder import DETECTORS
from .single_stage import SingleStageDetector |
codereview_python_data_9597 | # coding=utf-8
import unittest
import re
-from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
When exception happens, message has to contain error message
# coding=utf-8
import unittest
+import os
import re
+from time import sleep, time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException |
codereview_python_data_9604 | def reset_states(self) -> None:
# The state of the metric will be reset at the start of each epoch.
reset_value = 0
- self.squared_sum.assign(reset_value)
- self.sum.assign(reset_value)
- self.res.assign(reset_value)
- self.count.assign(reset_value)
using `0` directly will not have duplicate initial value generation call :-)
def reset_states(self) -> None:
# The state of the metric will be reset at the start of each epoch.
reset_value = 0
+ K.batch_set_value([(v, 0) for v in self.variables]) |
codereview_python_data_9606 | p_websocket_server.start()
# connect to tendermint event stream
- p_websocket_client = mp.Process(name='ws_client',
target=event_stream.start,
args=(exchange.get_publisher_queue(),))
p_websocket_client.start()
`ws_to_tendermint`? This would make easier to know what the process is doing when doing a `top` from the terminal.
p_websocket_server.start()
# connect to tendermint event stream
+ p_websocket_client = mp.Process(name='ws_to_tendermint',
target=event_stream.start,
args=(exchange.get_publisher_queue(),))
p_websocket_client.start() |
codereview_python_data_9610 | )
# Check that disallowed things are indeed absent
if not allow_newaxis:
- if hasattr(indexer, "__len__"):
assert 0 <= len(indexer) <= len(shape) + int(allow_ellipsis)
else:
assert 1 <= len(shape) + int(allow_ellipsis)
```suggestion if isinstance(indexer, tuple): ``` Again, we might as well keep things simple :slightly_smiling_face:
)
# Check that disallowed things are indeed absent
if not allow_newaxis:
+ if isinstance(indexer, tuple):
assert 0 <= len(indexer) <= len(shape) + int(allow_ellipsis)
else:
assert 1 <= len(shape) + int(allow_ellipsis) |
codereview_python_data_9611 | return _validUpgrade
-def testNodeUpgradeScheduledOnProperDate(poolNodesStarted,
- poolUpgradeScheduled):
# Verify that the upgrade is scheduled in approximately 5 days for each node
now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
for node in poolNodesStarted.nodes.values():
Why did we rename it? We test POOL_UPGRADE here, don't we?
return _validUpgrade
+def test_node_upgrade_scheduled_on_proper_date(poolNodesStarted,
+ poolUpgradeScheduled):
# Verify that the upgrade is scheduled in approximately 5 days for each node
now = datetime.utcnow().replace(tzinfo=dateutil.tz.tzutc())
for node in poolNodesStarted.nodes.values(): |
codereview_python_data_9614 | Returns:
object: Generator yielding access tuples.
- :param expand_resources:
"""
request = explain_pb2.GetAccessByPermissionsRequest(
This might be a copy/paste mistake, I don't think it is part of the return value.
Returns:
object: Generator yielding access tuples.
"""
request = explain_pb2.GetAccessByPermissionsRequest( |
codereview_python_data_9625 | # TODO Consider not installing the db drivers, or putting them in extras.
'rethinkdb~=2.3', # i.e. a version between 2.3 and 3.0
'pymongo~=3.4',
- 'pysha3==1.0b1',
'cryptoconditions>=0.5.0',
'statsd>=3.2.1',
'python-rapidjson>=0.0.8',
Why not go with the latest version `1.0.0`
# TODO Consider not installing the db drivers, or putting them in extras.
'rethinkdb~=2.3', # i.e. a version between 2.3 and 3.0
'pymongo~=3.4',
+ 'pysha3==1.0.0',
'cryptoconditions>=0.5.0',
'statsd>=3.2.1',
'python-rapidjson>=0.0.8', |
codereview_python_data_9629 | [code snippets not recovered; the original content was an HTTP 503 error page] nit: Can we import these constants from `test_lambda.py` instead of duplicating the code? |
codereview_python_data_9630 | self.g.set_n_repr({'h' : features})
for layer in self.layers:
# apply dropout
- if self.dropout:
- self.g.apply_nodes(apply_node_func=
- lambda node: {'h': F.dropout(node['h'], p=self.dropout, training=self.training)})
self.g.update_all(gcn_msg, gcn_reduce, layer)
return self.g.pop_n_repr('h')
def main(args):
# load and preprocess dataset
data = load_data(args)
features = torch.FloatTensor(data.features)
Will it be cleaner if we do not directly use F.dropout, but define a dropout layer instead?
self.g.set_n_repr({'h' : features})
for layer in self.layers:
# apply dropout
self.g.update_all(gcn_msg, gcn_reduce, layer)
return self.g.pop_n_repr('h')
def main(args):
# load and preprocess dataset
+ # Todo: adjacency normalization
data = load_data(args)
features = torch.FloatTensor(data.features) |
codereview_python_data_9636 | if skip_footer != 0:
skipfooter = skip_footer
ErrorMessage.default_to_pandas("`read_excel`")
- parsed = pandas.read_excel(
io,
sheet_name=sheet_name,
header=header,
This should convert each of the `pandas.DataFrame` into `modin.pandas.DataFrame`. You can use `cls.from_pandas` to do it.
if skip_footer != 0:
skipfooter = skip_footer
ErrorMessage.default_to_pandas("`read_excel`")
+ intermediate = pandas.read_excel(
io,
sheet_name=sheet_name,
header=header, |
codereview_python_data_9643 | from_email='pontoon@mozilla.com',
to=settings.PROJECT_MANAGERS,
cc=locale.managers_group.user_set.exclude(pk=user.pk)
- .values_list('email', flat=True) if type is 'projects' else '',
reply_to=[user.email],
).send()
else:
We should also CC locale managers if type is 'projects', so you could change this to: ```python if type is not None else '' ```
from_email='pontoon@mozilla.com',
to=settings.PROJECT_MANAGERS,
cc=locale.managers_group.user_set.exclude(pk=user.pk)
+ .values_list('email', flat=True) if _type is not None else '',
reply_to=[user.email],
).send()
else: |
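A side note on the original condition in codereview_python_data_9643: `type is 'projects'` compares object identity rather than value, so it only appears to work when CPython happens to intern the literal, whereas the reviewer's `is not None` check is a legitimate identity test. A short illustration (CPython behaviour assumed):

```python
label = "".join(["pro", "jects"])  # equal to "projects", but a distinct object
print(label == "projects")         # True  -- value comparison
print(label is "projects")         # False -- identity comparison (SyntaxWarning on 3.8+)
print(label is not None)           # True  -- identity against the None singleton is fine
```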
codereview_python_data_9644 | gcb=None,
stage_with_gcb=(False, False, False, False),
gen_attention=None,
- stage_with_gen_attention=[[], [], [], []],
with_cp=False,
zero_init_residual=True):
super(ResNet, self).__init__()
It is better use tuple instead of list (or mutable types) for default arguments.
gcb=None,
stage_with_gcb=(False, False, False, False),
gen_attention=None,
+ stage_with_gen_attention=((), (), (), ()),
with_cp=False,
zero_init_residual=True):
super(ResNet, self).__init__() |
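The reasoning behind the tuple suggestion in codereview_python_data_9644 is the classic mutable-default-argument trap: the default object is created once and shared across calls. A small standalone illustration:

```python
def append_to(item, bucket=[]):       # the default list is created once, at def time
    bucket.append(item)
    return bucket

print(append_to(1))   # [1]
print(append_to(2))   # [1, 2]  -- state leaked from the previous call

def append_to_safe(item, bucket=()):  # an immutable default cannot accumulate state
    return list(bucket) + [item]

print(append_to_safe(1))  # [1]
print(append_to_safe(2))  # [2]
```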
codereview_python_data_9646 | LOG = get_logger('system')
-class ThriftAuthHelper:
def __init__(self, protocol, host, port, uri,
session_token=None):
self.__host = host
I think we should use the new-style class `ThriftAuthHelper(object)`
LOG = get_logger('system')
+class ThriftAuthHelper(object):
def __init__(self, protocol, host, port, uri,
session_token=None):
self.__host = host |
codereview_python_data_9649 | import matplotlib.pyplot as plt
rmsd = R.rmsd.T # transpose makes it easier for plotting
- time = rmsd[1]facebook.com
fig = plt.figure(figsize=(4,4))
ax = fig.add_subplot(111)
ax.plot(time, rmsd[2], 'k-', label="all")
There's a random "facebook.com" in the docs. Please remove.
import matplotlib.pyplot as plt
rmsd = R.rmsd.T # transpose makes it easier for plotting
+ time = rmsd[1]
fig = plt.figure(figsize=(4,4))
ax = fig.add_subplot(111)
ax.plot(time, rmsd[2], 'k-', label="all") |
codereview_python_data_9652 | """Deprecated optimizer hook for distributed training"""
def __init__(self, *args, **kwargs):
- warnings.warn(
- '"DistOptimizerHook" is deprecated, please switch to'
- '"mmcv.runner.OptimizerHook".', DeprecationWarning)
super().__init__(*args, **kwargs)
`DeprecationWarning` is ignored by default, we may just use a `UserWarning` by leaving the second argument empty.
"""Deprecated optimizer hook for distributed training"""
def __init__(self, *args, **kwargs):
+ warnings.warn('"DistOptimizerHook" is deprecated, please switch to'
+ '"mmcv.runner.OptimizerHook".')
super().__init__(*args, **kwargs) |
codereview_python_data_9666 | with self.bot.database as conn:
cur = conn.cursor()
cur.execute(
- "SELECT DISTINCT COUNT(encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')")
catch_day = cur.fetchone()[0]
- cur.execute("SELECT DISTINCT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')")
ps_day = cur.fetchone()[0]
res = (
"*" + self.bot.config.username + "*",
incorrect sql. should be "SELECT COUNT(DISTINCT encounter_id)... and "SELECT COUNT(pokestop) That was changed recently
with self.bot.database as conn:
cur = conn.cursor()
cur.execute(
+ "SELECT COUNT(DISTINCT encounter_id) FROM catch_log WHERE dated >= datetime('now','-1 day')")
catch_day = cur.fetchone()[0]
+ cur.execute("SELECT COUNT(pokestop) FROM pokestop_log WHERE dated >= datetime('now','-1 day')")
ps_day = cur.fetchone()[0]
res = (
"*" + self.bot.config.username + "*", |