id | content |
---|---|
codereview_python_data_1385 | data, csvfile = get_airdrop_data(protocol_name, data_dir)
for row in data:
if len(row) < 2:
- raise InvalidData(f'Airdrop for {protocol_name} contains an invalid row {row}')
addr, amount, *_ = row
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance
```suggestion raise InvalidData(f'Airdrop CSV for {protocol_name} contains an invalid row: {row}') ```
data, csvfile = get_airdrop_data(protocol_name, data_dir)
for row in data:
if len(row) < 2:
+ raise UnableToDecryptRemoteData(
+ f'Airdrop CSV for {protocol_name} contains an invalid row: {row}',
+ )
addr, amount, *_ = row
# not doing to_checksum_address() here since the file addresses are checksummed
# and doing to_checksum_address() so many times hits performance |
codereview_python_data_1387 | dtypes: A `List` of `tf.DType` with the expected output types
devices: A `List` with the indexes of the devices to use
prefetch_queue_depth: `int` with the amount of prefetched batches
- num_threads: `int` with the number of reader threads in the pipeline
"""
super(DALIDataset, self).__init__()
```suggestion num_threads: `int` with the number of reader threads in the pipeline per GPU ```
dtypes: A `List` of `tf.DType` with the expected output types
devices: A `List` with the indexes of the devices to use
prefetch_queue_depth: `int` with the amount of prefetched batches
+ num_threads: `int` with the number of reader threads in the pipeline per GPU
"""
super(DALIDataset, self).__init__() |
codereview_python_data_1392 | Mainly used to ensure consistent and helpful error messages
"""
- err_msg = "Cannot set {attr} from {cls}. "
-
if isinstance(group, (Atom, AtomGroup)):
group_level = 1
elif isinstance(group, (Residue, ResidueGroup)):
I think I would prefer this message template to be down in the function, closer to where it is used. But it really is a matter of taste.
Mainly used to ensure consistent and helpful error messages
"""
if isinstance(group, (Atom, AtomGroup)):
group_level = 1
elif isinstance(group, (Residue, ResidueGroup)): |
codereview_python_data_1395 | coordinates ``reference[pairs[k, 0]]`` and
``configuration[pairs[k, 1]]``.
"""
pairs = np.empty((0, 2), dtype=np.int64)
distances = np.empty((0,), dtype=np.float64)
probably makes more sense to put this in an `else:` branch below the size checks, else it's not clear why we're defining these
coordinates ``reference[pairs[k, 0]]`` and
``configuration[pairs[k, 1]]``.
"""
+ # Default return values (will be overwritten only if pairs are found):
pairs = np.empty((0, 2), dtype=np.int64)
distances = np.empty((0,), dtype=np.float64) |
codereview_python_data_1408 | backend=default_backend())
return Fernet(base64.urlsafe_b64encode(generated_key.derive(bytes(self.get_config('encryption_key'), 'utf-8'))))
- @staticmethod
- def _load_packers(path):
- packers = dict()
- for module in glob.iglob('%s/**.py' % path):
- packer = import_module(module.replace('/', '.').replace('\\', '.').replace('.py', ''))
- packers[packer.name] = packer
- return packers
-
async def _operate_extension(self, payload, headers):
try:
target = '.' + payload.split('.')[-1]
let's look for a more pythonic way to achieve this
backend=default_backend())
return Fernet(base64.urlsafe_b64encode(generated_key.derive(bytes(self.get_config('encryption_key'), 'utf-8'))))
async def _operate_extension(self, payload, headers):
try:
target = '.' + payload.split('.')[-1] |
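The reviewer above asks for a more pythonic way to discover the packer modules than globbing for `.py` files and rewriting path separators. A minimal sketch of one common alternative, assuming the packers live in a regular package and each module exposes a module-level `name` (as in the removed code); the function and argument names here are illustrative, not the project's actual API:

```python
import importlib
import pkgutil


def load_packers(package):
    """Map each packer module's `name` attribute to the imported module."""
    packers = {}
    for info in pkgutil.iter_modules(package.__path__):
        module = importlib.import_module(f'{package.__name__}.{info.name}')
        packers[module.name] = module
    return packers
```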
codereview_python_data_1409 | try:
if request.headers.get('API_KEY') == self.get_config('api_key'):
return True
- elif 'localhost:' in request.host:
return True
await check_permission(request, group)
except (HTTPUnauthorized, HTTPForbidden):
can you make this localhost: a variable, since we're using it a few times in this module?
try:
if request.headers.get('API_KEY') == self.get_config('api_key'):
return True
+ elif self.bypass in request.host:
return True
await check_permission(request, group)
except (HTTPUnauthorized, HTTPForbidden): |
codereview_python_data_1415 | pass
def _matrix(self, options):
- """Creates a matrix for NEXUS object."""
if not self.ntax or not self.nchar:
raise NexusError('Dimensions must be specified before matrix!')
self.matrix = {}
This is a private method (starts with an underscore, not one of the underscore-underscore special methods like ``__len__``), so could you add ``... (PRIVATE)."""``` to the end of the line: ```python """Creates a matrix for NEXUS object (PRIVATE).""" ```
pass
def _matrix(self, options):
+ """Creates a matrix for NEXUS object (PRIVATE)"""
if not self.ntax or not self.nchar:
raise NexusError('Dimensions must be specified before matrix!')
self.matrix = {} |
codereview_python_data_1419 | def test_cylayer(self, universe, selstr):
sel = universe.select_atoms(selstr)
assert_equal(len(sel), 88)
empty = universe.select_atoms('cylayer 4.0 6.0 10 -10 name NOT_A_NAME')
assert_equal(len(empty), 0)
Thanks, these are thorough tests -- could you make each of these separate though? That way each function tests one specific functionality.
def test_cylayer(self, universe, selstr):
sel = universe.select_atoms(selstr)
assert_equal(len(sel), 88)
+
+ def test_empty_cylayer(self, universe):
empty = universe.select_atoms('cylayer 4.0 6.0 10 -10 name NOT_A_NAME')
assert_equal(len(empty), 0) |
codereview_python_data_1422 | url="https://github.com/modin-project/modin",
long_description=long_description,
long_description_content_type="text/markdown",
- install_requires=["pandas==0.23.4", "ray==0.6.2", "numpy<=1.15.0", "sqlalchemy>=1.2.17"],
extras_require={
# can be installed by pip install modin[dask]
"dask": ["dask==1.0.0", "distributed==1.25.0"],
I don't think we need `sqlalchemy` in `install_requires`. Users can have this installed if they wish to use it (similar to other file formats).
url="https://github.com/modin-project/modin",
long_description=long_description,
long_description_content_type="text/markdown",
+ install_requires=["pandas==0.23.4", "ray==0.6.2", "numpy<=1.15.0"],
extras_require={
# can be installed by pip install modin[dask]
"dask": ["dask==1.0.0", "distributed==1.25.0"], |
codereview_python_data_1431 | )
@click.option(
'--blotter',
- type=str,
default='default',
help="The blotter to use.",
show_default=True,
does this do anything?
)
@click.option(
'--blotter',
default='default',
help="The blotter to use.",
show_default=True, |
codereview_python_data_1457 | self._assert_ext_pkg_file_status(expected_to_be_present=False,
extension_version=extension_version)
- def test_ext_zip_file_packages_removed_in_updates_and_uninstall_case(self, *args):
test_data = WireProtocolData(DATA_FILE)
exthandlers_handler, protocol = self._create_mock(test_data, *args)
nit: update instead of updates?
self._assert_ext_pkg_file_status(expected_to_be_present=False,
extension_version=extension_version)
+ def test_ext_zip_file_packages_removed_in_update_and_uninstall_case(self, *args):
test_data = WireProtocolData(DATA_FILE)
exthandlers_handler, protocol = self._create_mock(test_data, *args) |
codereview_python_data_1461 | # Cell
class AzureMLCallback(Callback):
"Log losses, metrics, model architecture summary to AzureML"
def before_fit(self):
self.run = Run.get_context()
Add this line here to fix your smooth loss problem: ```python order = Recorder.order+1 ```
# Cell
class AzureMLCallback(Callback):
"Log losses, metrics, model architecture summary to AzureML"
+ order = Recorder.order+1
def before_fit(self):
self.run = Run.get_context() |
codereview_python_data_1467 | return f"{stack_name_part}-{resource_id_part}-{random_id_part}"
-def pre_create_default_name(key: str):
- def _pre_create_default_name(resource_id, resources, resource_type, func, stack_name):
resource = resources[resource_id]
props = resource["Properties"]
if not props.get(key):
nit: could add a type hint to indicate that this returns a `Callable[...]` (although the parameters of the callable will likely soon be simplified/unified).
return f"{stack_name_part}-{resource_id_part}-{random_id_part}"
+def pre_create_default_name(key: str) -> Callable[[str, dict, str, dict, str], None]:
+ def _pre_create_default_name(
+ resource_id: str, resources: dict, resource_type: str, func: dict, stack_name: str
+ ):
resource = resources[resource_id]
props = resource["Properties"]
if not props.get(key): |
codereview_python_data_1468 | "Please Report this as a bug, and send in data file."
def _translate(self, options):
- """Translates a Nexus file (PRIVATE).""""
self.translate = {}
opts = CharBuffer(options)
while True:
There are four double-quotes at the end of that line, which is where this flake8 error comes from: ``` $ flake8 Bio/ Bio/Nexus/Nexus.py:1046:49: E999 SyntaxError: EOL while scanning string literal ... ```
"Please Report this as a bug, and send in data file."
def _translate(self, options):
+ """Translates a Nexus file (PRIVATE)."""
self.translate = {}
opts = CharBuffer(options)
while True: |
codereview_python_data_1476 | What is the abbreviation `sc`? Can we be a bit more verbose here so that it's more easy to read? |
codereview_python_data_1483 | if double_stranded:
if seq_type == "protein":
- raise ValueError("double-stranded proteins await their discovery")
elif seq_type == "DNA":
seq = complement(seq, inplace=False) # TODO: remove inplace=False
elif seq_type == "RNA":
Funny, but perhaps a clearer error message is wiser?
if double_stranded:
if seq_type == "protein":
+ raise ValueError("protein sequences cannot be double-stranded")
elif seq_type == "DNA":
seq = complement(seq, inplace=False) # TODO: remove inplace=False
elif seq_type == "RNA": |
codereview_python_data_1490 | bboxes = squares[:, :4]
- if (self.ignore_iof_thr > 0) and (gt_bboxes_ignore is not None) and (
- gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = bbox_overlaps(
bboxes, gt_bboxes_ignore, mode='iof')
Thanks for the fix! Since there are lots of conditions, we can just use one pair of brackets to hold them. ```python if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): pass ```
bboxes = squares[:, :4]
+ if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
+ and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
if self.ignore_wrt_candidates:
ignore_overlaps = bbox_overlaps(
bboxes, gt_bboxes_ignore, mode='iof') |
codereview_python_data_1492 | id=record_id,
description=record_id,
annotations={
- "molecule_type": "protein",
"model": model.id,
"chain": chain.id,
"start": int(rnumbers[0]),
Same here. How do you know it's protein?
id=record_id,
description=record_id,
annotations={
"model": model.id,
"chain": chain.id,
"start": int(rnumbers[0]), |
codereview_python_data_1494 | if len(self.nlabel_dict) > 1:
self.nlabels_flag = True
- assert len(g) == n_nodes
# update statistics of graphs
self.n += n_nodes
It is better to use g.number_of_nodes(); len(g) will be deprecated soon.
if len(self.nlabel_dict) > 1:
self.nlabels_flag = True
+ assert g.number_of_nodes() == n_nodes
# update statistics of graphs
self.n += n_nodes |
codereview_python_data_1500 | action="store_true",
help=_("enables dnf's obsoletes processing logic "
"for upgrade or display capabilities that "
- "the package obsoletes for repoquery, and info"))
main_parser.add_argument("--rpmverbosity", default=None,
help=_("debugging output level for rpm"),
metavar='[debug level name]')
Erase the `,`, please.
action="store_true",
help=_("enables dnf's obsoletes processing logic "
"for upgrade or display capabilities that "
+ "the package obsoletes for info, list and repoquery"))
main_parser.add_argument("--rpmverbosity", default=None,
help=_("debugging output level for rpm"),
metavar='[debug level name]') |
codereview_python_data_1508 | task, self._id, self._task_result_queue, reporter,
use_multiprocessing=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout,
- check_unfulfilled_deps=self._config.check_unfulfilled_deps
)
def _purge_children(self):
Can you add a trailing comma here? Next line addition will look nicer in the diffs. :)
task, self._id, self._task_result_queue, reporter,
use_multiprocessing=bool(self.worker_processes > 1),
worker_timeout=self._config.timeout,
+ check_unfulfilled_deps=self._config.check_unfulfilled_deps,
)
def _purge_children(self): |
codereview_python_data_1512 | setdiff,
shift,
sort,
- split_into_nhot_depr as split_into_nhot,
symdiff,
Type,
union,
@st-pasha After your changes, `split_into_nhot_depr` should be changed to `split_into_nhot_deprecated` here.
setdiff,
shift,
sort,
+ split_into_nhot_deprecated as split_into_nhot,
symdiff,
Type,
union, |
codereview_python_data_1520 | builder.add('connect-src', 'self', quote=True)
builder.add('script-src', 'self', quote=True)
builder.add('script-src', 'scripts.test.tld')
- builder.add_sourceless('block-all-mixed-content')
self.assertEqual(
- str(builder), "block-all-mixed-content; connect-src 'self'; "
- "default-src 'none'; script-src 'self' scripts.test.tld;")
def test_policy_modification(self):
"""Ensure that policies can be modified."""
Shouldn't we test with upgrade-insecure-requests that is actually used in code ?
builder.add('connect-src', 'self', quote=True)
builder.add('script-src', 'self', quote=True)
builder.add('script-src', 'scripts.test.tld')
+ builder.add_sourceless('upgrade-insecure-requests')
self.assertEqual(
+ str(builder), "connect-src 'self'; default-src 'none'; "
+ "script-src 'self' scripts.test.tld; upgrade-insecure-requests;")
def test_policy_modification(self):
"""Ensure that policies can be modified.""" |
codereview_python_data_1521 | from Bio.Align import MultipleSeqAlignment
from Bio.SeqRecord import SeqRecord
-from Bio.codonalign.codonalphabet import default_codon_table, default_codon_alphabet, \
- compare_codon_alphabet
from Bio.codonalign.codonseq import _get_codon_list, CodonSeq, cal_dn_ds
from Bio.codonalign.chisq import chisqprob
Just use two import lines rather than a slash line continuation?
from Bio.Align import MultipleSeqAlignment
from Bio.SeqRecord import SeqRecord
+from Bio.codonalign.codonalphabet import (
+ default_codon_table, default_codon_alphabet, compare_codon_alphabet
+)
from Bio.codonalign.codonseq import _get_codon_list, CodonSeq, cal_dn_ds
from Bio.codonalign.chisq import chisqprob |
codereview_python_data_1524 | return 1
try:
- shellutil.run_command(['chcon', con, path])
except shellutil.CommandError as cmd_err:
return cmd_err.returncode
return 0
shellutil logs errors by default... we would need to log here to match the original code
return 1
try:
+ shellutil.run_command(['chcon', con, path], log_error=True)
except shellutil.CommandError as cmd_err:
return cmd_err.returncode
return 0 |
codereview_python_data_1531 | name='clusterfuzz',
version='0.0.1',
author='ClusterFuzz authors',
- author_email='clusterfuzz-announce@googlegroups.com',
description='ClusterFuzz',
long_description=long_description,
long_description_content_type='text/markdown',
should this be clusterfuzz-dev@ ?
name='clusterfuzz',
version='0.0.1',
author='ClusterFuzz authors',
+ author_email='clusterfuzz-dev@googlegroups.com',
description='ClusterFuzz',
long_description=long_description,
long_description_content_type='text/markdown', |
codereview_python_data_1534 | else:
ky = target_data.pop(key, None)
graph.add_edge(source, target, key=ky)
- graph[source][target][ky].update(target_data)
return graph
This `target_data` should be `tdata` I believe. Also, in this function, we copy the `tdata` dict just to `pop(id_)` from it without affecting the original dict. Seems like instead we should use `target = tdata[id_]`. But that change should apply to both the node and edge portions of the code and is better done in another PR.
else:
ky = target_data.pop(key, None)
graph.add_edge(source, target, key=ky)
+ graph[source][target][ky].update(tdata)
return graph |
codereview_python_data_1542 | from Bio import Alphabet
from Bio.Seq import Seq
-from Bio.SeqRecord import SeqRecord
from Bio.SeqIO.Interfaces import SequenceWriter
import struct
import sys
import re
If you are sorting alphabetically, it should be one line higher. See also #2715 when I removed the ``import sys`` and put the standard library imports above the Biopython ones.
from Bio import Alphabet
from Bio.Seq import Seq
from Bio.SeqIO.Interfaces import SequenceWriter
+from Bio.SeqRecord import SeqRecord
import struct
import sys
import re |
codereview_python_data_1543 | continue
elif coding in [")", "}"]:
raise NexusError(
- 'Improper character "'
- + coding
- + '" at position '
- + pos
- + " of a coding sequence."
)
else:
coding_list["d"].append(coding)
Not a black issue, but this could be rewritten to avoid the string concatenation, e.g. use ``%s`` formatting.
continue
elif coding in [")", "}"]:
raise NexusError(
+ "Improper character %s at position %i of a coding sequence."
+ % (coding, pos)
)
else:
coding_list["d"].append(coding) |
codereview_python_data_1549 | return cfg.total_epochs
-def get_final_results(log_json_path, epoch, RESULTS_LUT):
result_dict = dict()
with open(log_json_path, 'r') as f:
for line in f.readlines():
Should use lower case here for RESULTS_LUT.
return cfg.total_epochs
+def get_final_results(log_json_path, epoch, results_lut):
result_dict = dict()
with open(log_json_path, 'r') as f:
for line in f.readlines(): |
codereview_python_data_1554 | import threading
import os
from select import select, error as select_error
import subprocess
import time
import types
-import socket
from scapy.consts import DARWIN, FREEBSD, OPENBSD, WINDOWS
from scapy.compat import plain_str
from scapy.data import ETH_P_ALL, MTU
Also please move it to its right alphabetical place in the imports (above: native modules). Below is reserved for scapy modules
import threading
import os
from select import select, error as select_error
+import socket
import subprocess
import time
import types
+
from scapy.consts import DARWIN, FREEBSD, OPENBSD, WINDOWS
from scapy.compat import plain_str
from scapy.data import ETH_P_ALL, MTU |
codereview_python_data_1557 | # Value should be of WellRecord type
if not isinstance(obj, WellRecord):
raise ValueError(
- "A WellRecord type object is needed as value" + " (got %s)" % type(obj)
)
def __getitem__(self, index):
We can remove the plus and combine these strings. (The original code had a redundant plus)
# Value should be of WellRecord type
if not isinstance(obj, WellRecord):
raise ValueError(
+ "A WellRecord type object is needed as value (got %s)" % type(obj)
)
def __getitem__(self, index): |
codereview_python_data_1559 | _url_re = re.compile(r'''https?://(?:www\.)?teamliquid\.net/video/streams/''')
-_afreecaRe = re.compile('View on Afreeca')
-_twitchRe = re.compile('View on Twitch.tv')
class Teamliquid(Plugin):
Rather than searching for the specific strings `View on Afreeca` or `View on Twitch.tv` you might be better searching for any URL that is directly before the string `View on`. That way you can find other embedded streams, I noticed that there are some goodgame.ru streams too. You could use a regex like: ```python stream_address_re = re.compile(r'''href\s*=\s*"([^"]*?)"[^<>]+?>\s*View\s+on\s+''', re.IGNORECASE | re.MULTILINE) ``` Then extract the matched URL (if any), and pass it to `self.session.streams`; ```python stream_url_match = stream_address_re.search(res.text) if stream_url_match: stream_url = stream_url_match.group(1) self.logger.info("Attempting to play streams from {0}", stream_url) return self.session.streams(stream_url) ``` That way you will be able to support any embedded streams that streamlink supports, not just twitch/afreeca.
_url_re = re.compile(r'''https?://(?:www\.)?teamliquid\.net/video/streams/''')
class Teamliquid(Plugin): |
codereview_python_data_1569 | -from ..geometry import bbox_overlaps
from .registry import IOU_CALCULATOR
There is only one method in `geometry.py`. We may consider moving it to this file.
+import torch
+
from .registry import IOU_CALCULATOR |
codereview_python_data_1575 | stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
- return deltas.cuda()
def delta2bbox(rois,
Is this still necessary?
stds = deltas.new_tensor(stds).unsqueeze(0)
deltas = deltas.sub_(means).div_(stds)
+ return deltas
def delta2bbox(rois, |
codereview_python_data_1580 | cmdline.annot = self.annotation_outfile
self.assertEqual(
str(cmdline),
- probcons_exe + " -c 4 -ir 222 -pre 1 -annot Fasta/probcons_annot.out"
- " -a Fasta/fa01",
)
stdout, stderr = cmdline()
self.assertTrue(stderr.startswith("\nPROBCONS"))
Does this look better with the string literal in one (currently split in two with implicit concatenation)?
cmdline.annot = self.annotation_outfile
self.assertEqual(
str(cmdline),
+ probcons_exe
+ + " -c 4 -ir 222 -pre 1 -annot Fasta/probcons_annot.out -a Fasta/fa01",
)
stdout, stderr = cmdline()
self.assertTrue(stderr.startswith("\nPROBCONS")) |
codereview_python_data_1581 | def train(self, mode=True):
super(ResNet, self).train(mode)
if mode and self.norm_eval:
- for mod in self.modules():
# trick: eval have effect on BatchNorm only
- if isinstance(self, nn.BatchNorm2d):
- mod.eval()
If we set eval mode in `build_norm_layer`, then this method is not necessary any more.
def train(self, mode=True):
super(ResNet, self).train(mode)
if mode and self.norm_eval:
+ for m in self.modules():
# trick: eval have effect on BatchNorm only
+ if isinstance(m, nn.BatchNorm2d):
+ m.eval() |
codereview_python_data_1584 | import hive_metastore.ttypes
partition_str = self.partition_spec(partition)
thrift_table = client.get_partition_by_name(database, table, partition_str)
- except hive_metastore.ttypes.NoSuchObjectException as e:
return ''
else:
thrift_table = client.get_table(database, table)
can you fix this flake8 issue? `./luigi/contrib/hive.py:187:71: F841 local variable 'e' is assigned to but never used` (btw you can run `tox -e flake8` locally to test)
import hive_metastore.ttypes
partition_str = self.partition_spec(partition)
thrift_table = client.get_partition_by_name(database, table, partition_str)
+ except hive_metastore.ttypes.NoSuchObjectException:
return ''
else:
thrift_table = client.get_table(database, table) |
codereview_python_data_1585 | from Queue import Queue
-from anytree import Node
from anytree import RenderTree
from anytree import AsciiStyle
from anytree import node
nit: ``` import anytree as at ``` and use at.Node, at.RenderTree, etc.
from Queue import Queue
from anytree import RenderTree
from anytree import AsciiStyle
from anytree import node |
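A tiny sketch of the reviewer's suggested import style, using the same anytree names referenced above (the tree contents are made up for illustration):

```python
import anytree as at

root = at.Node("root")
at.Node("child", parent=root)
print(at.RenderTree(root, style=at.AsciiStyle()))
```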
codereview_python_data_1586 | @classmethod
def materialize(cls, future):
"""
- Materialize data matching ``future`` object.
Parameters
----------
```suggestion Materialize data matching `future` object. ``` numpydoc says reference parameters with single backicks
@classmethod
def materialize(cls, future):
"""
+ Materialize data matching `future` object.
Parameters
---------- |
codereview_python_data_1601 | return ParseResult(cmd=None, args=None, cmdline=cmdline)
args = self._split_args(cmd, argstr, keep)
- args = [x.strip().lstrip(':').strip() if x is not ':' and x is not ' '
else x for x in args]
if keep and args:
cmdline = [cmdstr, sep + args[0]] + args[1:]
I don't think it makes sense to apply this on the arguments.
return ParseResult(cmd=None, args=None, cmdline=cmdline)
args = self._split_args(cmd, argstr, keep)
+ args = [x.strip().lstrip(':').strip() if x != ':' and
+ x != ' ' and cmdstr != 'set-cmd-text'
else x for x in args]
if keep and args:
cmdline = [cmdstr, sep + args[0]] + args[1:] |
codereview_python_data_1602 | # Issue warning if pdb_file is given
if pdb_file is not None:
warnings.warn(
- "ResidueDepth no longer requires a pdb file. "
- "This argument will be removed in a future release "
- "of Biopython.",
BiopythonDeprecationWarning,
)
Can save at least one line by breaking the strings differently
# Issue warning if pdb_file is given
if pdb_file is not None:
warnings.warn(
+ "ResidueDepth no longer requires a pdb file. This argument will be "
+ "removed in a future release of Biopython.",
BiopythonDeprecationWarning,
) |
codereview_python_data_1605 | Number of days of daily returns to use for the regression.
allowed_missing_percentage : float, optional
Percentage of returns observations that are allowed to be missing when
- calculating betas. Default is 25%.
"""
window_safe = True
dtype = float64_dtype
I couldn't find another place where `vectorized_beta` might be useful at the moment, however at a later date we might want to move it to `empyrical`.
Number of days of daily returns to use for the regression.
allowed_missing_percentage : float, optional
Percentage of returns observations that are allowed to be missing when
+ calculating betas. Assets with more than this percentage of returns
+ observations missing will produce values of NaN. Default behavior is
+ that 25% of inputs can be missing.
"""
window_safe = True
dtype = float64_dtype |
codereview_python_data_1606 | raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
- @cmdutils.argument('toggle', flag='t')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url.
No need to add this if you don't want to customize the letter - the first one is taken by default.
raise cmdexc.CommandError("Quickmark '{}' not found!".format(name))
@cmdutils.register(instance='command-dispatcher', scope='window')
def bookmark_add(self, url=None, title=None, toggle=False):
"""Save the current page as a bookmark, or a specific url. |
codereview_python_data_1609 | Return
------
modin.DataFrame or pandas.DataFrame [and list of groupby columns names if
- columns for groupby were be generated]
"""
assert not (
(groupby_ncols is None) ^ (count_groups is None)
```suggestion modin.DataFrame or pandas.DataFrame [and list of groupby columns names if columns for groupby were generated] ```
Return
------
modin.DataFrame or pandas.DataFrame [and list of groupby columns names if
+ columns for groupby were generated]
"""
assert not (
(groupby_ncols is None) ^ (count_groups is None) |
codereview_python_data_1610 | def test_delete_non_existent_policy_returns_no_such_entity(self):
non_existent_policy_arn = "arn:aws:iam::000000000000:policy/non-existent-policy"
- try:
- self.iam_client.delete_policy(PolicyArn=non_existent_policy_arn)
- except ClientError as e:
- self.assertEqual("NoSuchEntity", e.response["Error"]["Code"])
def test_recreate_iam_role(self):
role_name = "role-{}".format(short_uid())
Let's better change this to (to ensure that the exception is actually raised): ``` with self.assertRaises(ClientError) as ctx: self.iam_client.delete_policy(PolicyArn=non_existent_policy_arn) self.assertEqual("NoSuchEntity", ctx.execption.response["Error"]["Code"]) ```
def test_delete_non_existent_policy_returns_no_such_entity(self):
non_existent_policy_arn = "arn:aws:iam::000000000000:policy/non-existent-policy"
+ with self.assertRaises(ClientError) as ctx:
+ self.iam_client.delete_policy(PolicyArn=non_existent_policy_arn)
+ self.assertEqual("NoSuchEntity", ctx.exception.response["Error"]["Code"])
def test_recreate_iam_role(self):
role_name = "role-{}".format(short_uid()) |
codereview_python_data_1611 | if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
- # weight does not have num_class dim
weight = weight.view(-1, 1)
else:
- # weight is flattened while loss is not
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
We may also note what kind of method would encounter such a situation.
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
+ # For most cases, weight is of shape (num_priors, ),
+ # which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
+ # Sometimes, weight per anchor per class is also needed. e.g.
+ # in FSAF. But it may be flattened of shape
+ # (num_priors x num_class, ), while loss is still of shape
+ # (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor) |
codereview_python_data_1614 | title_obj = None
title = self._format_title(key)
if self.show_title and len(self.coords) > 1 and title:
- title_obj = self.handles['fig'].suptitle(title, **self._fontsize('title'),
- y=self.suptitle_y)
self.handles['title'] = title_obj
self.handles['bbox_extra_artists'] += [title_obj]
This is a syntax error, the y should precede the kwargs: ```suggestion title_obj = self.handles['fig'].suptitle( title, y=self.suptitle_y, **self._fontsize('title') ) ```
title_obj = None
title = self._format_title(key)
if self.show_title and len(self.coords) > 1 and title:
+ title_obj = self.handles['fig'].suptitle(title, y=self.suptitle_y,
+ **self._fontsize('title'))
self.handles['title'] = title_obj
self.handles['bbox_extra_artists'] += [title_obj] |
codereview_python_data_1617 | from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
print('Fuck you')
- for height in range(1,4):
transactions = [
Transaction.create(
[alice_pubkey(alice)],
I think we should remove this.
from bigchaindb.models import Transaction
# create blocks with transactions for `USER` to spend
print('Fuck you')
+ for height in range(1, 4):
transactions = [
Transaction.create(
[alice_pubkey(alice)], |
codereview_python_data_1621 | ]
else:
alternate_services_violations = []
- print 'XXX: Has violation? %r / %r / %r' % (iap_resource.iap_enabled,
- self.allowed_direct_access_sources,
- iap_resource.direct_access_sources)
if (iap_resource.iap_enabled and
self.allowed_direct_access_sources != '^.+$'):
sources_regex = re.compile(
same here with the print vs log
]
else:
alternate_services_violations = []
+ LOGGER.debug('Alternate services violations: %r',
+ alternate_services_violations)
+ LOGGER.debug('Has sources violation? %r / %r / %r',
+ iap_resource.iap_enabled,
+ self.allowed_direct_access_sources,
+ iap_resource.direct_access_sources)
if (iap_resource.iap_enabled and
self.allowed_direct_access_sources != '^.+$'):
sources_regex = re.compile( |
codereview_python_data_1625 | return result
-def exit_if_exception(result):
"""
If "result" is an exception then raise the "result".
Unless "result" is an exception then return the "result".
It will be better just to re-raise the exception 'result' rather than to abort with `exit` call. Correspondingly, the function `exit_if_exception` should be renamed to `raise_if_exception`. By the way, `exit` internally raises `SystemExit` which is inherited from `BaseException`. Thus with `exit` we will not enter to `except Exception` section of `try` block in `TestScenarioBase.execute_scenario`. Anyway, `exit` is some kind of emergency ways to quit. It doesn't fit an ordinary test fail.
return result
+def raise_if_exception(result):
"""
If "result" is an exception then raise the "result".
Unless "result" is an exception then return the "result". |
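The record only shows the renamed signature and docstring; a minimal sketch of a body matching that docstring (whether to test against `Exception` or `BaseException` is an assumption here):

```python
def raise_if_exception(result):
    """If "result" is an exception then raise it, otherwise return it."""
    if isinstance(result, BaseException):
        raise result
    return result
```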
codereview_python_data_1626 | atoms (altloc) as a tie-breaker.
"""
- self.child_list.sort
def flag_disordered(self):
"""Set the disordered flag."""
Should that not be ``self.child_list.sort()`` for an in-place sorting of the list?
atoms (altloc) as a tie-breaker.
"""
+ self.child_list.sort()
def flag_disordered(self):
"""Set the disordered flag.""" |
codereview_python_data_1627 | restutil.http_get("http://foo.bar", retry_delay=retry_delay_in_sec, max_retry=max_retry)
duration = datetime.utcnow() - start_time
- self.assertEqual(max_retry, mock_resp.call_count, "Did not Retry the required amount of time")
upper_bound = timedelta(seconds=retry_delay_in_sec * (max_retry + 2))
lower_bound = timedelta(seconds=retry_delay_in_sec * (max_retry - 2))
self.assertTrue(upper_bound >= duration >= lower_bound,
should be "times" in "Did not Retry the required amount of times"
restutil.http_get("http://foo.bar", retry_delay=retry_delay_in_sec, max_retry=max_retry)
duration = datetime.utcnow() - start_time
+ self.assertEqual(max_retry, mock_resp.call_count, "Did not Retry the required amount of times")
upper_bound = timedelta(seconds=retry_delay_in_sec * (max_retry + 2))
lower_bound = timedelta(seconds=retry_delay_in_sec * (max_retry - 2))
self.assertTrue(upper_bound >= duration >= lower_bound, |
codereview_python_data_1628 | assert not message_mock.messages
assert qutescheme.spawn_output == expected
assert proc.exit_status() == QProcess.NormalExit
- assert proc.final_stdout().strip() == "test", proc.final_stdout()
- assert proc.final_stderr().strip() == "", proc.final_stderr()
def test_start_verbose(proc, qtbot, message_mock, py_proc):
Maybe those would fit better in `test_start_output_message` which also tests various combinations of stdout/stderr being set or unset?
assert not message_mock.messages
assert qutescheme.spawn_output == expected
assert proc.exit_status() == QProcess.NormalExit
def test_start_verbose(proc, qtbot, message_mock, py_proc): |
codereview_python_data_1632 | def get_result(self):
"""Get the result of minimization."""
# Done with minimization, output log one more time
- self._report_progress(True)
if not self.minimizer.tokenize:
return self.get_required_tokens()
return str(self)
Nit: explicit parameter_name=True here.
def get_result(self):
"""Get the result of minimization."""
# Done with minimization, output log one more time
+ self._report_progress(is_final_progress_report=True)
if not self.minimizer.tokenize:
return self.get_required_tokens()
return str(self) |
codereview_python_data_1638 | SUBSCRIBER_NAME = "bq"
KEYSPACE_NAME_INCOMING = "ilisten"
KEYSPACE_NAME_UNIQUE = "ulisten"
-APP_CREDENTIALS_FILE = os.environ['GOOGLE_APPLICATION_CREDENTIALS']
# TODO:
# Big query hardcoded data set ids
will this fail if the env variable doesn't exist? Is this the preferred behaviour?
SUBSCRIBER_NAME = "bq"
KEYSPACE_NAME_INCOMING = "ilisten"
KEYSPACE_NAME_UNIQUE = "ulisten"
+APP_CREDENTIALS_FILE = os.environ.get('GOOGLE_APPLICATION_CREDENTIALS')
# TODO:
# Big query hardcoded data set ids |
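To the reviewer's question above: yes, subscripting `os.environ` raises `KeyError` at import time when the variable is unset, while `.get()` simply yields `None`. A quick illustration (the variable name is taken from the snippet; run it with the variable unset):

```python
import os

# .get() degrades gracefully when the variable is unset...
print(os.environ.get('GOOGLE_APPLICATION_CREDENTIALS'))  # -> None
# ...whereas subscripting raises KeyError, which would abort the module import.
try:
    os.environ['GOOGLE_APPLICATION_CREDENTIALS']
except KeyError:
    print('KeyError: the original module-level lookup would have failed here')
```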
codereview_python_data_1652 | -# This code is part of the Biopython distribution and governed by its
-# license. Please see the LICENSE file that should have been included
-# as part of this package.
"""Tests for online functionality of EBI Search module."""
Missing copyright statement with author names.
+# Copyright 2017 by Berenice Batut (berenice.batut@gmail.com). All rights reserved.
+# Revision copyright 2017 by Francesco Gastaldello. All rights reserved.
+#
+# This file is part of the Biopython distribution and governed by your
+# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
+# Please see the LICENSE file that should have been included as part of this
+# package.
"""Tests for online functionality of EBI Search module.""" |
codereview_python_data_1663 | def __getitem__(self, n):
if isinstance(n, slice):
raise nx.NetworkXError(
- f"{type(self).__name__} does not support slicing, try list(G.nodes)[{n}]"
)
return self._nodes[n]
I went with `{n.start}:{n.stop}` as IMO the target users for this error may not know about the slice object.
def __getitem__(self, n):
if isinstance(n, slice):
raise nx.NetworkXError(
+ f"{type(self).__name__} does not support slicing, "
+ f"try list(G.nodes)[{n.start}:{n.stop}:{n.step}]"
)
return self._nodes[n] |
codereview_python_data_1664 | stockrecord. The price_incl_tax is quantized to two decimal places.
Rounding behaviour is Decimal's default
"""
- rate = D('0.20')
exponent = D('0.01') # Default to two decimal places
def pricing_policy(self, product, stockrecord):
Rate is country specific and should be moved inside UK strategy.
stockrecord. The price_incl_tax is quantized to two decimal places.
Rounding behaviour is Decimal's default
"""
+ rate = D('0') # Subclass and specify the correct rate
exponent = D('0.01') # Default to two decimal places
def pricing_policy(self, product, stockrecord): |
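A minimal sketch of the reviewer's point about moving the rate into the country-specific strategy; the class names here are hypothetical, and the 20% figure is simply the value removed from the original snippet:

```python
from decimal import Decimal as D


class FixedRateTax:
    rate = D('0')         # subclasses supply the country-specific rate
    exponent = D('0.01')  # quantize prices to two decimal places


class UKFixedRateTax(FixedRateTax):
    rate = D('0.20')      # the UK rate previously hard-coded in the base class
```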
codereview_python_data_1666 | #@subsitute: tempita
[requires tempita substitution
- - context can't be specified here though
- only necessary when @required from non-tempita code]
for prototypes and implementation respectively. For non-python or
Then, where does it get the substitution context/variables from?
#@subsitute: tempita
[requires tempita substitution
+ - context can't be specified here though so only
+ tempita utility that requires no external context
+ will benefit from this tag
- only necessary when @required from non-tempita code]
for prototypes and implementation respectively. For non-python or |
codereview_python_data_1682 | self.buffer = self.buffer[1:]
def next_until(self, target):
- """Keep iterating the NEXUS file until it reaches a target character.
-
- Returns the word found in the NEXUS file.
- """
for t in target:
try:
pos = self.buffer.index(t)
This wording was odd before your change, but how about "Iterate over the NEXUS file until a target character is reached."
self.buffer = self.buffer[1:]
def next_until(self, target):
+ """Iterate over the NEXUS file until a target character is reached."""
for t in target:
try:
pos = self.buffer.index(t) |
codereview_python_data_1683 | service_name = "{0}.service".format(name).lower()
if CGroups.enabled() and not CGroupsTelemetry.is_tracked(service_name):
cgroup = CGroups.for_systemd_service(service_name)
tracker = CGroupsTelemetry(service_name, cgroup=cgroup)
CGroupsTelemetry._tracked[service_name] = tracker
@staticmethod
- def track_extension(name, cgroup=None, limits=None):
"""
Create all required CGroups to track all metrics for an extension and its associated services.
suggest 'handler_configuration' instead of 'limits' to be consistent with the other functions
service_name = "{0}.service".format(name).lower()
if CGroups.enabled() and not CGroupsTelemetry.is_tracked(service_name):
cgroup = CGroups.for_systemd_service(service_name)
+ logger.info("Now tracking cgroup {0}".format(service_name))
tracker = CGroupsTelemetry(service_name, cgroup=cgroup)
CGroupsTelemetry._tracked[service_name] = tracker
@staticmethod
+ def track_extension(name, cgroup=None, handler_configuration=None):
"""
Create all required CGroups to track all metrics for an extension and its associated services. |
codereview_python_data_1688 | def init_stylesheet(self, css_file="green.css"):
"""Initialize the stylesheet with a provided css file."""
- css_path = str(pathlib.Path(__file__).parent / css_file)
- self.config_stub.val.content.user_stylesheets = css_path
def set_css(self, css):
"""Set document style to `css` via stylesheet.js."""
nitpick: I'd move the `str(css_path)` here, so that the thing called `css_path` is actually a path rather than a string.
def init_stylesheet(self, css_file="green.css"):
"""Initialize the stylesheet with a provided css file."""
+ css_path = pathlib.Path(__file__).parent / css_file
+ self.config_stub.val.content.user_stylesheets = str(css_path)
def set_css(self, css):
"""Set document style to `css` via stylesheet.js.""" |
codereview_python_data_1694 | cb = functools.partial(
get_path_output_or_null,
env=env,
- no_find_output=True,
path_id=path_id,
aspect=aspect)
Let's name this `disable_output_fusion` or something like that.
cb = functools.partial(
get_path_output_or_null,
env=env,
+ disable_output_fusion=True,
path_id=path_id,
aspect=aspect) |
codereview_python_data_1696 | if not self.serialized_fuzz_target:
return None
- fuzz_target = data_types.FuzzTarget()
- fuzz_target.engine = self.serialized_fuzz_target['engine']
- fuzz_target.project = self.serialized_fuzz_target['project']
- fuzz_target.binary = self.serialized_fuzz_target['binary']
return fuzz_target
nit: can move to just constructor, engine=self.serialized_fuzz_target['engine']
if not self.serialized_fuzz_target:
return None
+ fuzz_target = data_types.FuzzTarget(
+ engine=self.serialized_fuzz_target['engine'],
+ project=self.serialized_fuzz_target['project'],
+ binary=self.serialized_fuzz_target['binary'])
return fuzz_target |
codereview_python_data_1698 | cmake_cmd.append("-DOpenCL_LIBRARY={0}".format(opencl_library))
elif use_cuda:
cmake_cmd.append("-DUSE_CUDA=ON")
- if openmp_include_dir:
- cmake_cmd.append("-DOpenMP_INCLUDE_DIR={0}".format(openmp_include_dir))
- if openmp_library:
- cmake_cmd.append("-DOpenMP_LIBRARY={0}".format(openmp_library))
if use_mpi:
cmake_cmd.append("-DUSE_MPI=ON")
if nomp:
I don't think this code is needed. I understand that CUDA requires OpenMP, but we recently dropped exactly the same code because new version of CMake are able to find OpenMP without these hints: #2674. Just bump the required CMake version for CUDA compilation and use found paths. I believe it's OK as for a new feature.
cmake_cmd.append("-DOpenCL_LIBRARY={0}".format(opencl_library))
elif use_cuda:
cmake_cmd.append("-DUSE_CUDA=ON")
if use_mpi:
cmake_cmd.append("-DUSE_MPI=ON")
if nomp: |
codereview_python_data_1709 | package, alt_package = package
try:
locals()[package] = __import__(alt_package)
- except (ImportError, SyntaxError):
locals()[package] = __import__(package)
else:
locals()[package] = __import__(package)
we need to be *extremely* careful about touching this code, at all.
package, alt_package = package
try:
locals()[package] = __import__(alt_package)
+ except ImportError:
locals()[package] = __import__(package)
else:
locals()[package] = __import__(package) |
codereview_python_data_1710 | def __truediv__(self, other):
pass
- def abs(self):
pass
def conjugate(self):
This can also be overloaded as `__abs__`.
def __truediv__(self, other):
pass
+ def __abs__(self):
pass
def conjugate(self): |
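A short, self-contained illustration of why overloading `__abs__` (rather than defining a plain `abs` method) is preferable: it lets the built-in `abs()` work on instances. The class here is a made-up stand-in for the one in the record:

```python
class Scalar:
    def __init__(self, value):
        self.value = value

    def __abs__(self):
        # Called by the built-in abs()
        return Scalar(abs(self.value))


print(abs(Scalar(-3)).value)  # -> 3
```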
codereview_python_data_1721 | return ContainerInfo(container_name, entry_point)
- def get_host_path_for_path_in_docker(self, path):
- return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
- r'%s/\1' % config.HOST_TMP_FOLDER, path)
-
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
Can we extract this code into a shared util function (on global scope), to avoid duplication with line 614?
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN. |
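The reviewer asks for the removed helper to become a shared, module-level util; a sketch of what that could look like, lifted directly from the deleted method (the `config` import path is assumed):

```python
import re

from localstack import config  # assumed import path for TMP_FOLDER / HOST_TMP_FOLDER


def get_host_path_for_path_in_docker(path):
    """Translate a path under the temp folder into the corresponding host path."""
    return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
                  r'%s/\1' % config.HOST_TMP_FOLDER, path)
```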
codereview_python_data_1728 | SettingValue(typ.Bool(), 'false'),
"Hide the tabbar if only one tab is open."),
- ('perm-hide',
SettingValue(typ.Bool(), 'false'),
- "Hide permanently."),
('wrap',
SettingValue(typ.Bool(), 'true'),
I think `perm-` is a bit confusing. I'd prefer `always-hide`. Alternatively, renaming `auto-hide` to `hide-auto` and `perm-hide` to `hide-always` would sort them correctly. What's your opinion on this?
SettingValue(typ.Bool(), 'false'),
"Hide the tabbar if only one tab is open."),
+ ('always-hide',
SettingValue(typ.Bool(), 'false'),
+ "Always hide the tabbar."),
('wrap',
SettingValue(typ.Bool(), 'true'), |
codereview_python_data_1739 | def send_notifications(method, bucket_name, object_path, version_id):
- bucket_name = normalize_bucket_name(bucket_name)
for bucket, notifs in S3_NOTIFICATIONS.items():
- if bucket.lower() == bucket_name.lower():
action = {'PUT': 'ObjectCreated', 'POST': 'ObjectCreated', 'DELETE': 'ObjectRemoved'}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html
Good catch. Can we use this instead: ``` if normalize_bucket_name(bucket) == normalize_bucket_name(bucket_name): ```
def send_notifications(method, bucket_name, object_path, version_id):
for bucket, notifs in S3_NOTIFICATIONS.items():
+ if normalize_bucket_name(bucket) == normalize_bucket_name(bucket_name):
action = {'PUT': 'ObjectCreated', 'POST': 'ObjectCreated', 'DELETE': 'ObjectRemoved'}[method]
# TODO: support more detailed methods, e.g., DeleteMarkerCreated
# http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html |
codereview_python_data_1741 | loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
- # test bce_loss
- cls_score = torch.Tensor([[-200, 100], [500, -1000], [300, -300]])
- label = torch.Tensor([0, 1, 0]).long()
- weight = torch.Tensor([0.6, 0.4, 0.5])
- class_weight = torch.tensor([0.1, 0.9]) # class 0: 0.1, class 1: 0.9
# test bce_loss without class weight
loss_cfg = dict(
`bce_loss` now only supports the input tensor with shape (n, 1).
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
+ # test bce_loss matrix(M, C)
+ cls_score_M_C = torch.Tensor([[-200, 100], [500, -1000], [300, -300]])
+ label_M_C = torch.Tensor([0, 1, 0]).long()
+ weight_M = torch.Tensor([0.6, 0.4, 0.5]) # elementwise weight
+ class_weight_C = torch.tensor([0.1, 0.9]) # class 0: 0.1, class 1: 0.9
# test bce_loss without class weight
loss_cfg = dict( |
codereview_python_data_1748 | ),
'output_filename': 'js/machinery.min.js',
},
- 'in_context': {
- 'source_filenames': (
- 'js/jquery-1.11.1.min.js',
- 'js/bootstrap.min.js',
- 'js/cbpAnimatedHeader.min.js',
- 'js/agency.js',
- ),
- 'output_filename': 'js/in_context.min.js',
- },
'homepage': {
'source_filenames' : (
'js/lib/fullpage.js',
I assume that's a leftover from the rebase?
),
'output_filename': 'js/machinery.min.js',
},
'homepage': {
'source_filenames' : (
'js/lib/fullpage.js', |
codereview_python_data_1749 | nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
- if arg.type.is_pyobject or arg.type.is_numeric:
if arg.kw_only:
default_kwargs.append(arg)
else:
Why only these two? Why is this constraint needed?
nonliteral_other.append(arg)
else:
arg.default = DefaultLiteralArgNode(arg.pos, arg.default)
+ if arg.default.type and arg.default.type.can_coerce_to_pyobject(env):
if arg.kw_only:
default_kwargs.append(arg)
else: |
codereview_python_data_1750 | def _read_pfm_four_columns(handle):
- """Read motifs in Cluster Buster position frequency matrix format from a file handle.
- #cisbp
Pos A C G T
1 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
2 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
Why remove this URL?
def _read_pfm_four_columns(handle):
+ """Read motifs in position frequency matrix format (4 columns) from a file handle.
+ # cisbp
Pos A C G T
1 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846
2 0.00961538461538462 0.00961538461538462 0.00961538461538462 0.971153846153846 |
codereview_python_data_1756 | evts = self.phyloxml.phylogenies[4].clade.events
# Container behavior: __len__, __contains__
self.assertEqual(len(evts), 1)
- self.assertTrue("speciations" in evts)
- self.assertFalse("duplications" in evts)
# Attribute access: __get/set/delitem__
self.assertEqual(evts["speciations"], 1)
self.assertRaises(KeyError, lambda k: evts[k], "duplications") # noqa: E731
I may have missed these two last time, but ``assertIn`` and ``assertNotIn`` here please.
evts = self.phyloxml.phylogenies[4].clade.events
# Container behavior: __len__, __contains__
self.assertEqual(len(evts), 1)
+ self.assertIn("speciations", evts)
+ self.assertNotIn("duplications", evts)
# Attribute access: __get/set/delitem__
self.assertEqual(evts["speciations"], 1)
self.assertRaises(KeyError, lambda k: evts[k], "duplications") # noqa: E731 |
codereview_python_data_1763 | )
return usd_price
except (RemoteError, DeserializationError) as e:
- msg = f'Could not find price for {asset}. {str(e)}'
if instance._ethereum is not None:
instance._ethereum.msg_aggregator.add_warning(msg)
- return Price(ZERO)
return instance._query_oracle_instances(from_asset=asset, to_asset=A_USD)
def find_uniswap_v2_lp_price(
```suggestion msg = f'Could not find price for BSQ. {str(e)}' ``` Save ourselves an unneeded substituion. But also didn't you say it should be 100 sats? Isn't it better to use the "intended" price of 100 sats as fallback instead of `0`?
)
return usd_price
except (RemoteError, DeserializationError) as e:
+ msg = f'Could not find price for BSQ. {str(e)}'
if instance._ethereum is not None:
instance._ethereum.msg_aggregator.add_warning(msg)
+ return Price(SATOSHI_PER_BSQ * price_in_btc)
return instance._query_oracle_instances(from_asset=asset, to_asset=A_USD)
def find_uniswap_v2_lp_price( |
codereview_python_data_1777 | class ZigbeeClusterLibrary(Packet):
name = "Zigbee Cluster Library (ZCL) Frame"
fields_desc = [
# Frame control (8 bits)
BitField("reserved", 0, 3),
Could you add a ```python deprecated_fields = { "direction": ("command_direction", "2.5.0"), } ``` before the `fields_desc` to account for deprecation?
class ZigbeeClusterLibrary(Packet):
name = "Zigbee Cluster Library (ZCL) Frame"
+ deprecated_fields = {
+ "direction": ("command_direction", "2.5.0"),
+ }
fields_desc = [
# Frame control (8 bits)
BitField("reserved", 0, 3), |
codereview_python_data_1781 | sym_g.ndata[key] = g.ndata[key]
g = sym_g
- profiler = Profiler()
- profiler.start()
dgl.distributed.partition_graph(g, args.dataset, args.num_parts, 'data',
part_method=args.part_method,
balance_ntypes=balance_ntypes,
balance_edges=args.balance_edges)
- profiler.stop()
- print(profiler.output_text(unicode=True, color=True))
Is this still needed?
sym_g.ndata[key] = g.ndata[key]
g = sym_g
dgl.distributed.partition_graph(g, args.dataset, args.num_parts, 'data',
part_method=args.part_method,
balance_ntypes=balance_ntypes,
balance_edges=args.balance_edges) |
codereview_python_data_1782 | # Adjust data
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
- ratio = (panel[ticker]['price'] / panel[ticker]['close']).values
for col in adj_cols:
- panel[ticker][col] *= ratio
return panel
Could a stock on the way to delisting have a close price of 0 in Yahoo's data? If so, should we have a check here to make sure that close is non-0?
# Adjust data
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
+ ratio = (panel[ticker]['price'] / panel[ticker]['close'])
+ ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
+ panel[ticker][col] *= ratio_filtered
return panel |
codereview_python_data_1785 | @utils.benchmark('time', timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
-@utils.parametrize('format', ['csr']) # csr/csc is not supported
@utils.parametrize('fraction', [0.01, 0.1])
@utils.parametrize('return_uv', [True, False])
def track_time(graph_name, format, fraction, return_uv):
csr and coo are not supported
@utils.benchmark('time', timeout=1200)
@utils.parametrize_cpu('graph_name', ['cora', 'livejournal', 'friendster'])
@utils.parametrize_gpu('graph_name', ['cora', 'livejournal'])
+@utils.parametrize('format', ['coo', 'csr', 'csc'])
@utils.parametrize('fraction', [0.01, 0.1])
@utils.parametrize('return_uv', [True, False])
def track_time(graph_name, format, fraction, return_uv): |
codereview_python_data_1787 | def _apply():
if "apply_state" in self._optimizer._sparse_apply_args:
train_op = self._optimizer._resource_apply_sparse(
- accum_gradient.read_value(),
var,
indices,
apply_state=apply_state,
)
else:
train_op = self._optimizer._resource_apply_sparse(
- accum_gradient.read_value(), var, indices
)
reset_op = accum_gradient.assign(
tf.zeros_like(accum_gradient),
It seems one does not have to call `read_value()` here. Just passing in `accum_gradient` works for me as well.
def _apply():
if "apply_state" in self._optimizer._sparse_apply_args:
train_op = self._optimizer._resource_apply_sparse(
+ accum_gradient,
var,
indices,
apply_state=apply_state,
)
else:
train_op = self._optimizer._resource_apply_sparse(
+ accum_gradient, var, indices
)
reset_op = accum_gradient.assign(
tf.zeros_like(accum_gradient), |
codereview_python_data_1789 | pipeline = Pipeline.current()
if pipeline.exec_async or pipeline.exec_pipelined:
raise RuntimeError("PythonFunction can be used only in pipelines with `exec_async` and "
- "`exec_pipelined` specified to False.")
if (len(inputs) > self._schema.MaxNumInput() or
len(inputs) < self._schema.MinNumInput()):
raise ValueError(
```suggestion "`exec_pipelined` set to False.") ```
pipeline = Pipeline.current()
if pipeline.exec_async or pipeline.exec_pipelined:
raise RuntimeError("PythonFunction can be used only in pipelines with `exec_async` and "
+ "`exec_pipelined` set to False.")
if (len(inputs) > self._schema.MaxNumInput() or
len(inputs) < self._schema.MinNumInput()):
raise ValueError( |
codereview_python_data_1791 | message_func='default',
reduce_func='default',
apply_node_func='default'):
- """Functional method for ``dgl.DGLGraph.prop_nodes``.
Parameters
----------
Same as above for default arguments or docstrings
message_func='default',
reduce_func='default',
apply_node_func='default'):
+ """Functional method for :func:`dgl.DGLGraph.prop_nodes`.
Parameters
---------- |
codereview_python_data_1794 | def __init__(self, tab, qtbot, config_stub):
self.tab = tab
self.qtbot = qtbot
- loader = jinja2.FileSystemLoader(pathlib.Path(__file__).parent)
self._jinja_env = jinja2.Environment(loader=loader, autoescape=True)
# Make sure error logging via JS fails tests
config_stub.val.content.javascript.log = {
Since we need this multiple times, please do something like `JS_DIR = pathlib.Path(__file__).parent` at module level, and then use `JS_DIR` here and below.
def __init__(self, tab, qtbot, config_stub):
self.tab = tab
self.qtbot = qtbot
+ loader = jinja2.FileSystemLoader(JS_DIR)
self._jinja_env = jinja2.Environment(loader=loader, autoescape=True)
# Make sure error logging via JS fails tests
config_stub.val.content.javascript.log = { |
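For completeness, the module-level constant the reviewer asks for (taken verbatim from the review comment) would sit near the imports:

```python
import pathlib

JS_DIR = pathlib.Path(__file__).parent
```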
codereview_python_data_1796 | @property
def current_key(self):
"""Returns the current key value."""
- return getattr(self, '_current_key', None)
def _stream_parameters(self):
return util.stream_parameters(
I think I would prefer you declare `self._current_key=None` in the constructor and just return `self._current_key`. That way you can prevent anyone from overwriting `current_key` without needing to use `getattr` here.
@property
def current_key(self):
"""Returns the current key value."""
+ return self._current_key
def _stream_parameters(self):
return util.stream_parameters( |
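A minimal sketch of the constructor-initialised attribute the reviewer prefers over the `getattr` fallback; the class name and the rest of the constructor are placeholders:

```python
class KeyedObject:
    def __init__(self):
        self._current_key = None  # declared up front, so the property below is always valid

    @property
    def current_key(self):
        """Returns the current key value."""
        return self._current_key
```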
codereview_python_data_1800 | if not environment.is_engine_fuzzer_job():
return None
if not fuzz_targets:
logs.log_error('No fuzz targets found. Unable to pick random one.')
return None
- fuzz_targets = list(fuzz_targets)
environment.set_value('FUZZ_TARGET_COUNT', len(fuzz_targets))
fuzz_target = fuzzer_selection.select_fuzz_target(fuzz_targets,
probably move this line before line:275 since it can be iterator. environment.set_value can be here as-is.
if not environment.is_engine_fuzzer_job():
return None
+ fuzz_targets = list(fuzz_targets)
if not fuzz_targets:
logs.log_error('No fuzz targets found. Unable to pick random one.')
return None
environment.set_value('FUZZ_TARGET_COUNT', len(fuzz_targets))
fuzz_target = fuzzer_selection.select_fuzz_target(fuzz_targets, |
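The reason the `list()` call has to move above the emptiness check: if `fuzz_targets` arrives as a generator, the object itself is truthy even when it would yield nothing, so `if not fuzz_targets` would never fire. A two-line demonstration:

```python
empty = (x for x in [])
print(bool(empty))        # True  -- a generator object is always truthy
print(bool(list(empty)))  # False -- only the materialised list reveals it is empty
```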
codereview_python_data_1803 | ProjectFiles(locale_code, [self.parsed_configuration]),
)
- def l10n_path(self, locale, absolute_resource_path):
"""
Return l10n path for the given locale and reference path.
"""
project_files = self.get_or_set_project_files(locale.code)
- reference_path = locale_to_source_path(absolute_resource_path)
m = project_files.match(reference_path)
return m[0] if m is not None else None
`l10n_path` is only ever called with p-c on, so this should be here anymore, I think. Or it should at least be guarded by an `if`.
ProjectFiles(locale_code, [self.parsed_configuration]),
)
+ def l10n_path(self, locale, reference_path):
"""
Return l10n path for the given locale and reference path.
"""
project_files = self.get_or_set_project_files(locale.code)
m = project_files.match(reference_path)
return m[0] if m is not None else None |
codereview_python_data_1804 | def test_fuchsia_asan(self):
"""Test for Fuchsia ASan crashes."""
data = self._read_test_data('fuchsia_asan.txt')
expected_type = 'Heap-buffer-overflow\nWRITE 1'
expected_state = 'foo_function\nfoo_function\nbar_function\n'
as discussed, we need to fix this duplicated frame appearing in the stack. Could you add a TODO? This CL seems OK to land for now though to get the ball rolling and since there's no functionality change.
def test_fuchsia_asan(self):
"""Test for Fuchsia ASan crashes."""
+ # TODO(flowerhack): Once the duplicated frames issue is fixed for Fuchsia,
+ # update this test to recognize proper frames.
data = self._read_test_data('fuchsia_asan.txt')
expected_type = 'Heap-buffer-overflow\nWRITE 1'
expected_state = 'foo_function\nfoo_function\nbar_function\n' |
codereview_python_data_1813 | def _init_write_request_validator(self):
constraint_serializer = ConstraintsSerializer(domain_state_serializer)
config_state = self.states[CONFIG_LEDGER_ID]
- self.add_auth_rules_to_config_state(state=config_state,
- auth_map=auth_map,
- serializer=constraint_serializer)
- self.add_auth_rules_to_config_state(state=config_state,
- auth_map=anyone_can_write_map,
- serializer=constraint_serializer)
self.write_req_validator = WriteRequestValidator(config=self.config,
auth_map=auth_map,
cache=self.getIdrCache(),
We should not modify state without consensus. If a rule is not present in the config state, use the local auth map instead.
def _init_write_request_validator(self):
constraint_serializer = ConstraintsSerializer(domain_state_serializer)
config_state = self.states[CONFIG_LEDGER_ID]
self.write_req_validator = WriteRequestValidator(config=self.config,
auth_map=auth_map,
cache=self.getIdrCache(), |
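A heavily hedged sketch of the fallback the reviewer describes: look the rule up in the config state first, and fall back to the in-memory auth map when it has never been written through consensus. The state lookup and serializer call are assumptions for illustration, not the actual indy-node API:
```
def get_auth_constraint(config_state, auth_map, action_id, serializer):
    # Read-only lookup; never write to the config state outside of consensus.
    serialized = config_state.get(action_id.encode())  # assumed state API
    if serialized is not None:
        return serializer.deserialize(serialized)
    # The rule was never written through consensus -- fall back to the
    # local (in-memory) auth map instead of seeding the state with it.
    return auth_map.get(action_id)
```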
codereview_python_data_1815 | return self.retrieve(ram['planners'], self.unique)
else:
existing.update('stopping_conditions', self.stopping_conditions)
- existing.update('module', self.module)
- existing.update('description', self.description)
existing.update('params', self.params)
- existing.update('planner_id', self.planner_id)
return existing
""" PRIVATE """
should planner_id be updated? same with module?
return self.retrieve(ram['planners'], self.unique)
else:
existing.update('stopping_conditions', self.stopping_conditions)
existing.update('params', self.params)
return existing
""" PRIVATE """ |
codereview_python_data_1837 | from plenum.common.util import get_utc_epoch
from plenum.test.helper import sdk_send_and_check, sdk_sign_request_from_dict
from indy_common.constants import REVOC_REG_DEF_ID, VALUE, FROM, TO, ISSUED, \
- REVOKED, PREV_ACCUM, ACCUM, STATE_PROOF_FROM
from plenum.common.constants import TXN_TIME, DATA
from plenum.common.types import f
from plenum.common.util import randomString
Do we have tests with from/to times not equal to the state update times?
from plenum.common.util import get_utc_epoch
from plenum.test.helper import sdk_send_and_check, sdk_sign_request_from_dict
from indy_common.constants import REVOC_REG_DEF_ID, VALUE, FROM, TO, ISSUED, \
+ REVOKED, PREV_ACCUM, ACCUM_FROM, ACCUM_TO, STATE_PROOF_FROM, ACCUM
from plenum.common.constants import TXN_TIME, DATA
from plenum.common.types import f
from plenum.common.util import randomString |
codereview_python_data_1841 | results['scale'],
interpolation='nearest',
backend=self.backend)
- results['gt_semantic_seg'] = gt_seg
results[key] = gt_seg
def __call__(self, results):
should we rm line 269?
results['scale'],
interpolation='nearest',
backend=self.backend)
results[key] = gt_seg
def __call__(self, results): |
codereview_python_data_1843 | consumer_to_locate = find_consumer(consumer_arn, consumer_name, stream_arn)
if(not consumer_to_locate):
- error_msg = 'Consumer %s not found.' % consumer_arn or consumer_name
return simple_error_response(error_msg, 400, 'ResourceNotFoundException')
nit: I guess this should be changed to: ``` error_msg = 'Consumer %s not found.' % (consumer_arn or consumer_name) ```
consumer_to_locate = find_consumer(consumer_arn, consumer_name, stream_arn)
if(not consumer_to_locate):
+ error_msg = 'Consumer %s not found.' % (consumer_arn or consumer_name)
return simple_error_response(error_msg, 400, 'ResourceNotFoundException') |
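The nit is about precedence: `%` binds tighter than `or`, so without the parentheses the fallback name never reaches the format string. A quick self-contained illustration:
```
consumer_arn = None
consumer_name = 'my-consumer'

# Without parentheses, `%` is applied to consumer_arn alone (yielding
# 'Consumer None not found.'), and `or` then keeps that non-empty string.
wrong = 'Consumer %s not found.' % consumer_arn or consumer_name
# With parentheses the fallback value is interpolated as intended.
right = 'Consumer %s not found.' % (consumer_arn or consumer_name)

assert wrong == 'Consumer None not found.'
assert right == 'Consumer my-consumer not found.'
```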
codereview_python_data_1846 | runnable_scanners = scanner_builder.ScannerBuilder(
global_configs, scanner_configs, service_config, model_name,
- model_name).build()
# pylint: disable=bare-except
for scanner in runnable_scanners:
This model_name should not be repeated.
runnable_scanners = scanner_builder.ScannerBuilder(
global_configs, scanner_configs, service_config, model_name,
+ None).build()
# pylint: disable=bare-except
for scanner in runnable_scanners: |
codereview_python_data_1847 | def order_processes(delays, args_for_script):
- processed_delays = []
processes_dictionary = {}
- for delay in delays:
- if delay in processed_delays:
- continue
- else:
- processed_delays.append(delay)
delays_indices = [i for i, e in enumerate(delays) if e == delay]
args_list = []
for index in delays_indices:
I think there is no need to use path.join here since the func arg is the folder from config. Just expanduser is enough.
def order_processes(delays, args_for_script):
+ assert len(delays) == len(args_for_script), 'Can not order the processes as a list of delays length is not equal ' \
+ 'to a list of arguments length.'
+ unique_delays = set(delays)
processes_dictionary = {}
+ for delay in unique_delays:
delays_indices = [i for i, e in enumerate(delays) if e == delay]
args_list = []
for index in delays_indices: |
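Grouping by delay can also be written directly with a dict of lists, which avoids both the duplicate-delay bookkeeping and the repeated index scan. A sketch under the assumption that arguments within each delay only need to keep their input order:
```
from collections import defaultdict


def order_processes(delays, args_for_script):
    assert len(delays) == len(args_for_script), \
        'delays and args_for_script must have the same length'
    processes_dictionary = defaultdict(list)
    # Single pass: each delay key collects its arguments in input order.
    for delay, args in zip(delays, args_for_script):
        processes_dictionary[delay].append(args)
    return dict(processes_dictionary)


# Example: two processes share a delay of 5 seconds.
assert order_processes([5, 10, 5], ['a', 'b', 'c']) == {5: ['a', 'c'], 10: ['b']}
```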
codereview_python_data_1849 | "Fill a remotely allocated dict with values from the Cython C stack"
cython_func = self.get_cython_function()
- if sys.version_info[0] == 2:
- iterator = cython_func.locals.iteritems()
- else:
- iterator = cython_func.locals.items()
- for name, cyvar in iterator:
if (cyvar.type == PythonObject and
self.is_initialized(cython_func, name)):
Could you use a helper function `iteritems(a_dict)` here? Like `six` does? That would avoid having this version check all over the place.
"Fill a remotely allocated dict with values from the Cython C stack"
cython_func = self.get_cython_function()
+ for name, cyvar in cython_func.locals.items():
if (cyvar.type == PythonObject and
self.is_initialized(cython_func, name)): |
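The `six`-style helper the reviewer mentions would centralise the version check in one place; the committed fix instead calls `.items()`, which works on both Python 2 and 3 (at the cost of building a list on Python 2). A sketch of the helper approach:
```
import sys


def iteritems(d):
    """Iterate over (key, value) pairs without copying on Python 2."""
    if sys.version_info[0] == 2:
        return d.iteritems()
    return d.items()


# Call sites then stay version-agnostic:
# for name, cyvar in iteritems(cython_func.locals):
#     ...
```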
codereview_python_data_1861 | import azurelinuxagent.common.logger as logger
"""
-Data contract between guest and host
"""
"Base class for data contracts between guest and host and utilities to manipulate the properties in those contracts"
import azurelinuxagent.common.logger as logger
"""
+Base class for data contracts between guest and host and utilities to manipulate the properties in those contracts
""" |
codereview_python_data_1866 | class GNNBenchmarkDataset(DGLBuiltinDataset):
- r"""Base Class for GNN Benchmark dataset from https://github.com/shchur/gnn-benchmark#datasets"""
- _url = None
def __init__(self, name, force_reload=False):
_url = _get_dgl_url('dataset/' + name + '.zip')
super(GNNBenchmarkDataset, self).__init__(name=name, url=_url, force_reload=force_reload)
send it as an argument of GNNBenchmarkDataset ``` def __init__(self, name, url, force_reload=False): ```
class GNNBenchmarkDataset(DGLBuiltinDataset):
+ r"""Base Class for GNN Benchmark dataset
+ Reference: https://github.com/shchur/gnn-benchmark#datasets
+ """
def __init__(self, name, force_reload=False):
_url = _get_dgl_url('dataset/' + name + '.zip')
super(GNNBenchmarkDataset, self).__init__(name=name, url=_url, force_reload=force_reload) |
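The suggestion is to pass the URL in from each concrete dataset rather than deriving it inside the base class. A rough sketch of that shape, reusing `DGLBuiltinDataset` and `_get_dgl_url` from the snippet above; the subclass name is illustrative:
```
class GNNBenchmarkDataset(DGLBuiltinDataset):
    r"""Base Class for GNN Benchmark dataset

    Reference: https://github.com/shchur/gnn-benchmark#datasets
    """
    def __init__(self, name, url, force_reload=False):
        # The base class no longer knows how to build URLs; subclasses do.
        super(GNNBenchmarkDataset, self).__init__(
            name=name, url=url, force_reload=force_reload)


class CoraFullDataset(GNNBenchmarkDataset):  # illustrative subclass
    def __init__(self, force_reload=False):
        url = _get_dgl_url('dataset/cora_full.zip')
        super(CoraFullDataset, self).__init__(
            'cora_full', url, force_reload=force_reload)
```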
codereview_python_data_1869 | match = re.search(pattern, new_line.decode('utf-8'))
if match:
self.set_tracking_url(
- self.logs_output_pattern_to_url(match.group(1))
)
else:
sleep(time_to_sleep)
I don't see how this has any different impact. You're just returning the same value you're passing.
match = re.search(pattern, new_line.decode('utf-8'))
if match:
self.set_tracking_url(
+ self.build_tracking_url(match.group(1))
)
else:
sleep(time_to_sleep) |
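One way a rename like this gains meaning is when the method becomes an overridable hook rather than a pass-through: subclasses can turn the matched log fragment into a full URL. A purely illustrative sketch (the class names are assumptions, not the actual task classes):
```
class TrackingUrlMixin:
    def build_tracking_url(self, logs_output):
        # Default behaviour: hand back whatever the log regex captured.
        return logs_output


class YarnTrackingUrlMixin(TrackingUrlMixin):
    def build_tracking_url(self, logs_output):
        # Override turns a raw application id into a clickable proxy URL.
        return 'http://resourcemanager:8088/proxy/%s/' % logs_output
```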