id (stringlengths 24-28) | content (stringlengths 121-2.08k) |
---|---|
codereview_python_data_5106 | ordering = ['-date_created']
def __str__(self):
- return '{order} has changed status from {old_status} to {new_status}'.format(
order=self.order, old_status=self.old_status, new_status=self.new_status
)
This string needs to be translated.
ordering = ['-date_created']
def __str__(self):
+ return _('{order} has changed status from {old_status} to {new_status}').format(
order=self.order, old_status=self.old_status, new_status=self.new_status
) |
codereview_python_data_5107 | # Copyright 2006-2016 by Peter Cock. All rights reserved.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
Should you add a 2021 revisions copyright line for yourself?
# Copyright 2006-2016 by Peter Cock. All rights reserved.
+# Revisions copyright 2021 by Michiel de Hoon.
#
# This file is part of the Biopython distribution and governed by your
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License". |
codereview_python_data_5113 | def test_modularity_communities_weighted():
G = nx.balanced_tree(2, 3)
for (a, b) in G.edges:
- if ((a == 1) | (a == 2)) & (b != 0):
G[a][b]["weight"] = 10.0
else:
G[a][b]["weight"] = 1.0
Just a nit, but I think logical operators should be used here instead of bitwise ```suggestion if ((a == 1) or (a == 2)) and (b != 0): ```
def test_modularity_communities_weighted():
G = nx.balanced_tree(2, 3)
for (a, b) in G.edges:
+ if ((a == 1) or (a == 2)) and (b != 0):
G[a][b]["weight"] = 10.0
else:
G[a][b]["weight"] = 1.0 |
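A small standalone sketch (made-up values, not part of the record above) of why the reviewer prefers `and`/`or` here: logical operators short-circuit, while bitwise `&`/`|` always evaluate both sides and bind more tightly than comparisons once the parentheses are dropped.

```python
def probe(label, value):
    # Helper that records which operands actually get evaluated.
    print("evaluated", label)
    return value

probe("left", True) or probe("right", True)   # prints only "evaluated left"
probe("left", True) | probe("right", True)    # prints both sides

# Precedence trap when parentheses are omitted:
a, b = 1, 0
print(a == 1 | b == 0)        # parsed as a == (1 | b) == 0  ->  False
print((a == 1) | (b == 0))    # the intended comparison       ->  True
```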
codereview_python_data_5127 | if server_extensions:
server_extensions[0].finalize(handshake_flow.response.headers['Sec-WebSocket-Extensions'])
- request = Request(extensions = client_extensions, host = handshake_flow.request.host, target = handshake_flow.request.path)
data = self.connections[self.server_conn].send(request)
self.connections[self.client_conn].receive_data(data)
Please remove the whitespace around the `=` inside a function call (args)
if server_extensions:
server_extensions[0].finalize(handshake_flow.response.headers['Sec-WebSocket-Extensions'])
+ request = Request(extensions=client_extensions, host=handshake_flow.request.host, target=handshake_flow.request.path)
data = self.connections[self.server_conn].send(request)
self.connections[self.client_conn].receive_data(data) |
codereview_python_data_5130 | product_detail_page = self.get(self.product.get_absolute_url())
self.assertContains(product_detail_page, 'Add to wish list')
def test_wishlists_disabled(self):
- self.installed_apps_setting.remove('oscar.apps.wishlists.apps.WishlistsConfig')
- with self.settings(INSTALLED_APPS=self.installed_apps_setting):
- account_page = self.get(reverse('customer:profile-view'))
-
- self.assertNotContains(account_page, self.wishlists_url)
- product_detail_page = self.get(self.product.get_absolute_url())
- self.assertNotContains(product_detail_page, 'Add to wish list')
You can use the `django.test.utils.modify_settings` decorator instead: ``` @modify_settings(INSTALLED_APPS={ 'remove': [ 'oscar.apps.wishlists.apps.WishlistsConfig', ], }) ```
product_detail_page = self.get(self.product.get_absolute_url())
self.assertContains(product_detail_page, 'Add to wish list')
+ @modify_settings(INSTALLED_APPS={"remove": ['oscar.apps.wishlists.apps.WishlistsConfig']})
def test_wishlists_disabled(self):
+ account_page = self.get(reverse('customer:profile-view'))
+ self.assertNotContains(account_page, self.wishlists_url)
+ product_detail_page = self.get(self.product.get_absolute_url())
+ self.assertNotContains(product_detail_page, 'Add to wish list') |
codereview_python_data_5139 | from tensorflow_addons.image.distort_image_ops import adjust_hsv_in_yiq
from tensorflow_addons.image.distort_image_ops import random_hsv_in_yiq
-from tensorflow_addons.image.transform_ops import transform, rotate
Imports should usually be on separate lines :-)
from tensorflow_addons.image.distort_image_ops import adjust_hsv_in_yiq
from tensorflow_addons.image.distort_image_ops import random_hsv_in_yiq
+from tensorflow_addons.image.transform_ops import transform
+from tensorflow_addons.image.transform_ops import rotate |
codereview_python_data_5142 | ['gcloud', 'deployment-manager', 'deployments', 'describe',
deployment_name, '--format=json'])
- deployment_err_msg = 'Error occurred during the deployment, exiting...'
-
if return_code:
print(err)
- print(deployment_err_msg)
sys.exit(1)
deployment_info = json.loads(out)
Is termination the right thing to do? Or is it something recoverable that the user can try again, after the rest of the installation is finished.
['gcloud', 'deployment-manager', 'deployments', 'describe',
deployment_name, '--format=json'])
if return_code:
print(err)
+ print(constants.MESSAGE_DEPLOYMENT_ERROR)
sys.exit(1)
deployment_info = json.loads(out) |
codereview_python_data_5143 | with warnings.catch_warnings():
# e.g. BiopythonParserWarning: Dropping bond qualifier in feature
# location
warnings.simplefilter("ignore", BiopythonParserWarning)
# e.g. WARNING: Chain C is discontinuous at line 2645
warnings.simplefilter("ignore", PDBConstructionWarning)
- # e.g. First line is not a 'HEADER'; can't determine PDB ID
- warnings.simplefilter("ignore", BiopythonWarning)
-
# Try as an iterator using handle
h = open(t_filename, mode)
records = list(SeqIO.parse(handle=h, format=t_format))
That looks like the warning ought to be ``BiopythonParserWarning`` instead? Another pre-existing minor issue.
with warnings.catch_warnings():
# e.g. BiopythonParserWarning: Dropping bond qualifier in feature
# location
+ # e.g. First line is not a 'HEADER'; can't determine PDB ID
warnings.simplefilter("ignore", BiopythonParserWarning)
# e.g. WARNING: Chain C is discontinuous at line 2645
warnings.simplefilter("ignore", PDBConstructionWarning)
# Try as an iterator using handle
h = open(t_filename, mode)
records = list(SeqIO.parse(handle=h, format=t_format)) |
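A minimal, self-contained illustration (with a hypothetical `ParserWarning` class) of why the narrower category matters: only that category is silenced inside the block, while unrelated warnings still surface.

```python
import warnings

class ParserWarning(UserWarning):
    """Stand-in for a library-specific warning such as BiopythonParserWarning."""

with warnings.catch_warnings():
    warnings.simplefilter("error")                  # escalate everything to exceptions...
    warnings.simplefilter("ignore", ParserWarning)  # ...except this specific category
    warnings.warn("dropped qualifier", ParserWarning)      # silently ignored
    try:
        warnings.warn("unrelated problem", UserWarning)    # still escalated
    except UserWarning as exc:
        print("raised:", exc)
# The original filters are restored once the with-block exits.
```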
codereview_python_data_5145 | pos_weight=-1,
smoothl1_beta=1 / 9.0,
debug=False),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
`TensorboardLoggerHook` may not be used by default.
pos_weight=-1,
smoothl1_beta=1 / 9.0,
debug=False),
+ rpn_proposal=dict(
+ nms_across_levels=False,
+ nms_pre=2000,
+ nms_post=2000,
+ max_num=2000,
+ nms_thr=0.7,
+ min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner', |
codereview_python_data_5146 | file_that_does_not_exist_in_fs = "some/random/file"
for file in [file_that_exists_in_fs, file_that_does_not_exist_in_fs]:
- with patch("azurelinuxagent.common.osutil.factory.UBUNTU_20_04_IMAGE_PATH", file):
with patch.object(ExtHandlerInstance, "load_manifest", return_value=manifest):
with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event:
i think we should assert that the full path of the link is in path, rather than just the agent's dir
file_that_does_not_exist_in_fs = "some/random/file"
for file in [file_that_exists_in_fs, file_that_does_not_exist_in_fs]:
+ with patch("azurelinuxagent.common.osutil.ubuntu.UBUNTU_20_04_IMAGE_PATH", file):
with patch.object(ExtHandlerInstance, "load_manifest", return_value=manifest):
with patch.object(ExtHandlerInstance, 'report_event') as mock_report_event: |
codereview_python_data_5147 | log.init.debug("Initializing cookies...")
cookies.init(q_app)
- log.init.debug("Initializing websettings...")
- websettings.init(args)
- quitter.instance.shutting_down.connect(websettings.shutdown)
-
log.init.debug("Initializing cache...")
cache.init(q_app)
Can you elaborate why you moved this down? From a quick look, things still seem properly when moving it up again, and the init sequence is somewhat brittle.
log.init.debug("Initializing cookies...")
cookies.init(q_app)
log.init.debug("Initializing cache...")
cache.init(q_app) |
codereview_python_data_5148 | # Fuchsia specific.
r'^CrashTrampolineAsm',
- r'^libc_io_functions_not_implemented_use_fdio_instead',
- r'^\<libclang_rt.asan.so\>',
- r'^__zx_panic',
r'^syslog\:\:LogMessage',
]
nit: for python 2, _ needs to be escaped in all these entries.
# Fuchsia specific.
r'^CrashTrampolineAsm',
+ r'^libc\_io\_functions\_not\_implemented\_use\_fdio\_instead',
+ r'^\<libclang\_rt.asan.so\>',
+ r'^\_\_zx\_panic',
r'^syslog\:\:LogMessage',
] |
codereview_python_data_5154 | from say import say
-# Tests adapted from `problem-specifications//canonical-data.json` @ v1.1.0
-
class SayTest(unittest.TestCase):
def test_zero(self):
Please remove this blank line.
from say import say
+# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0
class SayTest(unittest.TestCase):
def test_zero(self): |
codereview_python_data_5158 | @command('wp')
async def payto(self, destination, amount, fee=None, feerate=None, from_addr=None, from_coins=None, change_addr=None,
- nocheck=False, unsigned=False, rbf=None, password=None, locktime=None, addtransaction=True, wallet: Abstract_Wallet = None):
"""Create a transaction. """
self.nocheck = nocheck
tx_fee = satoshis(fee)
The default should be "False", as "True" would be a significant change in behaviour.
@command('wp')
async def payto(self, destination, amount, fee=None, feerate=None, from_addr=None, from_coins=None, change_addr=None,
+ nocheck=False, unsigned=False, rbf=None, password=None, locktime=None, addtransaction=False, wallet: Abstract_Wallet = None):
"""Create a transaction. """
self.nocheck = nocheck
tx_fee = satoshis(fee) |
codereview_python_data_5161 | (u'negative_probability', u'subnormal_probability')
)
- def __init__(self, allow_nan=True):
super(FullRangeFloats, self).__init__()
self.allow_nan = allow_nan
def draw_parameter(self, random):
return self.Parameter(
It took me a moment to figure out why there wasn't a separate allow_infinity value here. Am I right in thinking that allow_nan=False will also disable infinity, because they're both represented with maximum exponent?
(u'negative_probability', u'subnormal_probability')
)
+ def __init__(self, allow_nan=True, allow_infinity=True):
super(FullRangeFloats, self).__init__()
self.allow_nan = allow_nan
+ self.allow_infinity = allow_infinity
def draw_parameter(self, random):
return self.Parameter( |
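A quick check of the reasoning in the comment above: IEEE 754 doubles encode both NaN and infinity with an all-ones exponent field, so a strategy that avoids that exponent range drops both kinds of value at once.

```python
import struct

def exponent_field(x):
    # Reinterpret the double as a 64-bit integer and pull out the 11 exponent bits.
    (bits,) = struct.unpack(">Q", struct.pack(">d", x))
    return (bits >> 52) & 0x7FF

print(exponent_field(float("inf")))   # 2047 (all ones)
print(exponent_field(float("nan")))   # 2047 (all ones)
print(exponent_field(1.0))            # 1023 (an ordinary finite value)
```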
codereview_python_data_5163 | try:
tar.extract(member)
except TarError as err:
- current_app.logger.error("{} while extracting {}, aborting import".format(
- type(err).__name__, member.name), exc_info=True)
# Cleanup
if utils.path_exists('/temp'):
utils.delete_dir('/temp', recursive=True)
if utils.path_exists(tmp_dump_dir):
utils.delete_dir(tmp_dump_dir, recursive=True)
shutil.rmtree(tmp_dump_dir)
- return
tmp_hdfs_path = os.path.join(tmp_dump_dir, member.name)
utils.upload_to_HDFS(tmp_hdfs_path, member.name)
If we just return here, the handler on the spark_reader side will not know that the import failed, we should raise an exception here, catch it in the handler and send back an appropriate message.
try:
tar.extract(member)
except TarError as err:
# Cleanup
if utils.path_exists('/temp'):
utils.delete_dir('/temp', recursive=True)
if utils.path_exists(tmp_dump_dir):
utils.delete_dir(tmp_dump_dir, recursive=True)
shutil.rmtree(tmp_dump_dir)
+ raise DumpInvalidException("{} while extracting {}, aborting import".format(type(err).__name__, member.name))
tmp_hdfs_path = os.path.join(tmp_dump_dir, member.name)
utils.upload_to_HDFS(tmp_hdfs_path, member.name) |
codereview_python_data_5168 | - email Set the Entrez email parameter (default is not set).
- tool Set the Entrez tool parameter (default is ``biopython``).
- api_key Personal API key from NCBI. If not set, only 3 queries per
- second are allowed. 10 queries per seconds otherwise with a
- valid API key.
- max_tries Configures how many times failed requests will be
- automatically retried on error (default is 3).
- sleep_between_tries The delay, in seconds, before retrying a request on
- error (default is 15).
Functions:
My internet is flakey today so I don't have access to an RST preview, but I think the last three bullet points (including ``api_key``) need the continuation lines to have less indentation (like the next block of bullet points).
- email Set the Entrez email parameter (default is not set).
- tool Set the Entrez tool parameter (default is ``biopython``).
- api_key Personal API key from NCBI. If not set, only 3 queries per
+ second are allowed. 10 queries per seconds otherwise with a
+ valid API key.
- max_tries Configures how many times failed requests will be
+ automatically retried on error (default is 3).
- sleep_between_tries The delay, in seconds, before retrying a request on
+ error (default is 15).
Functions: |
codereview_python_data_5175 | continue
if key == "molecule_type":
# EMBL allows e.g. "genomics DNA" where GenBank is limited.
- common_words = set(old.annotations[key].split()).intersection(
- new.annotations[key].split()
- )
if not common_words:
raise ValueError(
"Annotation mis-match for molecule_type:\n%s\n%s"
This doesn't look so nice... the one line version must be right on the 88 limit. Perhaps two lines? ```python common_words = set(old.annotations[key].split()) common_words = common_words.intersection(new.annotations[key].split()) ```
continue
if key == "molecule_type":
# EMBL allows e.g. "genomics DNA" where GenBank is limited.
+ common_words = set(old.annotations[key].split())
+ common_words = common_words.intersection(new.annotations[key].split())
if not common_words:
raise ValueError(
"Annotation mis-match for molecule_type:\n%s\n%s" |
codereview_python_data_5184 | if stats is None:
return '', 204
- if count is None:
- count = DEFAULT_ITEMS_PER_GET
count = min(count, MAX_ITEMS_PER_GET)
count = count + offset
artist_list = stats['artist']['all_time']['artists'][offset:count]
Instead of doing this check here, we could just do `count = _get_non_negative_param('count', default=DEFAULT_ITEMS_PER_GET)` above on line 84, where we first retrieve count.
if stats is None:
return '', 204
count = min(count, MAX_ITEMS_PER_GET)
count = count + offset
artist_list = stats['artist']['all_time']['artists'][offset:count] |
codereview_python_data_5186 | ;
""") # noqa : W291
- @staticmethod
- def test_write_alignment():
# Default causes no interleave (columns <= 1000)
records = [SeqRecord(Seq("ATGCTGCTGA" * 90, alphabet=ambiguous_dna), id=_id) for _id in ["foo", "bar", "baz"]]
a = MultipleSeqAlignment(records, alphabet=ambiguous_dna)
Can you switch that ``assert`` statement (and the similar ones below) to: ```python self.assertIn("ATGCTGCTGA" * 90, data) ``` This is new in Python 2.7, prior to that we'd have used a workaround like ``self.assertTrue("ATGCTGCTGA" * 90 in data)`` instead.
;
""") # noqa : W291
+ def test_write_alignment(self):
# Default causes no interleave (columns <= 1000)
records = [SeqRecord(Seq("ATGCTGCTGA" * 90, alphabet=ambiguous_dna), id=_id) for _id in ["foo", "bar", "baz"]]
a = MultipleSeqAlignment(records, alphabet=ambiguous_dna) |
codereview_python_data_5190 | import pkg_resources
from PyQt5.QtCore import QUrlQuery, QUrl
import qutebrowser
from qutebrowser.config import config, configdata, configexc, configdiff
from qutebrowser.utils import (version, utils, jinja, log, message, docutils,
objreg, urlutils)
from qutebrowser.misc import objects
-import sip
pyeval_output = ":pyeval was never called"
Since this is a third-party and not a qutebrowser import, it should be in the second block where `pkg_resources` is imported.
import pkg_resources
from PyQt5.QtCore import QUrlQuery, QUrl
+import sip
import qutebrowser
from qutebrowser.config import config, configdata, configexc, configdiff
from qutebrowser.utils import (version, utils, jinja, log, message, docutils,
objreg, urlutils)
from qutebrowser.misc import objects
pyeval_output = ":pyeval was never called" |
codereview_python_data_5200 | "if you use dict, the index should start from 0")
if eval_at is not None:
- self.eval_at = eval_at
super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, group=group,
eval_set=eval_set, eval_names=eval_names,
@wxchan Is it critical to check `eval_at` for None? I mean, will it cause error in underlying booster or we can omit `if` statement since default value is `[1]` and docstring says it should be list of int?
"if you use dict, the index should start from 0")
if eval_at is not None:
+ self._eval_at = eval_at
super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, group=group,
eval_set=eval_set, eval_names=eval_names, |
codereview_python_data_5206 | high_pokemon['iv']), 'green')
if keep_for_evo and len(group) > 0:
if candies == {}:
logger.log("Api call for candies failed, try again")
Pokemon with 2 evolutions always use the candy of the first pokemon in the family. We have two solutions: use ['Next Evolution Requirements']['Name'] to order the candies array (as in my PR #2244), or add the evolution candy family to the JSON used for the pokemon_list variable.
high_pokemon['iv']), 'green')
if keep_for_evo and len(group) > 0:
+ if 'Previous evolution(s)' in self.bot.pokemon_list[pokemon_id - 1]:
+ logger.log(
+ '{} has previous evolution stages. This focuses on 1st stage because they use less '
+ 'candy'.format(pokemon_name), 'red')
+ continue
if candies == {}:
logger.log("Api call for candies failed, try again") |
codereview_python_data_5208 | and_(SuppressBug.run_id == action.run_id,
SuppressBug.hash == bug_hash,
SuppressBug.type == bug_hash_type,
- SuppressBug.file_name == ''))) \
.first()
if supp:
Please double check if the file_name column is None or empty string after a migration
and_(SuppressBug.run_id == action.run_id,
SuppressBug.hash == bug_hash,
SuppressBug.type == bug_hash_type,
+ SuppressBug.file_name == u''))) \
.first()
if supp: |
codereview_python_data_5219 | from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('ANALYZE')
-CTU_FUNC_MAP_CMD = 'clang-func-mapping'
class OrderedCheckersAction(argparse.Action):
This default variable should be moved into a config variable, created by `package_context`, and read from `config\package_layout.json`.
from libcodechecker.logger import LoggerFactory
LOG = LoggerFactory.get_new_logger('ANALYZE')
class OrderedCheckersAction(argparse.Action): |
codereview_python_data_5227 | try:
with timescale.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("SELECT SUM(count) FROM listen_count"))
- count = result.fetchone()["sum"] or 0
except psycopg2.OperationalError as e:
self.log.error("Cannot query timescale listen_count: %s" % str(e), exc_info=True)
raise
I think it would be cleaner to wrap ```result.fetchone()["sum"]``` with an int(), rather than having the cast come later. And then it might be an int or a str -- its just confusing.
try:
with timescale.engine.connect() as connection:
result = connection.execute(sqlalchemy.text("SELECT SUM(count) FROM listen_count"))
+ count = int(result.fetchone()["sum"]) or 0
except psycopg2.OperationalError as e:
self.log.error("Cannot query timescale listen_count: %s" % str(e), exc_info=True)
raise |
codereview_python_data_5228 | return_counts=True,
return_inverse=True,
)
- out[:] = where(
- group_labels != null_label,
- counts[inverse],
- self.missing_value,
- )
# Convenience aliases
I think slightly more efficient here would be: ```python np.copyto(out, counts[inverse], group_labels != null_label) ``` ``` In [9]: %timeit a[mask] = cp[mask] The slowest run took 5.88 times longer than the fastest. This could mean that an intermediate result is being cached. 10000 loops, best of 3: 44.6 mus per loop In [10]: %timeit np.copyto(a, cp, where=mask) The slowest run took 5.96 times longer than the fastest. This could mean that an intermediate result is being cached. 10000 loops, best of 3: 24.2 mus per loop ```
return_counts=True,
return_inverse=True,
)
+ copyto(out, counts[inverse], where=(group_labels != null_label))
# Convenience aliases |
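A runnable sketch (made-up arrays) of the suggestion in the comment: `np.copyto` with a `where` mask writes into the destination in place and skips the temporary that masked assignment builds on the right-hand side.

```python
import numpy as np

out = np.zeros(8)
counts = np.arange(8, dtype=float)
mask = counts % 2 == 0

expected = out.copy()
expected[mask] = counts[mask]          # masked assignment: materialises counts[mask]

np.copyto(out, counts, where=mask)     # in-place copy wherever the mask is True

assert np.array_equal(out, expected)
print(out)
```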
codereview_python_data_5243 | :param bucket_name: The name of the bucket that receives the posted object.
:param object_key: The object key to identify the uploaded object.
- :param expires_in: The number of seconds the presigned POST is valid for.
:return: A dictionary that contains the URL and form fields that contain
required access data.
"""
...**seconds that the presigned POST is valid** ~~for~~.
:param bucket_name: The name of the bucket that receives the posted object.
:param object_key: The object key to identify the uploaded object.
+ :param expires_in: The number of seconds the presigned POST is valid.
:return: A dictionary that contains the URL and form fields that contain
required access data.
""" |
codereview_python_data_5248 | self._scheduler.add_worker(self._id, {'workers': self.worker_processes})
@rpc_message_callback
- def dispatch_scheduler_message(self, task_id, message):
if not self._config.receive_messages:
return
add `, **kwargs):` so that we can add more things in the future (without breaking old workers). One next step could be that a message has an ID so the worker can report back that it got the message, perhaps even "reply" etc.
self._scheduler.add_worker(self._id, {'workers': self.worker_processes})
@rpc_message_callback
+ def dispatch_scheduler_message(self, task_id, message_id, message, **kwargs):
if not self._config.receive_messages:
return |
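A small illustrative sketch (hypothetical handler names) of the forward-compatibility argument: a callback that accepts `**kwargs` keeps working when the scheduler later attaches extra fields such as a message id.

```python
def handle_v1(task_id, message):
    print("v1:", task_id, message)

def handle_v2(task_id, message, **kwargs):
    # Unknown fields from newer schedulers are collected here and can be ignored.
    print("v2:", task_id, message, "| extras:", sorted(kwargs))

payload = {"task_id": "t1", "message": "pause", "message_id": 42}

handle_v2(**payload)        # works, the extra field is simply ignored
try:
    handle_v1(**payload)    # old signature breaks on the new field
except TypeError as exc:
    print("v1 failed:", exc)
```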
codereview_python_data_5253 | import sys
import getopt
import distutils
from distutils import dir_util
from distutils import file_util
from setuptools import find_packages, setup
if __name__ == "__main__":
if (8 * struct.calcsize("P")) != 64:
raise Exception('Cannot install LightGBM in 32-bit python, please use 64-bit python instead.')
use_gpu = False
I got the same error on my mac, fix by removing this line
import sys
import getopt
import distutils
+import shutil
from distutils import dir_util
from distutils import file_util
from setuptools import find_packages, setup
if __name__ == "__main__":
+ build_sdist = sys.argv[1] == 'sdist'
if (8 * struct.calcsize("P")) != 64:
raise Exception('Cannot install LightGBM in 32-bit python, please use 64-bit python instead.')
use_gpu = False |
codereview_python_data_5254 | else:
return exec_command(logfile)
finally:
- logfile.close()
def _send_logs(self):
msg = None
I'm debating if we need to log it directly to the log file or pass it as a parameter to our logger to make sure we capture the other metadata too (datetime, thread name, etc etc). Do you have a sample output log file with these changes? I would like to take a look at those to understand this better
else:
return exec_command(logfile)
finally:
+ if logfile is not None:
+ logfile.close()
def _send_logs(self):
msg = None |
codereview_python_data_5262 | line[24:33], line[33:40],
line[40:47], line[47:54]]
except ValueError:
- warnings.warn("Failed to read CRYST entry, got:\n{}"
"".format(line))
# check if atom number changed
Maybe be even more explicit and say "Failed to read CRYST1 record, possibly invalid PDB file: read:\n{}"
line[24:33], line[33:40],
line[40:47], line[47:54]]
except ValueError:
+ warnings.warn("Failed to read CRYST1 record, "
+ "possibly invalid PDB file, got:\n{}"
"".format(line))
# check if atom number changed |
codereview_python_data_5266 | bootstrap = Flag(
doc="Debug server catalog bootstrap.")
- cache_yolo = Flag(
- doc="Disable consistency check.")
edgeql_parser = Flag(
doc="Debug EdgeQL parser (rebuild grammar verbosly).")
perhaps `bootstrap_cache_yolo` or do you mean for this to apply to all cache everywhere?
bootstrap = Flag(
doc="Debug server catalog bootstrap.")
+ bootstrap_cache_yolo = Flag(
+ doc="Disable bootstrap cache consistency check.")
edgeql_parser = Flag(
doc="Debug EdgeQL parser (rebuild grammar verbosly).") |
codereview_python_data_5272 | """
)
general.add_argument(
- "--can-handle-url-nohead",
metavar="URL",
help="""
- Same as --can-handle-url but without HTTP operations involved.
"""
)
general.add_argument(
Maybe `Same as --can-handle-url but without following redirects when looking up the URL` is more clear about the indented function?
"""
)
general.add_argument(
+ "--can-handle-url-no-redirect",
metavar="URL",
help="""
+ Same as --can-handle-url but without following redirects when looking up the URL.
"""
)
general.add_argument( |
codereview_python_data_5276 | num_nodes=(num_nodes_dict[srctype], num_nodes_dict[dsttype]),
validate=False, index_dtype=index_dtype))
- return hetero_from_relations(rel_graphs, num_nodes_dict,
- index_dtype=index_dtype)
def to_hetero(G, ntypes, etypes, ntype_field=NTYPE, etype_field=ETYPE,
Similar to `hetero_from_relations`. I don't think this `index_dtype` is needed.
num_nodes=(num_nodes_dict[srctype], num_nodes_dict[dsttype]),
validate=False, index_dtype=index_dtype))
+ return hetero_from_relations(rel_graphs, num_nodes_dict)
def to_hetero(G, ntypes, etypes, ntype_field=NTYPE, etype_field=ETYPE, |
codereview_python_data_5279 | import torch as th
from ...dist_tensor import DistTensor
-from ...sparse_emb import NodeEmbedding
from .utils import alltoallv_cpu, alltoall_cpu
class DistSparseGradOptimizer(abc.ABC):
this is synchronized update. we should keep the async update so that we can make a comparison between them.
import torch as th
from ...dist_tensor import DistTensor
+from ...sparse_emb.pytorch import NodeEmbedding
from .utils import alltoallv_cpu, alltoall_cpu
class DistSparseGradOptimizer(abc.ABC): |
codereview_python_data_5281 | box=self.u.dimensions)
# Maybe exclude same molecule distances
if self._exclusion_block is not None:
- idxA = pairs[:, 0]//self._exclusion_block[0],
idxB = pairs[:, 1]//self._exclusion_block[1]
mask = np.where(idxA != idxB)[0]
dist = dist[mask]
is the `,` necessary here?
box=self.u.dimensions)
# Maybe exclude same molecule distances
if self._exclusion_block is not None:
+ idxA = pairs[:, 0]//self._exclusion_block[0]
idxB = pairs[:, 1]//self._exclusion_block[1]
mask = np.where(idxA != idxB)[0]
dist = dist[mask] |
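A tiny standalone demonstration (made-up pairs) of what the stray trailing comma does: it wraps the left-hand result in a one-element tuple, so the later `!=` comparison goes through NumPy broadcasting and the mask picks up an extra dimension.

```python
import numpy as np

pairs = np.array([[0, 1], [2, 3], [4, 5]])

idxA = pairs[:, 0] // 2,     # trailing comma -> a 1-element tuple, not an ndarray
idxB = pairs[:, 1] // 2

print(type(idxA).__name__, type(idxB).__name__)   # tuple ndarray
print((idxA != idxB).shape)                       # (1, 3) instead of the expected (3,)
```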
codereview_python_data_5284 | if number == 1:
return "1 bottle"
else:
- return f"{number} bottles"
def _next_verse(current_verse):
```suggestion return f'{number} bottles' ```
if number == 1:
return "1 bottle"
else:
+ return f'{number} bottles'
def _next_verse(current_verse): |
codereview_python_data_5291 | return False
def get_status(self, bigchain):
- concluded = self.get_election(self.id, bigchain)
- if concluded:
return self.CONCLUDED
return self.INCONCLUSIVE if self.has_validator_set_changed(bigchain) else self.ONGOING
Why is this approach preferable over checking the election outputs?
return False
def get_status(self, bigchain):
+ election = self.get_election(self.id, bigchain)
+ if election and election['is_concluded']:
return self.CONCLUDED
return self.INCONCLUSIVE if self.has_validator_set_changed(bigchain) else self.ONGOING |
codereview_python_data_5296 | """Set up the async update subprocess.
"""
self.async_q = Queue(1)
- self.async_p = mp.Process(target=async_update, args=(None, self, self.async_q))
self.async_p.start()
def finish_async_update(self):
looks like this piece of code is replicated?
"""Set up the async update subprocess.
"""
self.async_q = Queue(1)
+ self.async_p = mp.Process(target=async_update, args=(self.args, self, self.async_q))
self.async_p.start()
def finish_async_update(self): |
codereview_python_data_5298 | from mmcv import Config, DictAction
from mmdet.core.evaluation import eval_map
-from mmdet.core.mask.structures import (BitmapMasks, PolygonMasks,
- polygon_to_bitmap)
from mmdet.core.visualization.image import imshow_det_bboxes
from mmdet.datasets import build_dataset, retrieve_loading_pipeline
We can use the API gt_masks.to_array
from mmcv import Config, DictAction
from mmdet.core.evaluation import eval_map
+from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.visualization.image import imshow_det_bboxes
from mmdet.datasets import build_dataset, retrieve_loading_pipeline |
codereview_python_data_5305 | with open(path) as f:
return _sanitize_markdown(f.read())
except Exception as err:
- raise RuntimeError(f'Makrdown file "{path}" could not be loaded: {err}')
def _load_skill(path, course):
hmm, in this case perhaps the whole try-except could be removed altogether? because if the file does not exist, it's already returning `None`. In any other case it should actually probably fail with the exception, no?
with open(path) as f:
return _sanitize_markdown(f.read())
except Exception as err:
+ raise RuntimeError(f'Markdown file "{path}" could not be loaded: {err}')
def _load_skill(path, course): |
codereview_python_data_5313 | def check_operator_multipaste(bs, pastes, in_size, out_size, even_paste_count, no_intersections, full_input, in_anchor_top_left,
- out_anchor_top_left, out_dtype, use_gpu):
pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l = get_pipeline(
batch_size=bs,
in_size=in_size,
In Python world, that's what we do: ```suggestion out_anchor_top_left, out_dtype, device): ```
def check_operator_multipaste(bs, pastes, in_size, out_size, even_paste_count, no_intersections, full_input, in_anchor_top_left,
+ out_anchor_top_left, out_dtype, device):
pipe, in_idx_l, in_anchors_l, shapes_l, out_anchors_l = get_pipeline(
batch_size=bs,
in_size=in_size, |
codereview_python_data_5314 | if type(axes) is list:
axes = tuple(axes)
if mean is None:
mean = x.mean(axis = axes, keepdims = True)
if stddev is None:
- factor = np.prod([x.shape[a] for a in axes]) - ddof if axes else x.size - ddof
sqr = (x - mean).astype(np.float)**2
var = np.sum(sqr, axis = axes, keepdims = True)
if factor > 0:
Maybe you can just use `numpy.std` at least for some case?
if type(axes) is list:
axes = tuple(axes)
+ num_reduced = np.prod([x.shape[a] for a in axes]) if axes else x.size
+
if mean is None:
mean = x.mean(axis = axes, keepdims = True)
+ if stddev is None and eps == 0 and num_reduced > ddof:
+ stddev = np.std(x, axis = axes, ddof = ddof, keepdims=True)
+
if stddev is None:
+ factor = num_reduced - ddof
sqr = (x - mean).astype(np.float)**2
var = np.sum(sqr, axis = axes, keepdims = True)
if factor > 0: |
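A quick consistency check, on made-up data, that `np.std` with `ddof` reproduces the manual sum-of-squares path used in the surrounding reference implementation.

```python
import numpy as np

x = np.random.default_rng(0).normal(size=(4, 5))
axes, ddof = (1,), 1

mean = x.mean(axis=axes, keepdims=True)
factor = np.prod([x.shape[a] for a in axes]) - ddof
manual = np.sqrt(((x - mean) ** 2).sum(axis=axes, keepdims=True) / factor)

assert np.allclose(manual, np.std(x, axis=axes, ddof=ddof, keepdims=True))
```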
codereview_python_data_5317 | continue
if not crash_analyzer.is_memory_tool_crash(result.output):
- # Didn't crash or time out.
continue
# Get memory tool crash information.
Remove "time out"
continue
if not crash_analyzer.is_memory_tool_crash(result.output):
+ # Didn't crash.
continue
# Get memory tool crash information. |
codereview_python_data_5324 | state['handle'] = handle
self.__dict__.update(state)
- def _reverse_update_params(self):
- self.train_set._reverse_update_params()
- for valid_set in self.valid_sets:
- valid_set._reverse_update_params()
-
def free_dataset(self):
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None)
put this into booster is a little bit confusing. you can change the name to _reverse_update_params_of_dataset or something else. Or remove this function.
state['handle'] = handle
self.__dict__.update(state)
def free_dataset(self):
self.__dict__.pop('train_set', None)
self.__dict__.pop('valid_sets', None) |
codereview_python_data_5327 | s.next()
base_type = p_c_base_type(s)
is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
- is_template = isinstance(base_type, Nodes.TemplatedTypeNode)
- is_const_volatile = isinstance(base_type, Nodes.CConstOrVolatileTypeNode)
- is_ctuple = isinstance(base_type, Nodes.CTupleBaseTypeNode)
- if not is_memslice and not is_template and not is_const_volatile and not is_ctuple and base_type.name is None:
s.error("Unknown type")
declarator = p_c_declarator(s, empty = 1)
if s.sy == '?':
I think this has become long enough to start making it easier to read. ```suggestion if not (is_memslice or is_template or is_const_volatile or is_ctuple) and base_type.name is None: ``` And given that, I wonder if it's not better to just say ```cython isinstance(base_type, ( list, exceptional, types, here, )) ```
s.next()
base_type = p_c_base_type(s)
is_memslice = isinstance(base_type, Nodes.MemoryViewSliceTypeNode)
+ is_other_unnamed_type = isinstance(base_type, (Nodes.TemplatedTypeNode,
+ Nodes.CConstOrVolatileTypeNode,
+ Nodes.CTupleBaseTypeNode))
+ if not (is_memslice or is_other_unnamed_type) and base_type.name is None:
s.error("Unknown type")
declarator = p_c_declarator(s, empty = 1)
if s.sy == '?': |
codereview_python_data_5328 | if self.selection_expr is None:
return data
if not isinstance(data, Dataset):
- raw = True
data = Dataset(data)
return data[self.selection_expr.apply(Dataset(data))]
What does this line achieve?
if self.selection_expr is None:
return data
if not isinstance(data, Dataset):
data = Dataset(data)
return data[self.selection_expr.apply(Dataset(data))] |
codereview_python_data_5329 | Translate.
random_negative_prob (float): The probability that turns the
offset negative.
- interpolation (str): Same as :func:`mmcv.imtranslate`.
min_size (int | float): The minimum pixel for filtering
invalid bboxes after the translation.
"""
interpolation is not necessary since offset is an integer?
Translate.
random_negative_prob (float): The probability that turns the
offset negative.
min_size (int | float): The minimum pixel for filtering
invalid bboxes after the translation.
""" |
codereview_python_data_5332 | --------
>>> G = nx.Graph()
>>> G.add_path([0,1,2,3,4])
- >>> print(list(nx.dfs_postorder_nodes(G,0)))
[4, 3, 2, 1, 0]
- >>> print(list(nx.dfs_postorder_nodes(G,0,2)))
[1, 0]
Notes
Missing a space after the comma; run PEP8 checks again.
--------
>>> G = nx.Graph()
>>> G.add_path([0,1,2,3,4])
+ >>> list(nx.dfs_postorder_nodes(G,0))
[4, 3, 2, 1, 0]
+ >>> list(nx.dfs_postorder_nodes(G,0,2))
[1, 0]
Notes |
codereview_python_data_5333 | if count:
return int(count)
- if "PYTEST_CURRENT_TEST" in os.environ:
- # pytest sets the environment variable PYTEST_CURRENT_TEST. If the variable is set
- # then return exact listen count using count(*) for listens corresponding to the
- # user_name in the listen schema itself
- query = "SELECT count(*) FROM listen WHERE user_name = :user_name"
- else:
- # otherwise fetch the count from listen_count view
- query = "SELECT SUM(count) FROM listen_count WHERE user_name = :user_name"
try:
with timescale.engine.connect() as connection:
Could we maybe remove this and mock? probably not but I'll check.
if count:
return int(count)
+ query = "SELECT SUM(count) FROM listen_count WHERE user_name = :user_name"
try:
with timescale.engine.connect() as connection: |
codereview_python_data_5335 | >>> from Bio import TogoWS
>>> for id in TogoWS.search_iter("pubmed", "diabetes+human", limit=10):
... print("PubMed ID: %s" %id) # maybe fetch data with entry?
- PubMed ID: 27374092
Internally this first calls the Bio.TogoWS.search_count() and then
uses Bio.TogoWS.search() to get the results in batches.
You need to replace the id (27374092) with an ELLIPSIS (...) because the search result (and thus the first id) will change when new articles with this keywords will be added to Pubmed. The ELLIPSIS (three dots: ...) will match any output.
>>> from Bio import TogoWS
>>> for id in TogoWS.search_iter("pubmed", "diabetes+human", limit=10):
... print("PubMed ID: %s" %id) # maybe fetch data with entry?
+ PubMed ID: ...
Internally this first calls the Bio.TogoWS.search_count() and then
uses Bio.TogoWS.search() to get the results in batches. |
codereview_python_data_5336 | assert self._tempdir is not None # for mypy
modified_src = self._tempdir / src.name
- shutil.copy(os.path.join(REPO_ROOT, 'www/header.asciidoc'), modified_src)
outfp = io.StringIO()
```suggestion shutil.copy(str(REPO_ROOT / 'www' / 'header.asciidoc'), modified_src) ```
assert self._tempdir is not None # for mypy
modified_src = self._tempdir / src.name
+ shutil.copy(str(REPO_ROOT / 'www' / 'header.asciidoc'), modified_src)
outfp = io.StringIO() |
codereview_python_data_5344 | p["A02"] = p["A02"]
for w in p:
pass
- self.assertTrue("A01" in p)
- self.assertFalse("test" in p)
self.assertRaises(ValueError, next, p.get_row("test"))
self.assertEqual(next(p.get_row("A")), p["A01"])
self.assertRaises(ValueError, next, p.get_column("test"))
These should use ``self.assertIn`` and ``self.assertNotIn`` instead please.
p["A02"] = p["A02"]
for w in p:
pass
+ self.assertIn("A01", p)
+ self.assertNotIn("test", p)
self.assertRaises(ValueError, next, p.get_row("test"))
self.assertEqual(next(p.get_row("A")), p["A01"])
self.assertRaises(ValueError, next, p.get_column("test")) |
codereview_python_data_5346 | self.__add_result_listeners(jmx)
if not is_jmx_generated:
self.__force_tran_parent_sample(jmx)
- self.__force_hc4_cookie_handler(jmx)
self.__fill_empty_delimiters(jmx)
return jmx
What if user uses older JMeter and does not want to force HC4 cookie manager?
self.__add_result_listeners(jmx)
if not is_jmx_generated:
self.__force_tran_parent_sample(jmx)
+ if self.settings.get('version', self.JMETER_VER) >= '3.2':
+ self.__force_hc4_cookie_handler(jmx)
self.__fill_empty_delimiters(jmx)
return jmx |
codereview_python_data_5350 | return get_model(app_label, model_name)
-def get_installed_app_config(app_label):
- try:
- return apps.get_app_config(app_label)
- except LookupError:
- pass
-
-
def get_model(app_label, model_name):
"""
Fetches a Django model using the app registry.
Just let the users call `apps.get_app_config` directly instead of having a separate function.
return get_model(app_label, model_name)
def get_model(app_label, model_name):
"""
Fetches a Django model using the app registry. |
codereview_python_data_5355 | ('colors', 'tab.indicator.stop'): 'tabs.indicator.stop',
('colors', 'tab.indicator.error'): 'tabs.indicator.error',
('colors', 'tab.indicator.system'): 'tabs.indicator.system',
- ('tabs', 'always-hide'): 'hide-always',
('tabs', 'auto-hide'): 'hide-auto',
}
DELETED_OPTIONS = [
Okay, last one before I'll test and (if it works for me) merge ;) I'd say let's remove this one. Except for you, there's not going to be anyone who had the old name in the config for a just invented option ;)
('colors', 'tab.indicator.stop'): 'tabs.indicator.stop',
('colors', 'tab.indicator.error'): 'tabs.indicator.error',
('colors', 'tab.indicator.system'): 'tabs.indicator.system',
('tabs', 'auto-hide'): 'hide-auto',
}
DELETED_OPTIONS = [ |
codereview_python_data_5356 | """Receive on layer 3"""
r = SuperSocket.recv(self, x)
if r:
- ts = r.time
- r = r.payload
- r.time = ts
return r
def send(self, pkt):
No need for a temporary variable here or another variable assignment: ```suggestion r.payload.time = r.time return r.payload ```
"""Receive on layer 3"""
r = SuperSocket.recv(self, x)
if r:
+ r.payload.time = r.time
+ return r.payload
return r
def send(self, pkt): |
codereview_python_data_5358 | try:
resp.content # Consume socket so it can be released
except (ContentDecodingError, RuntimeError):
- pass # It seems to have already been consumed.
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
In the ContentDecodingError case, how do we know that the entire content has actually been read?
try:
resp.content # Consume socket so it can be released
except (ContentDecodingError, RuntimeError):
+ resp.raw.read() # Ensure that the socket is consumed
if i >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) |
codereview_python_data_5360 | <h1>Error 503 Backend is unhealthy</h1>
[remainder of a Varnish 503 error page omitted; the original code for this record was not recoverable]
don't need types in docstring if they're in the method args
[a second Varnish 503 error page stood in place of the revised code; omitted] |
codereview_python_data_5373 | code.mark_pos(self.pos)
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.generate_evaluation_code(code)
- result = self.expr.result()
- if not self.expr.is_temp and result:
if not self.expr.type.is_void:
result = "(void)(%s)" % result
code.putln("%s;" % result)
Could you keep the `is_temp` check before calling `result()` here? Just in case? Just split the `if` into two.
code.mark_pos(self.pos)
self.expr.result_is_used = False # hint that .result() may safely be left empty
self.expr.generate_evaluation_code(code)
+ if not self.expr.is_temp and self.expr.result():
+ result = self.expr.result()
if not self.expr.type.is_void:
result = "(void)(%s)" % result
code.putln("%s;" % result) |
codereview_python_data_5374 | finding.get('source_properties').get('violation_data'))
raise api_errors.ApiExecutionError(violation_data, e)
- # pylint: disable=logging-too-many-args
def list_findings(self, source_id):
"""Lists all the findings in CSCC.
why is this pylint disable needed?
finding.get('source_properties').get('violation_data'))
raise api_errors.ApiExecutionError(violation_data, e)
def list_findings(self, source_id):
"""Lists all the findings in CSCC. |
codereview_python_data_5381 | if len(transactions) + len(current_spent_transactions) > 1:
raise DoubleSpend('tx "{}" spends inputs twice'.format(txid))
elif transactions:
- transaction = operation_class(transactions[0]).from_db(self, transactions[0])
elif current_spent_transactions:
transaction = current_spent_transactions[0]
To make the core transaction module independent from the plugin modules, we can do DI the following way: ``` class Transaction: type_registry = {} @classmethod def register_type(typ, clas): type_registry[typ] = clas @classmethod def from_whatever(tx): return cls(type_registry[tx['operation']]) ``` Then, inside `a_plugin.py` one can do smth like: ``` from core import Transaction Transaction.register_type('election', ValidatorElection) ``` What do you think?
if len(transactions) + len(current_spent_transactions) > 1:
raise DoubleSpend('tx "{}" spends inputs twice'.format(txid))
elif transactions:
+ transaction = Transaction.from_db(self, transactions[0])
elif current_spent_transactions:
transaction = current_spent_transactions[0] |
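A hedged, self-contained sketch of the registry idea floated in the comment (names like `register_type` are illustrative, not the project's actual API): the core class dispatches to whatever subclasses the plugins have registered.

```python
class Transaction:
    type_registry = {}

    def __init__(self, payload):
        self.payload = payload

    @classmethod
    def register_type(cls, operation, subclass):
        cls.type_registry[operation] = subclass

    @classmethod
    def from_dict(cls, tx):
        # Fall back to the base class for operations no plugin has claimed.
        concrete = cls.type_registry.get(tx["operation"], cls)
        return concrete(tx)

# A plugin module registers its type; the core never has to import the plugin.
class ValidatorElection(Transaction):
    pass

Transaction.register_type("election", ValidatorElection)

print(type(Transaction.from_dict({"operation": "election"})).__name__)   # ValidatorElection
print(type(Transaction.from_dict({"operation": "transfer"})).__name__)   # Transaction
```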
codereview_python_data_5388 | if role not in accepted_roles:
raise ValueError("Role {} is not acceptable".format(role))
if not need_to_be_on_ledger and role != "*":
- raise ValueError("It has point to set 'need_to_be_on_ledger' flag, only if '*' role set. "
- "Got {} role instead".format(role))
def __str__(self):
role = get_named_role(self.role) if self.role != '*' else 'ALL'
I would rather say `'need_to_be_on_ledger' can be set to False only if any role is accepted (role='*'). Got {} role instead.`
if role not in accepted_roles:
raise ValueError("Role {} is not acceptable".format(role))
if not need_to_be_on_ledger and role != "*":
+ raise ValueError("'need_to_be_on_ledger' can be set to False only if any role is accepted (role='*'). "
+ "Got {} role instead.".format(role))
def __str__(self):
role = get_named_role(self.role) if self.role != '*' else 'ALL' |
codereview_python_data_5392 | """Checks and updates the necessary symbolizer options such as
`external_symbolizer_path` and `symbolize_inline_frames`."""
if 'external_symbolizer_path' not in tool_options:
- llvm_symbolizer_path_arg = _quote_value_if_needed(
- get_llvm_symbolizer_path())
- if llvm_symbolizer_path_arg:
tool_options.update({
- 'external_symbolizer_path': llvm_symbolizer_path_arg
})
if 'symbolize_inline_frames' not in tool_options:
tool_options.update({
i think _quote_value_if_needed should be moved inside the if. and then s/llvm_symbolizer_path_arg/llvm_symbolizer_path we shouldn't be quoting None as then we wont fail on the if.
"""Checks and updates the necessary symbolizer options such as
`external_symbolizer_path` and `symbolize_inline_frames`."""
if 'external_symbolizer_path' not in tool_options:
+ llvm_symbolizer_path = get_llvm_symbolizer_path()
+ if llvm_symbolizer_path:
tool_options.update({
+ 'external_symbolizer_path':
+ _quote_value_if_needed(llvm_symbolizer_path)
})
if 'symbolize_inline_frames' not in tool_options:
tool_options.update({ |
codereview_python_data_5397 | """Instead of reading a file, just parse a config entry."""
def locked_get(self):
- """ TODO ADD DOC-STRING"""
content = db_config.get_value('client_credentials')
if not content:
return None
Add a docstring. Return credentials ?
"""Instead of reading a file, just parse a config entry."""
def locked_get(self):
+ """Return Credentials."""
content = db_config.get_value('client_credentials')
if not content:
return None |
codereview_python_data_5402 | @property
def _pubsub(self):
- return getattr(self._thread, "_pubsub", None)
@_pubsub.setter
def _pubsub(self, value):
If I understand the proposal of @thedrow : ```python @property def pubsub(self): if self._thread.pubsub is None: # Create pubsub client instance here self._thread.pubsub = pubsub_instance return self._thread.pubsub ``` Thus the setter is not required, but all references to `self._pubsub` should now become `self.pubsub`.
@property
def _pubsub(self):
+ if getattr(self._thread, "_pubsub", None) is None:
+ self._thread._pubsub = self.backend._create_client(
+ **self.backend.connparams
+ ).pubsub(ignore_subscribe_messages=True)
+
+ return self._thread._pubsub
@_pubsub.setter
def _pubsub(self, value): |
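A generic sketch of the lazily-created, per-thread resource property described in the comment; the `object()` stand-in marks where the real code would build its pubsub client (an assumption, not the backend's actual API).

```python
import threading

class Client:
    def __init__(self):
        self._thread = threading.local()

    @property
    def connection(self):
        # Build the per-thread resource on first access, reuse it afterwards.
        if getattr(self._thread, "connection", None) is None:
            self._thread.connection = object()   # stand-in for the real client
        return self._thread.connection

c = Client()
assert c.connection is c.connection              # cached within this thread

seen = []
t = threading.Thread(target=lambda: seen.append(c.connection))
t.start()
t.join()
assert seen[0] is not c.connection               # each thread gets its own instance
```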
codereview_python_data_5405 | def auto_fp16_wrapper(old_func):
def new_func(*args, **kwargs):
args_info = getfullargspec(old_func)
num_args = len(args)
num_kwargs = len(kwargs)
The dimension check is unnecessary since we use the `auto_fp16` decorator to specify tensors to converted.
def auto_fp16_wrapper(old_func):
def new_func(*args, **kwargs):
+ if not args[0].fp16_enabled:
+ return old_func(*args, **kwargs)
args_info = getfullargspec(old_func)
num_args = len(args)
num_kwargs = len(kwargs) |
codereview_python_data_5420 | _log_inited = True
-def change(filters):
- console.addFilter(LogFilter(filters.split(',')))
-
-
def _init_py_warnings():
"""Initialize Python warning handling."""
warnings.simplefilter('default')
Instead of adding a new filter (which means I won't be able to change the filter I set on the command line, only add new filters), you could get the existing filter (e.g. by assigning it to a `console_filter` global when it's created in `init_log`), and set the `_names` attribute there (after renaming it to `names` to make it "public"). Also, same as below applies - you can do this directly from the command :wink:
_log_inited = True
def _init_py_warnings():
"""Initialize Python warning handling."""
warnings.simplefilter('default') |
codereview_python_data_5426 | rescale (bool): If True, return boxes in original image space.
Returns:
- Tensor: Labeled boxes in shape (n, 5), where the first 4 columns
- are bounding box positions (tl_x, tl_y, br_x, br_y) and the
- 5-th column is a score between 0 and 1.
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
Labeled boxes have the shape of (n,5)
rescale (bool): If True, return boxes in original image space.
Returns:
+ Tensor: Labeled boxes have the shape of (n,5), where the
+ first 4 columns are bounding box positions
+ (tl_x, tl_y, br_x, br_y) and the 5-th column is a score
+ between 0 and 1.
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg) |
codereview_python_data_5429 | create_path(dump_path)
db_dump.dump_postgres_db(dump_path, time_now, threads)
ls.dump_listens(dump_path, time_now, threads)
- write_hashes(dump_path)
@cli.command()
There is no error handling here. If the disk runs out of space during hash creation, the hash creation fails silently.
create_path(dump_path)
db_dump.dump_postgres_db(dump_path, time_now, threads)
ls.dump_listens(dump_path, time_now, threads)
+ try:
+ write_hashes(dump_path)
+ except IOError as e:
+ print('Unable to create hash files! Error: %s' % str(e))
+ return
+ print('Dumps created and hashes written at %s' % dump_path)
@cli.command() |
codereview_python_data_5432 | # and remove those addresses from being asked about again. Here
# any value of None means the address hasn't been set.
- values = current_context.get_if_set(reads)
addresses_not_found = []
for address, value in zip(reads, values):
if value is not None:
Why use extend and not `context_ids_already_searched = base_contexts[:]`?
# and remove those addresses from being asked about again. Here
# any value of None means the address hasn't been set.
+ values = current_context.get_all_set(reads)
addresses_not_found = []
for address, value in zip(reads, values):
if value is not None: |
codereview_python_data_5450 | -def Score(x, y):
pass
Method names should start with lowercase. ```suggestion def score(x, y): ```
+def score(x, y):
pass |
codereview_python_data_5451 | super(_ExtensionsGoalStateFromExtensionsConfig, self).__init__()
self._id = incarnation
self._text = xml_text
- self._parse_extensions_config(xml_text)
try:
self._do_common_validations()
except Exception as e:
- raise ExtensionsConfigError("Error parsing ExtensionsConfig (incarnation: {0}): {1}\n{2}".format(incarnation, format_exception(e), xml_text))
def _parse_extensions_config(self, xml_text):
xml_doc = parse_doc(xml_text)
Should we trim the xml text? Incase its very big? Is there a chance `xml_text` can contain secrets? We should redact it in that case too
super(_ExtensionsGoalStateFromExtensionsConfig, self).__init__()
self._id = incarnation
self._text = xml_text
try:
+ self._parse_extensions_config(xml_text)
self._do_common_validations()
except Exception as e:
+ raise ExtensionsConfigError("Error parsing ExtensionsConfig (incarnation: {0}): {1}\n{2}".format(incarnation, format_exception(e), self.get_redacted_text()))
def _parse_extensions_config(self, xml_text):
xml_doc = parse_doc(xml_text) |
codereview_python_data_5452 | del othercid['spam']
assert cid != othercid
assert cid == {'spam': 'blueval', 'eggs': 'redval'}
- assert cid.__eq__(object()) == NotImplemented
def test_setdefault(self):
cid = CaseInsensitiveDict({'Spam': 'blueval'})
Shouldn't this be ``` py with pytest.raises(NotImplementedError): cid == object() ``` We shouldn't be directly invoking dunder methods anywhere in the code base, not even tests.
del othercid['spam']
assert cid != othercid
assert cid == {'spam': 'blueval', 'eggs': 'redval'}
+ assert cid != object()
def test_setdefault(self):
cid = CaseInsensitiveDict({'Spam': 'blueval'}) |
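A simplified sketch (not the real `CaseInsensitiveDict`) of the distinction the test leans on: the dunder itself returns the `NotImplemented` sentinel, while the `==` operator then falls back to identity and evaluates to `False` rather than raising `NotImplementedError`.

```python
class CaseInsensitiveMapping:
    def __init__(self, data):
        self._data = {key.lower(): value for key, value in data.items()}

    def __eq__(self, other):
        if isinstance(other, CaseInsensitiveMapping):
            return self._data == other._data
        if isinstance(other, dict):
            return self == CaseInsensitiveMapping(other)
        return NotImplemented          # let Python decide instead of raising

cid = CaseInsensitiveMapping({'Spam': 'blueval'})
print(cid.__eq__(object()))            # NotImplemented (raw dunder return value)
print(cid == object())                 # False (falls back to identity comparison)
print(cid == {'spam': 'blueval'})      # True
```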
codereview_python_data_5456 | class LeafNode(Node):
- __slots__ = ('reader', 'is_leaf')
def __init__(self, path, reader):
Node.__init__(self, path)
`is_leaf` is already defined in `Node.__slots__`
class LeafNode(Node):
+ __slots__ = ('reader', )
def __init__(self, path, reader):
Node.__init__(self, path) |
codereview_python_data_5457 | try:
backup_archive_path = shutil.make_archive(backup_archive_path,
BACKUP_ARCHIVE_FORMAT, directory)
- logs.log('Created corpus backup file.',
- backup_archive_path=backup_archive_path,
- directory=directory,
- size=os.path.getsize(backup_archive_path))
dated_backup_url = gcs_url_for_backup_file(
backup_bucket_name, corpus.engine, corpus.project_qualified_target_name,
I'm actually not sure if this will provide any more info. I think the backups are being created they just don't contain anything.
try:
backup_archive_path = shutil.make_archive(backup_archive_path,
BACKUP_ARCHIVE_FORMAT, directory)
+ logs.log(
+ 'Created corpus backup file.',
+ backup_archive_path=backup_archive_path,
+ directory=directory,
+ size=os.path.getsize(backup_archive_path))
dated_backup_url = gcs_url_for_backup_file(
backup_bucket_name, corpus.engine, corpus.project_qualified_target_name, |
codereview_python_data_5459 | def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
- elif column in loaders:
- return loaders[column]
else:
raise ValueError(
"No PipelineLoader registered for column %s." % column
I think this will crash in the current implementation if the user doesn't supply any custom loaders (because `loaders` will be None, so `column in loaders` will barf).
def choose_loader(column):
if column in USEquityPricing.columns:
return pipeline_loader
+ elif data_frame_loaders and column in data_frame_loaders:
+ return data_frame_loaders[column]
else:
raise ValueError(
"No PipelineLoader registered for column %s." % column |
codereview_python_data_5462 | def get_preds(self, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False, n_batch:Optional[int]=None, pbar:Optional[PBar]=None, ordered=True) -> List[Tensor]:
"Return predictions and targets on the valid, train, or test set, depending on `ds_type`."
- lf = self.loss_func if with_loss else None
- preds = get_preds(self.model, self.dl(ds_type), cb_handler=CallbackHandler(self.callbacks),
- activ=_loss_func2activ(self.loss_func), loss_func=lf, n_batch=n_batch, pbar=pbar)
if ordered and hasattr(self.dl(ds_type), 'sampler'):
sampler = [i for i in self.dl(ds_type).sampler]
reverse_sampler = np.argsort(sampler)
I think those two lines can be replaced by `super().get_preds(...)` that would make your code shorter and avoid importing the private function `_loss_func2actv`
def get_preds(self, ds_type:DatasetType=DatasetType.Valid, with_loss:bool=False, n_batch:Optional[int]=None, pbar:Optional[PBar]=None, ordered=True) -> List[Tensor]:
"Return predictions and targets on the valid, train, or test set, depending on `ds_type`."
+ preds = super().get_preds(ds_type=ds_type, with_loss=with_loss, n_batch=n_batch, pbar=pbar)
if ordered and hasattr(self.dl(ds_type), 'sampler'):
sampler = [i for i in self.dl(ds_type).sampler]
reverse_sampler = np.argsort(sampler) |
codereview_python_data_5467 | .. [1] https://en.wikipedia.org/wiki/Graph_minor
"""
-from networkx.algorithms.minors import contraction
-
from networkx.algorithms.minors.contraction import (
contracted_edge,
contracted_nodes,
I guess my only remaining question is: do we want to add this module to the top-level namespace, or only the functions within it? I.e. do we ever want users to do `from networkx import contraction` or `nx.contraction.contracted_edge` instead of calling the functions from directly from `nx`. It seems superfluous to expose the module from `nx` when all of the relevant functions are already available from there, but I don't feel strong about it either way - just curious what others think.
.. [1] https://en.wikipedia.org/wiki/Graph_minor
"""
from networkx.algorithms.minors.contraction import (
contracted_edge,
contracted_nodes, |
codereview_python_data_5468 | try:
remote = cache[__method_name__]
except KeyError:
- cache[__method_name__] = remote = getattr(remote_cls, __method_name__)
return remote(self.__remote_end__, *args, **kw)
wrapper.__name__ = method
You probably should do the same as in `__prepare__`
try:
remote = cache[__method_name__]
except KeyError:
+ # see comments in ProxyMeta.__prepare__ on using remote_cls.__getattr__
+ cache[__method_name__] = remote = remote_cls.__getattr__(
+ __method_name__
+ )
return remote(self.__remote_end__, *args, **kw)
wrapper.__name__ = method |
codereview_python_data_5470 | self.populations = []
def __str__(self):
- """Return (reconstructs) a GenePop textual representation."""
rep = [self.comment_line + '\n']
rep.append('\n'.join(self.loci_list) + '\n')
for pop in self.populations:
You should change this to "reconstruct" as well (to match the verb change).
self.populations = []
def __str__(self):
+ """Return (reconstruct) a GenePop textual representation."""
rep = [self.comment_line + '\n']
rep.append('\n'.join(self.loci_list) + '\n')
for pop in self.populations: |
codereview_python_data_5475 | # Code based on
# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
def directed_laplacian(G, nodelist=None, weight='weight', walk_type=None, alpha=0.95):
r"""Return the directed Laplacian matrix of G.
You could use the decorator `require` from networkx/utils/decorators.py
# Code based on
# https://bitbucket.org/bedwards/networkx-community/src/370bd69fc02f/networkx/algorithms/community/
+@require('numpy')
+@not_implemented_for('undirected')
+@not_implemented_for('multigraph')
def directed_laplacian(G, nodelist=None, weight='weight', walk_type=None, alpha=0.95):
r"""Return the directed Laplacian matrix of G. |
codereview_python_data_5476 | >>> list(TR.edges)
[(1, 2), (2, 3)]
To perform transitive reduction on a DiGraph and transfer node/edge data:
>>> DG = nx.DiGraph()
It might be good to have the explanation that's in the notes section in the example instead to provide a bit more context for the example. ```suggestion To avoid unnecessary data copies, this implementation does not return a DiGraph with node/edge data. To perform transitive reduction on a DiGraph and transfer node/edge data: >>> DG = nx.DiGraph() >>> DG.add_edges_from([(1, 2), (2, 3), (1, 3)], color='red') >>> TR = nx.transitive_reduction(DG) >>> TR.add_nodes_from(DG.nodes(data=True)) >>> TR.add_edges_from((u, v, DG.edges[u, v]) for u, v in TR.edges) >>> list(TR.edges(data=True)) [(1, 2, {'color': 'red'}), (2, 3, {'color': 'red'})] ```
>>> list(TR.edges)
[(1, 2), (2, 3)]
+ To avoid unnecessary data copies, this implementation does not return a
+ DiGraph with node/edge data.
To perform transitive reduction on a DiGraph and transfer node/edge data:
>>> DG = nx.DiGraph() |
codereview_python_data_5488 | class TestCRDWriterMissingAttrs(object):
# All required attributes with the default value
- req_attrs = {'resnames': 'UNK',
- 'resids': 1,
- 'names': 'X',
- 'tempfactors': 0.0,
- }
@pytest.mark.parametrize('missing_attr', req_attrs)
def test_warns(self, missing_attr, tmpdir):
The tests are failing because parametrizing with a dict causes an unpredictable order for xdist
class TestCRDWriterMissingAttrs(object):
# All required attributes with the default value
+ req_attrs = OrderedDict([
+ ('resnames', 'UNK'),
+ ('resids', 1),
+ ('names', 'X'),
+ ('tempfactors', 0.0),
+ ])
@pytest.mark.parametrize('missing_attr', req_attrs)
def test_warns(self, missing_attr, tmpdir): |
codereview_python_data_5498 | if set_tasks["ever_failed"]:
if not set_tasks["failed"]:
smiley = ":)"
- reason = "there were failed tasks but suceeded in retry"
else:
smiley = ":("
reason = "there were failed tasks"
perhaps change to "there were failed tasks but **they all** suc**c**eeded in **a** retry"
if set_tasks["ever_failed"]:
if not set_tasks["failed"]:
smiley = ":)"
+ reason = "there were failed tasks but they all suceeded in a retry"
else:
smiley = ":("
reason = "there were failed tasks" |
codereview_python_data_5499 | avg_count = torch.mean((ranks <= hit).float())
print("Hits (filtered) @ {}: {:.6f}".format(hit, avg_count.item()))
return mrr.item()
You should move this for loop into calc_filtered_mrr. Otherwise "test triplet {} / {}" will be printed twice, once in perturb_s_and_get_filtered_rank and once in perturb_o_and_get_filtered_rank, which is weird.
avg_count = torch.mean((ranks <= hit).float())
print("Hits (filtered) @ {}: {:.6f}".format(hit, avg_count.item()))
return mrr.item()
+
+#######################################################################
+#
+# Main evaluation function
+#
+#######################################################################
+
+def calc_mrr(embedding, w, train_triplets, valid_triplets, test_triplets, hits=[], eval_bz=100, eval_p="filtered"):
+ if eval_p == "filtered":
+ mrr = calc_filtered_mrr(embedding, w, train_triplets, valid_triplets, test_triplets, hits)
+ else:
+ mrr = calc_raw_mrr(embedding, w, test_triplets, hits, eval_bz)
+ return mrr |
codereview_python_data_5504 | self.db = db
self.db_ledger = DBLedgerActions(self.db, self.db.msg_aggregator)
- def _consume_cointracking_entry(
- self,
- csv_row: Dict[str, Any],
- extra_parameters: Dict[str, Any],
- ) -> None:
"""Consumes a cointracking entry row from the CSV and adds it into the database
Can raise:
- DeserializationError if something is wrong with the format of the expected values
Okay so both here and in all other import functions, why add an `extra_parameters` dict that's making things more confusing and is not typed by mypy? You added it to **all** import functions. Just add another argument called `timestamp_format`
self.db = db
self.db_ledger = DBLedgerActions(self.db, self.db.msg_aggregator)
+ def _consume_cointracking_entry(self, csv_row: Dict[str, Any], **kwargs: Any) -> None:
"""Consumes a cointracking entry row from the CSV and adds it into the database
Can raise:
- DeserializationError if something is wrong with the format of the expected values |
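A sketch of how the suggested `timestamp_format` could be forwarded through the new `**kwargs` signature; the call site and the format string are assumptions for illustration and do not appear in this diff.

# Hypothetical caller: forwards a CSV-specific timestamp format to the consumer.
self._consume_cointracking_entry(csv_row, timestamp_format='%d.%m.%Y %H:%M:%S')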
codereview_python_data_5505 | if email:
try:
ProductAlert.objects.get(
- product=self.product, email=email, status=ProductAlert.ACTIVE)
except ProductAlert.DoesNotExist:
pass
else:
raise forms.ValidationError(_(
"There is already an active stock alert for %s") % email)
- elif self.user.id:
try:
ProductAlert.objects.get(product=self.product,
user=self.user,
Is there a difference between this and doing a `self.user.is_authenticated()`, or would both be doing the same thing? I'm just asking because I haven't seen it checked this way before.
if email:
try:
ProductAlert.objects.get(
+ product=self.product, email=email,
+ status=ProductAlert.ACTIVE)
except ProductAlert.DoesNotExist:
pass
else:
raise forms.ValidationError(_(
"There is already an active stock alert for %s") % email)
+ elif self.user.is_authenticated():
try:
ProductAlert.objects.get(product=self.product,
user=self.user, |
codereview_python_data_5506 | if self.bot.config.release.get('all'):
group = [p for p in inventory.pokemons().all()
- if not p.in_fort and not p.is_favorite and not (p.unique_id == self.buddy['id'])]
self._release_pokemon_worst_in_group(group, 'all')
def _should_work(self):
If there is no buddy, `self.buddy['id']` will give an error
if self.bot.config.release.get('all'):
group = [p for p in inventory.pokemons().all()
+ if not p.in_fort and not p.is_favorite and not (p.unique_id == self.buddyid)]
self._release_pokemon_worst_in_group(group, 'all')
def _should_work(self): |
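A minimal sketch of the kind of guard being asked for, using the attribute name from the revised line; where this assignment actually lives in the task's setup is an assumption.

# Keep buddyid defined even when no buddy has been set.
self.buddyid = self.buddy['id'] if self.buddy else None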
codereview_python_data_5508 | mosaic_bboxes = np.concatenate(mosaic_bboxes, 0)
mosaic_labels = np.concatenate(mosaic_labels, 0)
if self.bbox_clip_border:
mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0,
2 * self.img_scale[1])
The original logic for this operation no longer exists, and it is unclear whether this will affect performance.
mosaic_bboxes = np.concatenate(mosaic_bboxes, 0)
mosaic_labels = np.concatenate(mosaic_labels, 0)
+ # In some dataset like MOT17, the gt bboxes are allowed to cross
+ # the border of images. Therefore, we don't need to clip the gt
+ # bboxes in these cases.
if self.bbox_clip_border:
mosaic_bboxes[:, 0::2] = np.clip(mosaic_bboxes[:, 0::2], 0,
2 * self.img_scale[1]) |
codereview_python_data_5516 | docstring_arborescence.format(kind='maximum', style='spanning arborescence')
minimum_spanning_arborescence.__doc__ = \
- docstring_arborescence.format(kind='minimum', style='spanning arborescence')
\ No newline at end of file
Don't remove the newline at the end of the file.
docstring_arborescence.format(kind='maximum', style='spanning arborescence')
minimum_spanning_arborescence.__doc__ = \
+ docstring_arborescence.format(kind='minimum', style='spanning arborescence') |
codereview_python_data_5520 | def __call__(self, data, **metadata):
opts = jsbeautifier.default_options()
opts.indent_size = 2
- res = jsbeautifier.beautify(strutils.native(data), opts)
return "JavaScript", format_text(res)
This is going to fail for any non-valid utf8 :wink:
def __call__(self, data, **metadata):
opts = jsbeautifier.default_options()
opts.indent_size = 2
+ data = data.decode("utf-8", "replace")
+ res = jsbeautifier.beautify(data, opts)
return "JavaScript", format_text(res) |
codereview_python_data_5527 | ('BRENDA, the Enzyme Database', ['1.1.1.1']),
('CAS', ['9031-72-5'])])
self.assertEqual(records[-1].entry, "2.7.2.1")
- self.assertEqual(records[-1].__str__().replace(" ", "").split("\n")[:10],
['ENTRYEC2.7.2.1', 'NAMEacetatekinase', 'acetokinase',
'AckA', 'AK', 'acetickinase', 'acetatekinase(phosphorylating)',
'CLASSTransferases;', 'Transferringphosphorus-containinggroups;',
Rather than calling the magic method explicitly, I would just use ``str(records[-1])`` here.
('BRENDA, the Enzyme Database', ['1.1.1.1']),
('CAS', ['9031-72-5'])])
self.assertEqual(records[-1].entry, "2.7.2.1")
+ self.assertEqual(str(records[-1]).replace(" ", "").split("\n")[:10],
['ENTRYEC2.7.2.1', 'NAMEacetatekinase', 'acetokinase',
'AckA', 'AK', 'acetickinase', 'acetatekinase(phosphorylating)',
'CLASSTransferases;', 'Transferringphosphorus-containinggroups;', |
codereview_python_data_5534 | else:
url = "https://{0}.twitch.tv{1}".format(self.subdomain, path)
- headers = {'Client-ID': TWITCH_CLIENT_ID}
- if "/kraken/channels/" not in path:
- headers['Accept'] = 'application/vnd.twitchtv.v{0}+json'.format(self.version)
# The certificate used by Twitch cannot be verified on some OpenSSL versions.
res = http.get(url, params=params, verify=False, headers=headers)
This change should be reverted, the v5 API must be used from now on.
else:
url = "https://{0}.twitch.tv{1}".format(self.subdomain, path)
+ headers = {'Accept': 'application/vnd.twitchtv.v{0}+json'.format(self.version),
+ 'Client-ID': TWITCH_CLIENT_ID}
# The certificate used by Twitch cannot be verified on some OpenSSL versions.
res = http.get(url, params=params, verify=False, headers=headers) |
codereview_python_data_5559 | @pytest.mark.skipif(not hasattr(typing, 'NewType'), reason='test for NewType')
def test_resolves_NewType():
- for t in [
- typing.NewType('T', int),
- typing.NewType('UnionT', typing.Optional[int]),
- typing.NewType('NestedT', typing.NewType('T', int)),
- ]:
- from_type(t).example()
It would be nice to add an assertion that this produces a value of the right type.
@pytest.mark.skipif(not hasattr(typing, 'NewType'), reason='test for NewType')
def test_resolves_NewType():
+ typ = typing.NewType('T', int)
+ nested = typing.NewType('NestedT', typ)
+ uni = typing.NewType('UnionT', typing.Optional[int])
+ assert isinstance(from_type(typ).example(), integer_types)
+ assert isinstance(from_type(nested).example(), integer_types)
+ assert isinstance(from_type(uni).example(), integer_types + (type(None),)) |
codereview_python_data_5564 | self.opts.packages_action = self.opts._packages_action
if self.opts.obsoletes:
if self.opts._packages_action:
- raise dnf.exceptions.Error(_("argument {}: not allowed with argument {}".format(
- "--obsoletes", "--" + self.opts._packages_action)))
else:
self.opts.packages_action = 'obsoletes'
if self.opts.packages_action == 'updates':
Please don't duplicate the code, create a new function for this.
self.opts.packages_action = self.opts._packages_action
if self.opts.obsoletes:
if self.opts._packages_action:
+ self.cli._option_conflict("--obsoletes", "--" + self.opts._packages_action)
else:
self.opts.packages_action = 'obsoletes'
if self.opts.packages_action == 'updates': |
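A minimal sketch of what the extracted helper could look like, reconstructed from the error string in the original snippet; the actual `_option_conflict` on the dnf Cli object may be named or structured differently.

# Assumed shape of the helper; mirrors the message from the duplicated raise.
def _option_conflict(self, option_string_1, option_string_2):
    raise dnf.exceptions.Error(
        _("argument {}: not allowed with argument {}").format(
            option_string_1, option_string_2))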
codereview_python_data_5571 | return RateLimiter(FLAGS.max_bigquery_api_calls_per_100_seconds,
self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
- def get_bigquery_projectids(self, key='projects'):
"""Request and page through bigquery projectids.
Returns: A list of project_ids enabled for bigquery.
nit: arg description for "key"?
return RateLimiter(FLAGS.max_bigquery_api_calls_per_100_seconds,
self.DEFAULT_QUOTA_TIMESPAN_PER_SECONDS)
+ def get_bigquery_projectids(self):
"""Request and page through bigquery projectids.
Returns: A list of project_ids enabled for bigquery. |
codereview_python_data_5572 | with tempdir.in_tempdir():
reference.trajectory[-1]
x = align.AlignTraj(universe, reference)
- os.remove(x.filename)
- assert os.path.basename(x.filename) == 'rmsfit_adk_dims.dcd'
def test_AlignTraj_outfile_default_exists(self, universe, reference, tmpdir):
reference.trajectory[-1]
This line is making the Windows tests fail - "PermissionError: [WinError 32] The process cannot access the file because it is being used by another process: 'C:\\projects\\mdanalysis\\testsuite\\MDAnalysisTests\\data\\rmsfit_adk_dims.dcd'"
with tempdir.in_tempdir():
reference.trajectory[-1]
x = align.AlignTraj(universe, reference)
+ try:
+ assert os.path.basename(x.filename) == 'rmsfit_adk_dims.dcd'
+ finally:
+ universe.trajectory.close() # drop file handle
+ os.remove(x.filename)
def test_AlignTraj_outfile_default_exists(self, universe, reference, tmpdir):
reference.trajectory[-1] |
codereview_python_data_5576 | def _enable_amqheartbeats(timer, connection, rate=2.0):
- heartbeat_error = [None]
if not connection:
return heartbeat_error
I'm not seeing why this is a `list` instead of just `None`. What benefit is there by making this a list and sometimes modifying the first value?
def _enable_amqheartbeats(timer, connection, rate=2.0):
+ heartbeat_error = None
if not connection:
return heartbeat_error |