id | content
---|---|
codereview_python_data_5583 | state.last_saveload = path
path = os.path.expanduser(path)
try:
- f = file(path, "wb")
- f.write(str(content))
- f.close()
except IOError, v:
master.statusbar.message(v.strerror)
This must not be the case. We want to get to a point where we have error-only flows as well.
state.last_saveload = path
path = os.path.expanduser(path)
try:
+ with file(path, "wb") as f:
+ f.write(content)
except IOError, v:
master.statusbar.message(v.strerror) |
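The accepted fix above still uses Python 2 idioms (`file()`, `except IOError, v`). For reference, a minimal Python 3 sketch of the same context-manager save flow the reviewer is asking for; `save_body`, the path, and the bytes payload are illustrative stand-ins, and the statusbar object is replaced with a print.

```python
# Minimal Python 3 sketch of the context-manager save flow; names are
# illustrative, not the project's real API.
import os

def save_body(path, content):
    path = os.path.expanduser(path)
    try:
        with open(path, "wb") as f:   # file is closed even on write errors
            f.write(content)
    except OSError as e:              # OSError covers IOError in Python 3
        print(e.strerror)

save_body("example_body.bin", b"response body")
```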
codereview_python_data_5588 | """
self.ag.center_of_mass(pbc=True)
- def time_center_of_mass_unwrap(self, num_atoms):
- """Benchmark center_of_mass calculation with
- unwrap active.
- """
- self.ag_unwrap.center_of_mass(unwrap=True, compound='residues')
-
def time_center_of_geometry_default(self, num_atoms):
"""Benchmark center_of_geometry calculation with
pbc and unwrap inactive.
These look cool, but some are only going to work from 0.20.0 onwards. Is there a way to skip benchmarks if they're not valid? (@tylerjereddy ) Some sort of `@requires('0.20.0')` decorator?
"""
self.ag.center_of_mass(pbc=True)
def time_center_of_geometry_default(self, num_atoms):
"""Benchmark center_of_geometry calculation with
pbc and unwrap inactive. |
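The reviewer asks whether benchmarks can be skipped when the installed MDAnalysis is too old, suggesting something like an `@requires('0.20.0')` decorator. A possible sketch is below; the decorator name, the use of `packaging`, and the NotImplementedError-based skip are assumptions (asv skips benchmarks whose `setup` raises NotImplementedError, and the same convention is reused here), not an existing asv or MDAnalysis API.

```python
# Hypothetical version-gating decorator for asv benchmarks; assumes
# MDAnalysis and packaging are importable in the benchmark environment.
import functools
from packaging.version import Version
import MDAnalysis as mda

def requires(min_version):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if Version(mda.__version__) < Version(min_version):
                # assumption: raising NotImplementedError marks the benchmark
                # as skipped, mirroring asv's setup() convention
                raise NotImplementedError(f"needs MDAnalysis >= {min_version}")
            return func(*args, **kwargs)
        return wrapper
    return decorator

@requires("0.20.0")
def time_center_of_mass_unwrap_demo():
    """Placeholder body; the real benchmark would call
    ag.center_of_mass(unwrap=True, compound='residues')."""
    return None
```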
codereview_python_data_5592 | if len(G) == 0:
return {}, {}
- M = nx.to_scipy_sparse_matrix(G, nodelist=list(G))
- (n, m) = M.shape # should be square
- A = M.T @ M # authority matrix
x = np.ones((n, 1)) / n # initial guess
# choose fixed starting vector if not given
if nstart is not None:
I like that you are using A for the adjacency matrix above. Maybe we should keep that convention. Then I would replace ``` A = M.T @ M # authority matrix ``` with ``` ATA = A.T @ A # authority matrix ``` Similarly, ``` AAT = A @ A.T # hub matrix ```
if len(G) == 0:
return {}, {}
+ A = nx.to_scipy_sparse_matrix(G, nodelist=list(G))
+ (n, m) = A.shape # should be square
+ ATA = A.T @ A # authority matrix
x = np.ones((n, 1)) / n # initial guess
# choose fixed starting vector if not given
if nstart is not None: |
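The reviewer's naming (`ATA` for the authority matrix, `AAT` for the hub matrix) comes from the HITS power iteration. A standalone sketch of that iteration is shown below; it is not the NetworkX implementation, and the normalization details differ from NetworkX's own code.

```python
# Standalone HITS power-iteration sketch behind the ATA/AAT naming.
import numpy as np
import scipy.sparse as sp

def hits_sketch(A, max_iter=100, tol=1e-8):
    n = A.shape[0]
    x = np.ones((n, 1)) / n        # authority scores, initial guess
    ATA = A.T @ A                  # authority matrix (reviewer's ATA)
    for _ in range(max_iter):
        x_new = ATA @ x
        x_new = x_new / x_new.sum()
        if np.abs(x_new - x).sum() < tol:
            x = x_new
            break
        x = x_new
    # the hub matrix would be AAT = A @ A.T; hubs also follow directly
    # from the converged authorities via h = A @ x
    a = np.asarray(x).ravel()
    h = np.asarray(A @ x).ravel()
    h = h / h.sum()
    return h, a

A = sp.csr_matrix(np.array([[0, 1, 1], [1, 0, 0], [0, 1, 0]], dtype=float))
hubs, authorities = hits_sketch(A)
```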
codereview_python_data_5593 | import json
import tqdm
import pickle
-import gdown
from gluoncv.utils import download, makedirs
_TARGET_DIR = os.path.expanduser('~/.mxnet/datasets/visualgenome')
Put it in another place so mainland China users can have access?
import json
import tqdm
import pickle
from gluoncv.utils import download, makedirs
_TARGET_DIR = os.path.expanduser('~/.mxnet/datasets/visualgenome') |
codereview_python_data_5594 | @property
def hadoop_user_name(self):
- return os.getlogin()
@property
def spark_version(self):
It is a bit dangerous, in some environments HADOOP_USER_NAME may be already set and this will override it, right? Probably it would be safer to return None
@property
def hadoop_user_name(self):
+ return None
@property
def spark_version(self): |
codereview_python_data_5595 | def get_final_epoch(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
epoch = cfg.runner.max_epochs
if cfg.data.train.type == 'RepeatDataset':
This code cannot be modified because `epoch_{}.pth` will cause an error.
def get_final_epoch(config):
+ cfg = mmcv.Config.fromfile('./configs/' + config)
+ return cfg.runner.max_epochs
+
+
+def get_real_epoch(config):
cfg = mmcv.Config.fromfile('./configs/' + config)
epoch = cfg.runner.max_epochs
if cfg.data.train.type == 'RepeatDataset': |
codereview_python_data_5599 | self.nodeMsgRouter.routes[Request] = self.processNodeRequest
self.nodeAuthNr = self.defaultNodeAuthNr()
- self.nodeMsgRouter.routes[PrePrepare] = self.extraPrePrepareProcesing
-
-
- def extraPrePrepareProcesing(self, msg, frm):
- ledgerid = getattr(msg, f.LEDGER_ID.nm, -1)
- if self.poolCfg.isWritable() or ledgerid == CONFIG_LEDGER_ID:
- super().sendToReplica(msg, frm)
- else:
- logger.debug("Message {} from {} was ignored due to readonly mode".format(msg, frm))
-
-
def getPoolConfig(self):
return PoolConfig(self.configLedger)
Does it replace default PrePrepare handler?
self.nodeMsgRouter.routes[Request] = self.processNodeRequest
self.nodeAuthNr = self.defaultNodeAuthNr()
def getPoolConfig(self):
return PoolConfig(self.configLedger) |
codereview_python_data_5600 | archive = FileArchive(export_name=export_name, pack=False)
archive.add(self.image1)
archive.add(self.image2)
- self.assertEqual(None, FileArchive.clear(archive))
Not sure what this is testing, shouldn't this be something like: ```suggestion archive.clear() self.assertEqual(archive._files, {}) ```
archive = FileArchive(export_name=export_name, pack=False)
archive.add(self.image1)
archive.add(self.image2)
+ archive.clear()
+ self.assertEqual(archive._files, {}) |
codereview_python_data_5601 | raise errors.SchemaError(f'unsupported collection type {t!r}')
elif isinstance(t, s_objtypes.ObjectType):
- if t not in view_shapes:
- # If we have an object type that isn't attached to a view,
- # it must be the case that it doesn't actually appear in
- # the output, so just return *something* that is well
- # formed.
- return self._describe_type(
- self.schema.get('std::uuid'), view_shapes,
- view_shapes_metadata,
- )
-
# This is a view
self.schema, mt = t.material_type(self.schema)
base_type_id = mt.id
I feel like a better fix would be to pretend that a `view_shape` exists, but is empty, or, even better, consisting entirely of implicit pointers. Otherwise there is a danger of the frontend getting confused by an object not being represented properly.
raise errors.SchemaError(f'unsupported collection type {t!r}')
elif isinstance(t, s_objtypes.ObjectType):
# This is a view
self.schema, mt = t.material_type(self.schema)
base_type_id = mt.id |
codereview_python_data_5608 | Number of wells 96
If the handle contains no records, or more than one record,
- an exception is raised. For example:
from Bio import phenotype
record = phenotype.read("plates.csv", "pm-csv")
You need a double colon on the preceding paragraph to make this an RST literal block.
Number of wells 96
If the handle contains no records, or more than one record,
+ an exception is raised. For example::
from Bio import phenotype
record = phenotype.read("plates.csv", "pm-csv") |
codereview_python_data_5610 | runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
- runner.register_hook(CompatibleCheckHook())
if distributed:
runner.register_hook(DistSamplerSeedHook())
The function in train.py will be unified in the future. May use customized hook and add it in the default_runtime.py
runner.register_training_hooks(cfg.lr_config, optimizer_config,
cfg.checkpoint_config, cfg.log_config,
cfg.get('momentum_config', None))
if distributed:
runner.register_hook(DistSamplerSeedHook()) |
codereview_python_data_5614 | ndarray = ()
-def check_sample(values, require_1d_array=True, require_sequence=True):
if isinstance(values, ndarray):
- if require_1d_array and values.ndim != 1:
note_deprecation((
'Only one-dimensional arrays are supported for sampling, '
'and the given value has {ndim} dimensions (shape '
These new arguments are not needed - the function should always have the same (existing) requirements for the input.
ndarray = ()
+def check_sample(values, strategy_name):
if isinstance(values, ndarray):
+ if values.ndim != 1:
note_deprecation((
'Only one-dimensional arrays are supported for sampling, '
'and the given value has {ndim} dimensions (shape ' |
codereview_python_data_5615 | 'want to sample slices. Sampling a multi-dimensional '
'array will be an error in a future version of Hypothesis.'
).format(ndim=values.ndim, shape=values.shape))
- elif not isinstance(values, _SEQUENCE_TYPES):
note_deprecation(
'Cannot sample from {values}, not an ordered collection. '
'Hypothesis goes to some length to ensure that the {strategy} '
I wonder if we want a different deprecation message for permutations? Fiddling with the sort order for `permutations` will affect the shrink order, but not necessarily in this way - we shrink towards the original order, don't we?
'want to sample slices. Sampling a multi-dimensional '
'array will be an error in a future version of Hypothesis.'
).format(ndim=values.ndim, shape=values.shape))
+ elif not isinstance(values, (OrderedDict, Sequence, enum.EnumMeta)):
note_deprecation(
'Cannot sample from {values}, not an ordered collection. '
'Hypothesis goes to some length to ensure that the {strategy} ' |
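To make the accepted and rejected types in the diff above concrete, here is a tiny standalone check mirroring the `isinstance(values, (OrderedDict, Sequence, enum.EnumMeta))` test; it is an illustration only, not Hypothesis' real `check_sample`.

```python
# Standalone version of the "is this an ordered collection" check.
import enum
from collections import OrderedDict
from collections.abc import Sequence

def is_ordered_collection(values):
    return isinstance(values, (OrderedDict, Sequence, enum.EnumMeta))

class Color(enum.Enum):
    RED = 1
    GREEN = 2

assert is_ordered_collection([1, 2, 3])        # lists are Sequences
assert is_ordered_collection("abc")            # so are strings
assert is_ordered_collection(Color)            # enums have a defined order
assert not is_ordered_collection({1, 2, 3})    # sets would be deprecated
assert not is_ordered_collection({"a": 1})     # plain dicts are rejected here
```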
codereview_python_data_5621 | if skiprows is not None and not isinstance(skiprows, int):
return False
- # can't pickle dialect object to the worker process
- if read_csv_kwargs.get("dialect", None) is not None:
- return False
-
return True
@classmethod
are you sure this is relevant to the issue you're stating as being closed by this PR?
if skiprows is not None and not isinstance(skiprows, int):
return False
return True
@classmethod |
codereview_python_data_5622 | ],
),
)
- for i in range(1, len(cumulative))
]
return (
dict(partition_ids_with_indices)
Can we remove the `cumsum()` here and use `indices[count_for_each_partition[i]` below instead of what we have now? It should help with readability of the code
],
),
)
+ for i in range(1, len(count_for_each_partition))
+ if count_for_each_partition[i] > count_for_each_partition[i - 1]
]
return (
dict(partition_ids_with_indices) |
codereview_python_data_5627 | def __init__(self, data, models, **kwargs):
super().__init__(data, models, **kwargs)
- def _determine_loss_func(self, data):
- return F.cross_entropy
def save_encoder(self, name): save_model(self.model[0], self.get_model_path(name))
def load_encoder(self, name): load_model(self.model[0], self.get_model_path(name))
You can put these on one line.
def __init__(self, data, models, **kwargs):
super().__init__(data, models, **kwargs)
+ def _get_crit(self, data): return F.cross_entropy
def save_encoder(self, name): save_model(self.model[0], self.get_model_path(name))
def load_encoder(self, name): load_model(self.model[0], self.get_model_path(name)) |
codereview_python_data_5632 | return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
- """Given base-pair position, return (angle, cosine, sin)(PRIVATE)."""
angle = self.sweep * 2 * pi * (base - self.start) / self.length
return (angle, cos(angle), sin(angle))
Put a space between the closing and opening brackets here please.
return greytrack_bgs, greytrack_labels
def canvas_angle(self, base):
+ """Given base-pair position, return (angle, cosine, sin) (PRIVATE)."""
angle = self.sweep * 2 * pi * (base - self.start) / self.length
return (angle, cos(angle), sin(angle)) |
codereview_python_data_5638 | scheme = 'http'
host = 'localhost'
port = 9200
- es_retry_on_timeout=False
- es_timeout=10
es_max_retries = 3
def __init__(self, url=None, *args, **kwargs):
For uniformity and readability I think it is preferrable to surround `=` with spaces: ``` es_retry_on_timeout = False ```
scheme = 'http'
host = 'localhost'
port = 9200
+ es_retry_on_timeout = False
+ es_timeout = 10
es_max_retries = 3
def __init__(self, url=None, *args, **kwargs): |
codereview_python_data_5641 | -# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
nit: Should we make it 2019 as well?
+# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License. |
codereview_python_data_5643 | from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
-from mmdet.core import (bbox2roi, bbox2result,
merge_aug_masks)
-from mmdet.core import build_assigner, build_sampler
@DETECTORS.register_module
Since all methods are imported from the same module, a single statement is enough.
from .test_mixins import RPNTestMixin
from .. import builder
from ..registry import DETECTORS
+from mmdet.core import (build_assigner, bbox2roi, bbox2result, build_sampler,
merge_aug_masks)
@DETECTORS.register_module |
codereview_python_data_5644 | d[k] = _get_cls(v)
-_load_dict(icmp6rplcodes)
_load_dict(icmp6ndoptscls)
_load_dict(icmp6typescls)
_load_dict(ipv6nhcls)
This will never be called: the contrib module will always be called after `inet6`, meaning this will always be empty.
d[k] = _get_cls(v)
_load_dict(icmp6ndoptscls)
_load_dict(icmp6typescls)
_load_dict(ipv6nhcls) |
codereview_python_data_5647 | return self.op()
def test_wrong_pipeline():
pipe = WrongPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED)
- try:
- pipe.build()
- except Exception as e:
- print(e)
- return
- raise Exception('Should not pass')
This looks weird - there's nothing "wrong" with this pipeline. Also, the offending option is default and it's very hard for the reader to know why exactly this pipeline should fail. Consider renaming this class and specify the offending flag(s) explicitly.
return self.op()
+@raises(RuntimeError)
def test_wrong_pipeline():
pipe = WrongPipeline(BATCH_SIZE, NUM_WORKERS, DEVICE_ID, SEED)
+ pipe.build() |
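The fix above leans on nose's `@raises` decorator. For projects on pytest, the same intent can be expressed with `pytest.raises`; a standalone sketch follows, where `failing_build()` is a stand-in for `pipe.build()` since the real pipeline and its offending option are not reproduced here.

```python
# pytest-based equivalent of the @raises(RuntimeError) pattern in the diff;
# failing_build() is a hypothetical stand-in for pipe.build().
import pytest

def failing_build():
    raise RuntimeError("pipeline built with an unsupported option")

def test_failing_build():
    with pytest.raises(RuntimeError):
        failing_build()
```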
codereview_python_data_5650 | help='Deletes an entire model')
delete_model_parser.add_argument(
'model',
- help='Model to delete, either hash or name')
create_model_parser = action_subparser.add_parser(
'create',
Instead of `hash`, use `handle`, as it is more consistent with usage everywhere else.
help='Deletes an entire model')
delete_model_parser.add_argument(
'model',
+ help='Model to delete, either handle or name')
create_model_parser = action_subparser.add_parser(
'create', |
codereview_python_data_5654 | return data1.get(attr, default) == data2.get(attr, default)
else:
attrs = list(zip(attr, default)) # Python 3
def match(data1, data2):
- values1 = [data1.get(attr, d) for attr, d in attrs]
- values2 = [data2.get(attr, d) for attr, d in attrs]
- return values1 == values2
return match
try:
Might be better as ``` return all(data1.get(k, d) == data2.get(k, d) for k, d in zip(attr, default)) ``` so that lists don't need to be created.
return data1.get(attr, default) == data2.get(attr, default)
else:
attrs = list(zip(attr, default)) # Python 3
+
def match(data1, data2):
+ return all(data1.get(attr, d) == data2.get(attr, d) for attr, d in attrs)
return match
try: |
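A compact, self-contained illustration of the generator-based matcher the reviewer proposes; it mirrors NetworkX's categorical matching helpers in spirit but is standalone here, and the attribute names and defaults are arbitrary.

```python
# Standalone sketch of the all()/zip matcher, avoiding per-call list creation.
def categorical_match(attr, default):
    if isinstance(attr, str):
        def match(data1, data2):
            return data1.get(attr, default) == data2.get(attr, default)
    else:
        attrs = list(zip(attr, default))

        def match(data1, data2):
            return all(data1.get(a, d) == data2.get(a, d) for a, d in attrs)
    return match

same_color = categorical_match("color", None)
assert same_color({"color": "red"}, {"color": "red"})
assert not same_color({"color": "red"}, {})

same_color_size = categorical_match(["color", "size"], ["red", 1])
assert same_color_size({"color": "red"}, {"size": 1})   # defaults fill gaps
```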
codereview_python_data_5656 | # homepage : http://jaceksmietanski.net
# email : jacek.smietanski@ii.uj.edu.pl
#
-# This file is part of the Biopython distribution and governed by your
-# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
# It may be distributed freely with respect to the original authors.
We've not actually started dual licensing yet (see #898) as I'm still hoping to get a lawyer to look at the proposed wording for the header.
# homepage : http://jaceksmietanski.net
# email : jacek.smietanski@ii.uj.edu.pl
#
+# This code is released under the conditions of the Biopython license.
# Please see the LICENSE file that should have been included as part of this
# package.
# It may be distributed freely with respect to the original authors. |
codereview_python_data_5660 | warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
- entry = None
- cpp_already_overridden = False
if name and name in entries and not shadow:
old_entry = entries[name]
This makes an assumption about the order returned by `.all_alternatives()`. The code here shouldn't rely on that.
warning(pos, "'%s' is a reserved name in C." % cname, -1)
entries = self.entries
if name and name in entries and not shadow:
old_entry = entries[name] |
codereview_python_data_5666 | self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
- posix_o = Path(o).as_posix()
- res = self.matcher(str(posix_o))
- assert res,f'Failed to find "{self.pat}" in "{posix_o}"'
return res.group(1)
# Cell
`Path` is really slow, and this function needs to be fast. So I'd suggest something like ``` import os,posixpath o = str(o).replace(os.sep, posixpath.sep) ```
self.matcher = self.pat.match if match else self.pat.search
def __call__(self, o):
+ o = str(o).replace(os.sep, posixpath.sep)
+ res = self.matcher(o)
+ assert res,f'Failed to find "{self.pat}" in "{o}"'
return res.group(1)
# Cell |
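A quick standalone check that the plain string replacement the reviewer suggests produces the same normalized path as the slower pathlib route it replaces; the sample filename is arbitrary.

```python
# Both normalizations agree; the replace() version avoids building a Path
# object per call, which is the reviewer's performance point.
import os
import posixpath
from pathlib import Path

def via_pathlib(o):
    return Path(o).as_posix()

def via_replace(o):
    return str(o).replace(os.sep, posixpath.sep)

sample = os.path.join("train", "0001_cat.jpg")
assert via_pathlib(sample) == via_replace(sample) == "train/0001_cat.jpg"
```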
codereview_python_data_5668 | In order to query this endpoint, send a GET request.
A JSON response will be returned, with one of three codes.
- :statuscode 200: The user token is valid.
:statuscode 400: No token was sent to the endpoint.
- :statuscode 401: The user token is invalid (does not exist).
"""
auth_token = request.args.get('token', '')
if not auth_token:
this is not true anymore, we should update the docstring. =)
In order to query this endpoint, send a GET request.
A JSON response will be returned, with one of three codes.
+ :statuscode 200: The user token is (in)valid.
:statuscode 400: No token was sent to the endpoint.
"""
auth_token = request.args.get('token', '')
if not auth_token: |
codereview_python_data_5669 | raise CorpusPruningException('Failed to sync corpus to disk.')
if not self.quarantine_corpus.rsync_to_disk(self.quarantine_corpus_path):
- logs.log_error('Failed to sync quarantine corpus to disk.')
if not self.shared_corpus.rsync_to_disk(self.shared_corpus_path):
- logs.log_error('Failed to sync shared corpus to disk.')
self._cross_pollinate_other_fuzzer_corpuses()
can you add ", fuzz_target=self.fuzz_target" to both places, makes it easier to debug :)
raise CorpusPruningException('Failed to sync corpus to disk.')
if not self.quarantine_corpus.rsync_to_disk(self.quarantine_corpus_path):
+ logs.log_error('Failed to sync quarantine corpus to disk.',
+ fuzz_target=self.fuzz_target)
if not self.shared_corpus.rsync_to_disk(self.shared_corpus_path):
+ logs.log_error('Failed to sync shared corpus to disk.',
+ fuzz_target=self.fuzz_target)
self._cross_pollinate_other_fuzzer_corpuses() |
codereview_python_data_5671 | data = json.loads(fileutil.read_file(config_file))
except (IOError, OSError) as e:
self.logger.warn('Failed to load resource manifest file ({0}): {1}'.format(config_file, e.strerror))
- except ValueError:
- self.logger.warn('Malformed resource manifest file ({0}).'.format(config_file))
try:
handler_config = HandlerConfiguration(data)
Will this also catch file not found?
data = json.loads(fileutil.read_file(config_file))
except (IOError, OSError) as e:
self.logger.warn('Failed to load resource manifest file ({0}): {1}'.format(config_file, e.strerror))
+ except ValueError as e:
+ self.logger.warn('Malformed resource manifest file ({0}).'.format(ustr(e)))
try:
handler_config = HandlerConfiguration(data) |
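To answer the reviewer's question concretely: a missing file surfaces from the read as IOError/OSError, while malformed JSON raises ValueError (`json.JSONDecodeError` is a ValueError subclass), so the two except clauses catch different failures. A small self-contained demonstration:

```python
# Which exception fires where: missing file vs. malformed JSON.
import json

try:
    json.loads("{not valid json")
except ValueError as e:                 # JSONDecodeError is a ValueError
    print("malformed:", e)

try:
    open("/nonexistent/handler_config.json").read()
except (IOError, OSError) as e:         # FileNotFoundError is an OSError
    print("missing file:", e.strerror)
```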
codereview_python_data_5672 | self.overall_progress_tqdm = None
self.epoch_progress_tqdm = None
self.is_training = False
- self.test_last_update_time = time.time()
- self.test_epoch_progress_tqdm = None
- self.test_num_epochs = None
- self.test_logs = None
self.num_epochs = None
self.logs = None
super().__init__()
Are those `test_*` variables necessary anymore? Since we are not going to display the progress bar for `train with validation_data`, can we re-use the same variable for `train()` and `evaluate()`? This would make it easier to maintain, thanks!
self.overall_progress_tqdm = None
self.epoch_progress_tqdm = None
self.is_training = False
self.num_epochs = None
self.logs = None
super().__init__() |
codereview_python_data_5677 | from sovrin_client.test.helper import getClientAddedWithRole
-from sovrin_client.test.helper import checkRejects
from stp_core.loop.eventually import eventually
from sovrin_node.test.pool_config.helper import ensurePoolConfigSent, checkPoolConfigWritableSet, sendPoolConfig
from plenum.common.constants import STEWARD
Please use full names (write_force, not_write_not_force, etc.)
from sovrin_client.test.helper import getClientAddedWithRole
+from sovrin_client.test.helper import checkRejects, checkNacks
from stp_core.loop.eventually import eventually
from sovrin_node.test.pool_config.helper import ensurePoolConfigSent, checkPoolConfigWritableSet, sendPoolConfig
from plenum.common.constants import STEWARD |
codereview_python_data_5680 | item_count = item_dict.get('count')
item_id = item_dict.get('item_id')
- if not item_count and not item_id:
if item_id in items_stock:
items_stock[item_id] = item_count
return items_stock
I misunderstood the if condition. The previous one was `!= {}`, so the correct condition is `if item_count and item_id`, my bad.
item_count = item_dict.get('count')
item_id = item_dict.get('item_id')
+ if item_count and item_id:
if item_id in items_stock:
items_stock[item_id] = item_count
return items_stock |
codereview_python_data_5688 | import tempfile
import sqlite3
import os
This creates a file on the file system and returns an opened file. It's probably not what we want when we're creating a database _de novo_. I suggest that you create a temporary directory instead, and then join the tmpdir path with some filename to get the database file path.
import tempfile
+import shutil
import sqlite3
import os |
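A sketch of the tempdir-plus-filename approach the reviewer suggests for creating a fresh SQLite database; the filename and table are illustrative, and cleanup is shown for completeness.

```python
# Create the database file de novo inside a temporary directory.
import os
import shutil
import sqlite3
import tempfile

tmpdir = tempfile.mkdtemp()
try:
    db_path = os.path.join(tmpdir, "test.db")
    conn = sqlite3.connect(db_path)            # creates the file fresh
    conn.execute("CREATE TABLE t (x INTEGER)")
    conn.close()
finally:
    shutil.rmtree(tmpdir)
```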
codereview_python_data_5690 | async def _init_graphql_schema(conn, cluster, loop):
logger.info('Bootstrapping graphql module...')
- from edgedb.lang import schema as edgedb_schema
- from edgedb.server import protocol as edgedb_protocol
-
protocol = edgedb_protocol.Protocol(cluster, loop=loop)
protocol.backend = await backend.open_database(conn)
Is there still a reason for inline imports?
async def _init_graphql_schema(conn, cluster, loop):
logger.info('Bootstrapping graphql module...')
protocol = edgedb_protocol.Protocol(cluster, loop=loop)
protocol.backend = await backend.open_database(conn) |
codereview_python_data_5691 | from app.api.rest_api import RestApi
from app.service.app_svc import AppService
from app.service.auth_svc import AuthService
from app.service.data_svc import DataService
from app.service.learning_svc import LearningService
from app.service.planning_svc import PlanningService
from app.service.rest_svc import RestService
any reason to not load the conf/default.yml file instead of re-defining it here? i understand it can change--but that seems good for testing/validation.
from app.api.rest_api import RestApi
from app.service.app_svc import AppService
from app.service.auth_svc import AuthService
+from app.service.contact_svc import ContactService
from app.service.data_svc import DataService
+from app.service.file_svc import FileSvc
from app.service.learning_svc import LearningService
from app.service.planning_svc import PlanningService
from app.service.rest_svc import RestService |
codereview_python_data_5694 | if web:
install_webapp()
-def update_examples():
- '''
- This is only used during development to update the examples.
- '''
- # doesn't hurt to run init/update in case the repo doesn't exist yet.
- # if there are still files in it from before switching to submodules, they need to be deleted
- cmd('git submodule init; git submodule update')
- cmd('git submodule foreach "git fetch origin; git checkout master; git reset --hard origin/master"')
- cmd('env OUTDIR=../webapp-lib/examples make -C examples/')
def main():
parser = argparse.ArgumentParser(description="Install components of CoCalc into the system")
this `OUTDIR=../` makes me nervous
if web:
install_webapp()
def main():
parser = argparse.ArgumentParser(description="Install components of CoCalc into the system") |
codereview_python_data_5700 | assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
assert_equal(u.trajectory.n_frames, 2 * ref.trajectory.n_frames)
def test_pickle(self):
u = mda.Universe(PSF, DCD)
-
s = cPickle.dumps(u, protocol = cPickle.HIGHEST_PROTOCOL)
-
new_u = cPickle.loads(s)
-
assert_equal(u.atoms.names, new_u.atoms.names)
@pytest.mark.parametrize('dtype', (int, np.float32, np.float64))
Would it be worth testing more than PSF/DCD here?
assert_equal(len(u.atoms), 3341, "Loading universe failed somehow")
assert_equal(u.trajectory.n_frames, 2 * ref.trajectory.n_frames)
+ @pytest.mark.xfail(sys.version_info < (3, 0), reason="pickle function not \
+ working in python 2")
def test_pickle(self):
u = mda.Universe(PSF, DCD)
s = cPickle.dumps(u, protocol = cPickle.HIGHEST_PROTOCOL)
new_u = cPickle.loads(s)
assert_equal(u.atoms.names, new_u.atoms.names)
@pytest.mark.parametrize('dtype', (int, np.float32, np.float64)) |
codereview_python_data_5712 | if os.path.exists('/var/log/forseti.log'): # ubuntu on GCE
default_log_handler = logging.FileHandler('/var/log/forseti.log')
else:
default_log_handler = logging.handlers.SysLogHandler()
-
- default_log_handler.setFormatter(logging.Formatter(SYSLOG_LOG_FMT))
logger_instance = logging.getLogger(module_name)
logger_instance.addHandler(default_log_handler)
We probably want to include a the timestamp as '%(asctime)s' before the levelname for the messages in forseti.log, so you need two formats, one for syslog, and one for forseti.log.
if os.path.exists('/var/log/forseti.log'): # ubuntu on GCE
default_log_handler = logging.FileHandler('/var/log/forseti.log')
+ default_log_handler.setFormatter(logging.Formatter(FORSETI_LOG_FMT))
else:
default_log_handler = logging.handlers.SysLogHandler()
+ default_log_handler.setFormatter(logging.Formatter(SYSLOG_LOG_FMT))
logger_instance = logging.getLogger(module_name)
logger_instance.addHandler(default_log_handler) |
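A self-contained sketch of the two-formatter setup the reviewer asks for; the format strings are illustrative stand-ins for the project's `FORSETI_LOG_FMT` and `SYSLOG_LOG_FMT`, with the file format carrying `%(asctime)s` and the syslog format omitting it.

```python
# One formatter per handler: timestamped for the log file, bare for syslog.
import logging
import logging.handlers
import os

FORSETI_LOG_FMT = "%(asctime)s %(levelname)s %(name)s: %(message)s"  # stand-in
SYSLOG_LOG_FMT = "%(levelname)s %(name)s: %(message)s"               # stand-in

if os.path.exists("/var/log/forseti.log"):
    handler = logging.FileHandler("/var/log/forseti.log")
    handler.setFormatter(logging.Formatter(FORSETI_LOG_FMT))
else:
    handler = logging.handlers.SysLogHandler()
    handler.setFormatter(logging.Formatter(SYSLOG_LOG_FMT))

logger_instance = logging.getLogger(__name__)
logger_instance.addHandler(handler)
```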
codereview_python_data_5719 | def test_can_send_bytes_bytearray_objects_with_files(self):
# Test bytes:
- data = {'a': 0.0}
files = {'b': b'foo'}
r = requests.Request('POST', httpbin('post'), data=data, files=files)
p = r.prepare()
@Lukasa I thought we expected all data to be bytes/string and were generally against coercing int, float, etc.?
def test_can_send_bytes_bytearray_objects_with_files(self):
# Test bytes:
+ data = {'a': 'this is a string'}
files = {'b': b'foo'}
r = requests.Request('POST', httpbin('post'), data=data, files=files)
p = r.prepare() |
codereview_python_data_5721 | This module contains the functionality that is used when benchmarking modin commits.
In the case of using utilities from the main modin code, there is a chance that when
benchmarking old commits, the utilities changed, which in turn can unexpectedly affect
-the performance results.
"""
import os
```suggestion the performance results, hence some utility functions are duplicated here. ```
This module contains the functionality that is used when benchmarking modin commits.
In the case of using utilities from the main modin code, there is a chance that when
benchmarking old commits, the utilities changed, which in turn can unexpectedly affect
+the performance results, hence some utility functions are duplicated here.
"""
import os |
codereview_python_data_5732 | Store the partition information
"""
# Get shared data from server side
request = GetSharedDataRequest(GET_SHARED_MSG)
rpc.send_request(self._main_server_id, request)
response = rpc.recv_response()
revert the change here.
Store the partition information
"""
# Get shared data from server side
+ self.barrier()
request = GetSharedDataRequest(GET_SHARED_MSG)
rpc.send_request(self._main_server_id, request)
response = rpc.recv_response() |
codereview_python_data_5733 | :param obj:
:rtype: bool
"""
- dumpable_types = (cls.TYPES, JSONDumpable)
return isinstance(obj, dumpable_types)
@staticmethod
This changes the result completely. Is this an intentional change? It is also weird that no unit test has detected this problem.
:param obj:
:rtype: bool
"""
+ dumpable_types = tuple(cls.TYPES + (JSONDumpable,))
return isinstance(obj, dumpable_types)
@staticmethod |
codereview_python_data_5734 | class AlignmentHasDifferentLengthsError(Exception):
- """Manage exceptions for Alignments with different length."""
pass
Why "Manage"? Also having looked at the context it is raised, it would be more accurate to say something like "Exception where sequences in alignment have different lengths". Looking at this, it might be better for this to be a subclass of ``ValueError`` - but that would probably be best as a separate pull request later.
class AlignmentHasDifferentLengthsError(Exception):
+ """Exception where sequences in alignment have different lengths."""
pass |
codereview_python_data_5738 | def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Compare expected arguments."""
- self.assertListEqual(
- actual,
- ['/usr/bin/unshare', '-U', '--map-root-user', '-n', target_path
- ] + arguments + corpora_or_testcase)
@test_utils.integration
test needs fixing as well
def compare_arguments(self, target_path, arguments, corpora_or_testcase,
actual):
"""Compare expected arguments."""
+ self.assertListEqual(actual, ['/usr/bin/unshare', '-n', target_path] +
+ arguments + corpora_or_testcase)
@test_utils.integration |
codereview_python_data_5748 | self.data_instance_type = iris.cube.Cube
self.init_data()
def test_dataset_add_dimensions_values_hm(self):
pass
I assume you are doing this to disable some of the inherited tests. A docstring explaining why these tests are not enabled would be good...
self.data_instance_type = iris.cube.Cube
self.init_data()
+ # Disabled tests for NotImplemented methods
def test_dataset_add_dimensions_values_hm(self):
pass |
codereview_python_data_5750 | """
bs = len(img_metas)
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
- padding_mask = feats[-1].new_zeros((bs, input_img_h, input_img_w),
- dtype=torch.float32)
for i in range(bs):
img_h, img_w, _ = img_metas[i]['img_shape']
- padding_mask[i, img_h:, img_w:] = 1
padding_mask = F.interpolate(
padding_mask.unsqueeze(1),
size=feats[-1].shape[-2:],
here we should use new_ones and assign the unpadded area as 0, please check the docstring of multihead attention about the meaning and value of padding mask
"""
bs = len(img_metas)
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
+ padding_mask = feats[-1].new_ones((bs, input_img_h, input_img_w),
+ dtype=torch.float32)
for i in range(bs):
img_h, img_w, _ = img_metas[i]['img_shape']
+ padding_mask[i, img_h:, img_w:] = 0
padding_mask = F.interpolate(
padding_mask.unsqueeze(1),
size=feats[-1].shape[-2:], |
codereview_python_data_5760 | return ret
def from_networkx(self, nx_graph):
- # FIXME: the doc says that the argument should be nx.DiGraph but in
- # fact it can take in any data convertible to nx.DiGraph. Is this
- # intended?
"""Convert from networkx graph.
If 'id' edge attribute exists, the edge will be added follows
Not intended. Should be anything convertible to DiGraph.
return ret
def from_networkx(self, nx_graph):
"""Convert from networkx graph.
If 'id' edge attribute exists, the edge will be added follows |
codereview_python_data_5766 | """Upload inventory summary to GCS."""
-from google.cloud.forseti.common.gcp_api import storage
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
Please do not use abbreviation in class name `GcsInventorySummary`.
"""Upload inventory summary to GCS."""
+from google.cloud.forseti.common.gcp_api import file_uploader
from google.cloud.forseti.common.util import date_time
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats |
codereview_python_data_5767 | suggestions[entry['target']].update(entry)
suggestions[entry['target']]['count'] += 1
except DataError as e:
- # Catches 'argument exceeds the maximum length of 255 bytes' Error
- return HttpResponse(status=501, reason='Not Implemented: {error}'.format(error=e))
return JsonResponse(
sorted(suggestions.values(), key=lambda e: e['count'], reverse=True)[:max_results],
Whoops, my bad - text should not be in the `reason` attribute, it should be passed as the first positional argument, followed by `status`. `return HttpResponse('Not Implemented: {error}'.format(error=e), status=501)`
suggestions[entry['target']].update(entry)
suggestions[entry['target']]['count'] += 1
except DataError as e:
+
+ # Catches argument exceeds the maximum length of 255 bytes' Error
+ return HttpResponse('Not Implemented: {error}'.format(error=e), status=501)
return JsonResponse(
sorted(suggestions.values(), key=lambda e: e['count'], reverse=True)[:max_results], |
codereview_python_data_5768 | _plot_methods = dict(single='bezier')
def get_data(self, element, ranges=None, empty=False):
- data_attrs = ['x0', 'y0', 'x1', 'y1',
- 'cx0', 'cy0', 'cx1', 'cy1']
verts = np.array(element.data[0])
inds = np.where(np.array(element.data[1])==1)[0]
data = {da: [] for da in data_attrs}
Maybe zip the string keys and values and loop over them to append?
_plot_methods = dict(single='bezier')
def get_data(self, element, ranges=None, empty=False):
+ data_attrs = ['x0', 'y0', 'cx0', 'cy0', 'cx1', 'cy1', 'x1', 'y1',]
verts = np.array(element.data[0])
inds = np.where(np.array(element.data[1])==1)[0]
data = {da: [] for da in data_attrs} |
codereview_python_data_5772 | if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in sorted(self.temps):
- code.put_xdecref_clear(temp, type, do_for_memoryviewslice=True, have_gil=False)
def setup_parallel_control_flow_block(self, code):
"""
There is a slight difference here. `put_xdecref_clear()` generates code to first clear the variable and then decref it. I think that should often be ok, since the C compiler should generally be able to optimise away the strict ordering for local variables. But it's something that has been bothering me for a while since this difference isn't always explicit in the generator code. Not to be solved right now, but to keep in mind.
if self.is_parallel and not self.is_nested_prange:
code.putln("/* Clean up any temporaries */")
for temp, type in sorted(self.temps):
+ code.put_xdecref_clear(temp, type, have_gil=False)
def setup_parallel_control_flow_block(self, code):
""" |
codereview_python_data_5773 | """Configure self._clear_timer according to the config."""
interval = config.get('ui', 'message-timeout')
if interval != 0:
- multimpy_by = len(self._messages) + 1
- if multimpy_by > 5:
- multimpy_by = 5
- interval *= multimpy_by
self._clear_timer.setInterval(interval)
@pyqtSlot()
You could simply do `interval *= min(5, len(self._messages) + 1)`. Why the `+ 1` though?
"""Configure self._clear_timer according to the config."""
interval = config.get('ui', 'message-timeout')
if interval != 0:
+ interval *= min(5, len(self._messages))
self._clear_timer.setInterval(interval)
@pyqtSlot() |
codereview_python_data_5776 | from panopticapi.evaluation import OFFSET, VOID, PQStat
from panopticapi.utils import IdGenerator, rgb2id
-from .api_wrappers import COCO as _COCO
from .builder import DATASETS
from .coco import CocoDataset
We can directly use COCOPanoptic
from panopticapi.evaluation import OFFSET, VOID, PQStat
from panopticapi.utils import IdGenerator, rgb2id
+from .api_wrappers import COCO
from .builder import DATASETS
from .coco import CocoDataset |
codereview_python_data_5781 | G2 = nx.barbell_graph(3, 0)
# There is only one cut edge, and each set has volume seven.
S2 = {0, 1, 2}
- conductance_no_T = nx.conductance(G2, S2)
- expected2 = 1 / 7
- assert expected2 == conductance_no_T
class TestEdgeExpansion:
```suggestion assert nx.conductance(G2, S2) == 1 / 7 ``` A slightly more condense way of putting this - feel free to ignore!
G2 = nx.barbell_graph(3, 0)
# There is only one cut edge, and each set has volume seven.
S2 = {0, 1, 2}
+ assert nx.conductance(G2, S2) == 1 / 7
class TestEdgeExpansion: |
codereview_python_data_5784 | assert_equal(sorted(G.neighbors('C')),['D'])
assert_equal(sorted(G['C']),['D'])
assert_equal(sorted(G.neighbors('A')),['B', 'C'])
- assert_equal(sorted(G.neighbors('A')),['B', 'C'])
- assert_equal(sorted(G.neighbors('C')),['D'])
- assert_equal(sorted(G.neighbors('A')),['B', 'C'])
assert_raises(nx.NetworkXError,G.neighbors,'j')
assert_raises(nx.NetworkXError,G.neighbors,'j')
This ends up with many identical tests repeated. We probably want to remove duplicates.
assert_equal(sorted(G.neighbors('C')),['D'])
assert_equal(sorted(G['C']),['D'])
assert_equal(sorted(G.neighbors('A')),['B', 'C'])
assert_raises(nx.NetworkXError,G.neighbors,'j')
assert_raises(nx.NetworkXError,G.neighbors,'j') |
codereview_python_data_5787 | search command.
"""
- aliases = ('search', 's')
summary = _('search package details for the given string')
@staticmethod
Zypper calls this `se`, and I think that makes more sense than `s`.
search command.
"""
+ aliases = ('search', 'se')
summary = _('search package details for the given string')
@staticmethod |
codereview_python_data_5788 | class TestFont:
- """Test Font."""
-
TESTS = {
# (style, weight, pointsize, pixelsize, family
'"Foobar Neue"':
Feel free to just remove the docstring completely
class TestFont:
TESTS = {
# (style, weight, pointsize, pixelsize, family
'"Foobar Neue"': |
codereview_python_data_5791 | a = a1 + a2 # shape (B, deg, 1)
e = F.softmax(F.leaky_relu(a), dim=1)
if self.attn_drop != 0.0:
- e = F.dropout(e, self.attn_drop, training=self.training)
return {'accum': torch.sum(e * ft, dim=1)} # shape (B, D)
Instead of using nn.functional.dropout, let's change to a dropout layer, then we don't need to explicitly have `training=self.training`
a = a1 + a2 # shape (B, deg, 1)
e = F.softmax(F.leaky_relu(a), dim=1)
if self.attn_drop != 0.0:
+ e = F.dropout(e, self.attn_drop)
return {'accum': torch.sum(e * ft, dim=1)} # shape (B, D) |
codereview_python_data_5797 | 'memory://thisshouldntbethebroker')
assert self.app.conf.broker_url == prepatch_broker_url
def test_table(self):
assert self.app.conf.table(with_defaults=True)
assert self.app.conf.table(with_defaults=False)
The application configuration has already been resolved at this stage, so I think this test case does not actually prove the issue. Configuration cannot be changed dynamically.
'memory://thisshouldntbethebroker')
assert self.app.conf.broker_url == prepatch_broker_url
+ def test_broker_dynamic_setting(self):
+ prepatch_broker_url = 'memory://thisshouldbethebroker'
+ self.app.conf.broker_url = prepatch_broker_url
+ assert self.app.conf.broker_url == prepatch_broker_url
+
def test_table(self):
assert self.app.conf.table(with_defaults=True)
assert self.app.conf.table(with_defaults=False) |
codereview_python_data_5800 | from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import findings
-from google.cloud.forseti.notifier.notifiers.gcs_inv_summary import (
- GcsInvSummary)
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.inventory.storage import InventoryIndex
There are no length limits for import lines... so, please keep this in the same line.
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.common.util import string_formats
from google.cloud.forseti.notifier.notifiers import findings
+from google.cloud.forseti.notifier.notifiers.gcs_inv_summary import GcsInvSummary
from google.cloud.forseti.notifier.notifiers.base_notification import BaseNotification
from google.cloud.forseti.services.inventory.storage import DataAccess
from google.cloud.forseti.services.inventory.storage import InventoryIndex |
codereview_python_data_5801 | dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
if not self.by_epoch or not self.evaluation_flag(runner):
return
Seems this code snippet is used several times, how about turning it into a method?
dist.broadcast(module.running_var, 0)
dist.broadcast(module.running_mean, 0)
+ def after_train_epoch(self, runner):
+ if self.broadcast_bn_buffer:
+ self._broadcast_bn_buffer(runner)
+
if not self.by_epoch or not self.evaluation_flag(runner):
return |
codereview_python_data_5805 | def get_commited_votes(self, bigchain, election_pk=None):
if election_pk is None:
election_pk = self.to_public_key(self.id)
- txns = list(backend.query.get_received_votes_for_election(bigchain.connection, self.id, election_pk))
return self.count_votes(election_pk, txns)
@classmethod
A way to not duplicate the checks is to accept a getter. ``` def count_votes(cls, election_pk, txns, getter=getattr): ... if getter(txn, 'operation') == 'VALIDATOR_ELECTION_VOTE': ... ``` Then you just call `count_votes(election_pk, txns)` for objects and `count_votes(election_pk, txns, operator.itemgetter)` for dictionaries. On a side note, it would be nice to deal with dictionaries everywhere in the future.
def get_commited_votes(self, bigchain, election_pk=None):
if election_pk is None:
election_pk = self.to_public_key(self.id)
+ txns = list(backend.query.get_asset_tokens_for_public_keys(bigchain.connection,
+ self.id,
+ [election_pk],
+ 'VALIDATOR_ELECTION_VOTE'))
return self.count_votes(election_pk, txns)
@classmethod |
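A sketch of the getter-based deduplication the reviewer describes. Using `operator.getitem` gives dicts the same `getter(obj, key)` call shape that `getattr` gives objects; the `Vote` class, the counting body, and the election key are illustrative only.

```python
# One counting routine, two access styles, selected by the getter argument.
import operator

def count_votes(election_pk, txns, getter=getattr):
    total = 0
    for txn in txns:
        if getter(txn, "operation") == "VALIDATOR_ELECTION_VOTE":
            total += 1   # real code would sum vote outputs for election_pk
    return total

class Vote:
    operation = "VALIDATOR_ELECTION_VOTE"

assert count_votes("pk", [Vote(), Vote()]) == 2                        # objects
assert count_votes("pk", [{"operation": "VALIDATOR_ELECTION_VOTE"}],
                   getter=operator.getitem) == 1                       # dicts
```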
codereview_python_data_5808 | queue_url = self._queue_url(path, req_data, headers)
self._set_queue_attributes(queue_url, req_data)
elif action == 'DeleteQueue':
- del QUEUE_ATTRIBUTES[self._queue_url(path, req_data, headers)]
if 'QueueName' in req_data:
encoded_data = urlencode(req_data, doseq=True) if method == 'POST' else ''
a bit safer, to avoid `KeyError`: ``` QUEUE_ATTRIBUTES.pop(self._queue_url(path, req_data, headers), None) ```
queue_url = self._queue_url(path, req_data, headers)
self._set_queue_attributes(queue_url, req_data)
elif action == 'DeleteQueue':
+ QUEUE_ATTRIBUTES.pop(self._queue_url(path, req_data, headers), None)
if 'QueueName' in req_data:
encoded_data = urlencode(req_data, doseq=True) if method == 'POST' else '' |
codereview_python_data_5816 | "Bio.KEGG.Compound",
"Bio.KEGG.Enzyme",
"Bio.NMR.xpktools",
- "Bio.MaxEntropy"
"Bio.motifs",
"Bio.motifs.applications._xxmotif",
"Bio.pairwise2",
Missing trailing comma - would instead get concatenated with the next line I think.
"Bio.KEGG.Compound",
"Bio.KEGG.Enzyme",
"Bio.NMR.xpktools",
+ "Bio.MaxEntropy",
"Bio.motifs",
"Bio.motifs.applications._xxmotif",
"Bio.pairwise2", |
codereview_python_data_5818 | label = 'order'
name = 'oscar.apps.order'
verbose_name = _('Order')
-
- def ready(self):
- from . import receivers # noqa
- super(OrderConfig, self).ready()
I don't think we need a `super()` call here - the parent method is a noop.
label = 'order'
name = 'oscar.apps.order'
verbose_name = _('Order') |
codereview_python_data_5826 | test_tool.ParseOptions(options)
test_tool.ProcessStorage()
- with open(temp_file_name, 'r') as file_object:
for line in file_object.readlines():
lines.append(line.strip())
`open(temp_file_name, 'r')` => `io.open(temp_file_name, 'rb', encoding='utf-8')` ?
test_tool.ParseOptions(options)
test_tool.ProcessStorage()
+ with io.open(temp_file_name, 'rt', encoding=encoding) as file_object:
for line in file_object.readlines():
lines.append(line.strip()) |
codereview_python_data_5845 | image=tf.convert_to_tensor(rand_image), flow=tf.convert_to_tensor(rand_flows),
)
- np.testing.assert_allclose(rand_image, interp, rtol=1e-3)
def test_zero_flows():
```suggestion np.testing.assert_allclose(rand_image, interp, rtol=1e-6, atol=1e-6) ``` For info, the signature of assertAllClose is `def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):`
image=tf.convert_to_tensor(rand_image), flow=tf.convert_to_tensor(rand_flows),
)
+ np.testing.assert_allclose(rand_image, interp, rtol=1e-6, atol=1e-6)
def test_zero_flows(): |
codereview_python_data_5862 | """Set up."""
fake_global_configs = {
'securitycenter': {'max_calls': 1, 'period': 1.1}}
- cls.securitycenter_api_client = securitycenter.SecurityCenterClient(version='v1')
cls.project_id = 111111
cls.source_id = 'organizations/111/sources/222'
ditto, we don't need to call this api client anymore: ``` cls.securitycenter_client ```
"""Set up."""
fake_global_configs = {
'securitycenter': {'max_calls': 1, 'period': 1.1}}
+ cls.securitycenter = securitycenter.SecurityCenterClient(version='v1')
cls.project_id = 111111
cls.source_id = 'organizations/111/sources/222' |
codereview_python_data_5870 | # Link property reference; the source is the
# link immediately preceding this step in the path.
if path_tip.rptr is None:
- source = get_set_type(path_tip, ctx=ctx)
- s_dn = source.get_displayname(ctx.env.schema)
- raise errors.InvalidReferenceError(
- f"link property {ptr_name!r} "
- f"should be accessed from link, not from {s_dn!r}",
context=ptr_expr.ptr.context,
)
I'd use "unexpected reference to link property {ptr_name!r} outside of a path expression", and I think it should be an `EdgeQLSyntaxError` (we normally use `InvalidReferenceError` to indicate a syntactically-correct reference to a non-existent object).
# Link property reference; the source is the
# link immediately preceding this step in the path.
if path_tip.rptr is None:
+ raise errors.EdgeQLSyntaxError(
+ f"unexpected reference to link property {ptr_name!r} "
+ "outside of a path expression",
context=ptr_expr.ptr.context,
) |
codereview_python_data_5875 | def test_alignto_sort(self, universe):
mobile = universe.atoms[:4]
ref = universe.atoms[[3, 2, 1, 0]]
- assert align.alignto(mobile, ref, select='bynum 1-4') == (0.0, 0.0)
def _get_aligned_average_positions(ref_files, ref, select="all", **kwargs):
I sometimes get a bit nervous on exact equality checks with floating point calculations, though probably not a big deal here. `np.assert_allclose()` may be slightly less risky, though I suppose one could argue the values really should be very close to identical.
def test_alignto_sort(self, universe):
mobile = universe.atoms[:4]
ref = universe.atoms[[3, 2, 1, 0]]
+ np.testing.assert_allclose(align.alignto(mobile, ref,
+ select='bynum 1-4'), (0.0, 0.0))
def _get_aligned_average_positions(ref_files, ref, select="all", **kwargs): |
codereview_python_data_5878 | def remove_blank_target(self):
"""Remove target from link."""
elem = self._elem
- for i in range(5):
if elem is None:
break
tag = elem.tagName().lower()
Since you don't need `i` here, can you name it `_` instead?
def remove_blank_target(self):
"""Remove target from link."""
elem = self._elem
+ for _ in range(5):
if elem is None:
break
tag = elem.tagName().lower() |
codereview_python_data_5879 | self.shmem_buffer_holder.append(
self._shared_cache[emb_name][grad_shmem_name])
- # The minimun buffer size is 32.
# We extend the buffer by idx_i.shape[0] * 2 to avoid
# frequent shared memory allocation.
# The overall buffer cost will be smaller than three times
# the maximum memory requirement for sharing gradients.
- buffer_size = 32 if idx_i.shape[0] < 32 else idx_i.shape[0] * 2
idx_shmem = create_shared_mem_array(idx_shmem_name, \
(buffer_size,), idx_dtype)
grad_shmem = create_shared_mem_array(grad_shmem_name, \
why is the minimum buffer size so small? I think the buffer size can start from a couple of MB.
self.shmem_buffer_holder.append(
self._shared_cache[emb_name][grad_shmem_name])
+ # The total number of buffers is the number of NodeEmbeddings *
+ # world_size * (world_size - 1). The minimun buffer size is 128.
+ #
# We extend the buffer by idx_i.shape[0] * 2 to avoid
# frequent shared memory allocation.
# The overall buffer cost will be smaller than three times
# the maximum memory requirement for sharing gradients.
+ buffer_size = 128 if idx_i.shape[0] < 128 else idx_i.shape[0] * 2
idx_shmem = create_shared_mem_array(idx_shmem_name, \
(buffer_size,), idx_dtype)
grad_shmem = create_shared_mem_array(grad_shmem_name, \ |
codereview_python_data_5893 | upsample_ratio=2,
num_classes=81,
class_agnostic=False,
- carafe_cfg=None,
conv_cfg=None,
norm_cfg=None,
loss_mask=dict(
Maybe renamed to `upsample_cfg` for future usage.
upsample_ratio=2,
num_classes=81,
class_agnostic=False,
+ upsample_cfg=None,
conv_cfg=None,
norm_cfg=None,
loss_mask=dict( |
codereview_python_data_5908 | def to_array(dali_out):
if isinstance(dali_out, TensorListGPU):
- dali_out = dali_out.asCPU()
return np.squeeze(dali_out.as_array())
I believe `as_cpu() ` is the proper syntax now. `asCPU` is deprecated
def to_array(dali_out):
if isinstance(dali_out, TensorListGPU):
+ dali_out = dali_out.as_cpu()
return np.squeeze(dali_out.as_array()) |
codereview_python_data_5916 | 'similarity.similar_users', params=params))
-@cli.command(name='request_similar_users')
@click.pass_context
def request_all_stats(ctx):
ctx.invoke(request_user_stats, type_="entity", range_="week", entity="artists")
should be request_all_stats
'similarity.similar_users', params=params))
+@cli.command(name='request_all_stats')
@click.pass_context
def request_all_stats(ctx):
ctx.invoke(request_user_stats, type_="entity", range_="week", entity="artists") |
codereview_python_data_5919 | # END String map partitions operations
- def repeat(self, repeats):
- new_modin_frame = self._modin_frame._apply_full_axis(
- 0, lambda df: df.squeeze().repeat(repeats, axis=None)
- )
- return self.__constructor__(new_modin_frame)
-
def unique(self):
"""Return unique values of Series object.
We can make a more efficient version if `repeats` is an integer using `MapFunction`.
# END String map partitions operations
def unique(self):
"""Return unique values of Series object. |
codereview_python_data_5921 | path : file or string
File, directory, or filename to read.
- simplify: Simplify line geometries to start and end coordinates
- If False, and line feature geometry has multiple segments, the
- attributes for that feature will be repeated for each edge comprising
- that feature
Returns
-------
It should be: ``` simplify : bool If ``True``, simplify line geometries to the start and end coordinates. If ``False``... comprising that feature. ```
path : file or string
File, directory, or filename to read.
+ simplify: bool
+ If ``True``, simplify line geometries to start and end coordinates.
+ If ``False``, and line feature geometry has multiple segments, the
+ non-geometric attributes for that feature will be repeated for each
+ edge comprising that feature.
Returns
------- |
codereview_python_data_5924 | order_args[asset] = (asset, amount, style)
order_ids = self.blotter.batch_order(viewvalues(order_args))
- order_ids = pd.Series(data=order_ids, index=order_args)
- return order_ids[~order_ids.isnull()]
@error_keywords(sid='Keyword argument `sid` is no longer supported for '
'get_open_orders. Use `asset` instead.')
Should we `continue` here if `amount` is 0?
order_args[asset] = (asset, amount, style)
order_ids = self.blotter.batch_order(viewvalues(order_args))
+ return pd.Series(data=order_ids, index=order_args)
@error_keywords(sid='Keyword argument `sid` is no longer supported for '
'get_open_orders. Use `asset` instead.') |
codereview_python_data_5931 | df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
- meta = {0: "index", 1: "columns"}
for df in [modin_df, pandas_df]:
- sort_index = getattr(df, meta[axis])
- setattr(
- df,
- meta[axis],
[np.nan if i % 2 == 0 else sort_index[i] for i in range(len(sort_index))],
)
eval_general(
will it be the same? Current approach is a bit confusing ```suggestion for df in [modin_df, pandas_df]: sort_index = df.axes[axis] df.set_axis( [np.nan if i % 2 == 0 else sort_index[i] for i in range(len(sort_index))], axis=axis, inplace=True ) ```
df.index = [(i - length / 2) % length for i in range(length)]
# Add NaNs to sorted index
for df in [modin_df, pandas_df]:
+ sort_index = df.axes[axis]
+ df.set_axis(
[np.nan if i % 2 == 0 else sort_index[i] for i in range(len(sort_index))],
+ axis=axis,
+ inplace=True,
)
eval_general( |
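A tiny self-contained run of the reviewer's `df.axes` / `set_axis` suggestion on a throwaway frame; the non-inplace form is used here so the snippet also works on newer pandas where `inplace=True` has been removed from `set_axis`.

```python
# Replace every other index label with NaN via df.axes[axis] and set_axis.
import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [10, 20, 30, 40]})
axis = 0
sort_index = df.axes[axis]
df = df.set_axis(
    [np.nan if i % 2 == 0 else sort_index[i] for i in range(len(sort_index))],
    axis=axis,
)
print(df.index)   # every other label is now NaN
```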
codereview_python_data_5942 | from google.cloud.security.inventory.pipelines import load_group_members_pipeline
from google.cloud.security.inventory.pipelines import load_org_iam_policies_pipeline
from google.cloud.security.inventory.pipelines import load_orgs_pipeline
from google.cloud.security.inventory.pipelines import load_projects_iam_policies_pipeline
from google.cloud.security.inventory.pipelines import load_projects_pipeline
-from google.cloud.security.inventory.pipelines import load_projects_buckets_pipeline
from google.cloud.security.inventory import util
# pylint: enable=line-too-long
nit: I think that this import should be sorted above the import for load_projects_iam_policies_pipeline
from google.cloud.security.inventory.pipelines import load_group_members_pipeline
from google.cloud.security.inventory.pipelines import load_org_iam_policies_pipeline
from google.cloud.security.inventory.pipelines import load_orgs_pipeline
+from google.cloud.security.inventory.pipelines import load_projects_buckets_pipeline
from google.cloud.security.inventory.pipelines import load_projects_iam_policies_pipeline
from google.cloud.security.inventory.pipelines import load_projects_pipeline
from google.cloud.security.inventory import util
# pylint: enable=line-too-long |
codereview_python_data_5943 | err_msg = "Frechet distances did not increase after path reversal"
assert_(self.frech_matrix[1,2] >= self.frech_matrix[0,1], err_msg)
- def check_dendrogram_produced(self):
err_msg = "Dendrogram dictionary object was not produced"
assert_(type(self.plot_data[1]) is dict, err_msg)
Method name needs to start with `test_` or it is not run by nose.
err_msg = "Frechet distances did not increase after path reversal"
assert_(self.frech_matrix[1,2] >= self.frech_matrix[0,1], err_msg)
+ def test_dendrogram_produced(self):
err_msg = "Dendrogram dictionary object was not produced"
assert_(type(self.plot_data[1]) is dict, err_msg) |
codereview_python_data_5948 | self._out_feats = out_feats
self._k = k
with self.name_scope():
- # NOTE(zihao): MXNet do not support ModuleList, use Sequential as workaround.
self.fc = nn.Sequential()
for _ in range(k):
self.fc.add(
I think Sequential is the right way. Consider removing this note.
self._out_feats = out_feats
self._k = k
with self.name_scope():
self.fc = nn.Sequential()
for _ in range(k):
self.fc.add( |
codereview_python_data_5950 | from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.protocol.restapi import DataContract, set_properties
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
IMDS_ENDPOINT = '169.254.169.254'
-IMDS_ENDPOINT_BACKUP = '168.63.129.16'
APIVERSION = '2018-02-01'
BASE_URI = "http://{0}/metadata/instance/{1}?api-version={2}"
The agent reads the Wireserver IP from DHCP settings. I have never seen such a case (maybe Azure Stack uses something different), but that might not be the same as the one above. You should probably use the same URI that the agent is using instead of hardcoding it.
from azurelinuxagent.common.future import ustr
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common.protocol.restapi import DataContract, set_properties
+from azurelinuxagent.common.protocol.util import get_protocol_util
from azurelinuxagent.common.utils.flexible_version import FlexibleVersion
IMDS_ENDPOINT = '169.254.169.254'
APIVERSION = '2018-02-01'
BASE_URI = "http://{0}/metadata/instance/{1}?api-version={2}" |
codereview_python_data_5956 | self.entry.create_wrapper = True
else:
if self.overridable:
- error(self.pos, "Variables can not be cpdef")
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var(
Sorry, @da-woods. If `error` means raising an error instead of a warning (I'm not familiar with Cython's internal or test system), I have a big . This will break tons of code out there (including ours) that implemented the workaround shown in #3959. I am sure we learned it somewhere so we can't be alone.
self.entry.create_wrapper = True
else:
if self.overridable:
+ error(self.pos, "Variables cannot be declared with 'cpdef'. Use 'cdef' instead.")
if self.directive_locals:
error(self.pos, "Decorators can only be followed by functions")
self.entry = dest_scope.declare_var( |
codereview_python_data_5962 | device,
flatten=False):
"""Almost the same as the implementation in fcos, we remove half stride
- offset to align with the original implementation."""
warnings.warn(
'`_get_points_single` in `AutoAssignHead` will be '
'deprecated soon, we support a multi level point generator now'
- 'you can get points of single level '
'with `self.prior_generator.single_level_grid_priors` ')
y, x = super(FCOSHead,
self)._get_points_single(featmap_size, stride, dtype,
If we really want to deprecate this function, we should not implement the real logic here in 513-516 but use `self.prior_generator.single_level_grid_priors`
device,
flatten=False):
"""Almost the same as the implementation in fcos, we remove half stride
+ offset to align with the original implementation.
+
+ This function will be deprecated soon.
+ """
warnings.warn(
'`_get_points_single` in `AutoAssignHead` will be '
'deprecated soon, we support a multi level point generator now'
+ 'you can get points of a single level feature map '
'with `self.prior_generator.single_level_grid_priors` ')
y, x = super(FCOSHead,
self)._get_points_single(featmap_size, stride, dtype, |
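The general pattern the reviewer is asking for: a deprecated method should be a thin wrapper that warns and delegates, never a second copy of the logic. A generic, runnable sketch of that shape (not mmdet code; the real `single_level_grid_priors` signature differs):

```python
import warnings

class LegacyApiMixin:
    def old_method(self, *args, **kwargs):
        """Deprecated: warn, then delegate to the replacement."""
        warnings.warn('`old_method` is deprecated; use `new_method` instead.',
                      DeprecationWarning, stacklevel=2)
        return self.new_method(*args, **kwargs)

    def new_method(self, *args, **kwargs):
        # The only place the real logic lives.
        return args, kwargs
```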
codereview_python_data_5963 | def out_degrees(self, u=ALL):
"""Return the out-degree(s) of the given nodes.
- It computes the out-degree(s) w.r.t. to the edges of the given edge type.
It does not support heterogeneous graphs yet.
Parameters
So where is the edge type?
def out_degrees(self, u=ALL):
"""Return the out-degree(s) of the given nodes.
+ It computes the out-degree(s).
It does not support heterogeneous graphs yet.
Parameters |
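For reference, a tiny usage sketch of out-degrees on a homogeneous graph (assumes a recent DGL with the `dgl.graph` constructor and a PyTorch backend):

```python
import dgl
import torch

# A 3-node graph with edges 0->1, 0->2 and 1->2.
g = dgl.graph((torch.tensor([0, 0, 1]), torch.tensor([1, 2, 2])))

print(g.out_degrees())   # out-degree of every node: tensor([2, 1, 0])
print(g.out_degrees(0))  # out-degree of node 0: 2
```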
codereview_python_data_5965 | import argparse
-import math
import os
import time
-import dgl
-import dgl.function as fn
import numpy as np
-import scipy.sparse as sp
import sklearn.preprocessing
import torch
import torch.nn as nn
It seems I'm always getting "Iteration 0/MAX", which is a bit confusing. Maybe you can just remove it if it does not matter.
import argparse
import os
import time
import numpy as np
import sklearn.preprocessing
import torch
import torch.nn as nn |
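The complaint in this record is a progress line that always reads "Iteration 0/MAX". If such a line is kept at all, it should track real progress; a minimal illustrative sketch (variable names are not from the example script):

```python
max_iters = 100
for it in range(max_iters):
    # 1-based counter against the known total, overwriting the same line.
    print(f"Iteration {it + 1}/{max_iters}", end="\r")
print()
```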
codereview_python_data_5967 | aligner.score("AAA", "AA&")
def test_aligner_array_errors(self):
- import array
aligner = Align.PairwiseAligner()
self.assertEqual(aligner.alphabet, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
s1 = "GGG"
Move the import to the top of the file?
aligner.score("AAA", "AA&")
def test_aligner_array_errors(self):
aligner = Align.PairwiseAligner()
self.assertEqual(aligner.alphabet, "ABCDEFGHIJKLMNOPQRSTUVWXYZ")
s1 = "GGG" |
codereview_python_data_5969 | tasks = [TaskRole('test', 'cd scikit-learn/benchmarks && python bench_mnist.py', memoryMB=4096)]
-class FailJob(PaiTask):
- image = "openpai/pai.example.sklearn"
- name = "test_job_fail_{0}".format(time.time())
- command = 'cd scikit-learn/benchmarks && python bench_mnist.py'
- virtual_cluster = 'spark'
- tasks = [TaskRole('test', 'cd scikit-learn/benchmarks && python bench_mnist.py', memoryMB=4096)]
-
-
class TestPaiTask(unittest.TestCase):
@responses.activate
This class is identical to `SklearnJob`. Just keep `SklearnJob` and use it for both the successful test and the 404 test.
tasks = [TaskRole('test', 'cd scikit-learn/benchmarks && python bench_mnist.py', memoryMB=4096)]
class TestPaiTask(unittest.TestCase):
@responses.activate |
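The reviewer's suggestion is to drop the duplicate class and drive both the success and the 404 cases from `SklearnJob`. As a self-contained illustration of exercising two mocked outcomes with the `responses` library (the URL is made up, and a real test would submit `SklearnJob` rather than call `requests` directly):

```python
import unittest

import requests
import responses

JOBS_URI = 'http://pai.example/rest-server/api/v1/jobs'  # illustrative URL

class TestSubmit(unittest.TestCase):
    @responses.activate
    def test_submit_success(self):
        responses.add(responses.POST, JOBS_URI,
                      json={'message': 'submitted'}, status=202)
        resp = requests.post(JOBS_URI, json={'jobName': 'test'})
        self.assertEqual(resp.status_code, 202)

    @responses.activate
    def test_submit_not_found(self):
        responses.add(responses.POST, JOBS_URI,
                      json={'message': 'not found'}, status=404)
        resp = requests.post(JOBS_URI, json={'jobName': 'test'})
        self.assertEqual(resp.status_code, 404)
```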
codereview_python_data_5970 | basestring = str # Python 3
class NotConstant(object):
- def __deepcopy__(self, memo):
- return self
def __repr__(self):
return "<NOT CONSTANT>"
Wouldn't using `__new__` be more robust for making sure this is a singleton class?
basestring = str # Python 3
class NotConstant(object):
+ _obj = None
+
+ def __new__(cls):
+ if NotConstant._obj is None:
+ NotConstant._obj = super(NotConstant, cls).__new__(cls)
+
+ return NotConstant._obj
+
def __repr__(self):
return "<NOT CONSTANT>" |
codereview_python_data_5976 | from django.conf import settings
customDir = join(dirname(__file__), 'custom')
customModPrefix = 'graphite.functions.custom.'
Maybe add some function validations here? (types are known, etc.)
from django.conf import settings
+from graphite.functions.params import Param, ParamTypes # noqa
+from graphite.logger import log
+
customDir = join(dirname(__file__), 'custom')
customModPrefix = 'graphite.functions.custom.' |
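The review asks for validation of custom functions as they are loaded. A hedged sketch of what such a check might look like, using the `Param` type imported in the accepted change; the loader/registration hook it would plug into is not shown in this record and is assumed:

```python
from graphite.functions.params import Param

def validate_custom_function(func):
    # Require an explicit params declaration made of Param instances so the
    # renderer knows each argument's name and type.
    params = getattr(func, 'params', None)
    if params is None:
        raise ValueError('%s is missing a params declaration' % func.__name__)
    for param in params:
        if not isinstance(param, Param):
            raise ValueError('%s declares a non-Param parameter' % func.__name__)
```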
codereview_python_data_5980 | prev_block_id = g.id
for block in range(4):
transactions = [
- Transaction.create(
- [b.me], [user_vk], metadata={'i': i}).sign([b.me_private])
for i in range(10)
]
block = b.create_block(transactions)
Since `Transaction.asset` can never be `None` but always has to be an `Asset()` instance, and therefore always has a `uuid` created as its `id`, can we remove `metadata={'i': i}` now but still create unique transactions?
prev_block_id = g.id
for block in range(4):
transactions = [
+ Transaction.create([b.me], [user_vk]).sign([b.me_private])
for i in range(10)
]
block = b.create_block(transactions) |
codereview_python_data_5986 | key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key)
-
- @cmdutils.register(instance='command-dispatcher', scope='window')
- @cmdutils.argument('count', count=True)
- def repeat_command(self, count=None):
- """Repeat the last executed command, like '.' in vi.
-
- Args:
- count: Which numeric argument to give the command.
- """
- if runners.last_command is None:
- raise cmdexc.CommandError("You didn't do anything yet.")
- runners.CommandRunner(self._win_id).run(
- runners.last_command[0],
- count if count is not None else runners.last_command[1])
This doesn't need anything from `browser/commands.py`, so I think it should go to `misc/utilcmds.py` instead.
key: mark identifier; capital indicates a global mark
"""
self._tabbed_browser.jump_mark(key) |
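A rough sketch of the relocated command as a module-level function in `misc/utilcmds.py`; the import paths and the `win_id=True` decorator argument are assumptions about that era of the codebase:

```python
from qutebrowser.commands import cmdexc, cmdutils, runners

@cmdutils.register(scope='window')
@cmdutils.argument('win_id', win_id=True)
@cmdutils.argument('count', count=True)
def repeat_command(win_id, count=None):
    """Repeat the last executed command, like '.' in vi.

    Args:
        count: Which numeric argument to give the command.
    """
    if runners.last_command is None:
        raise cmdexc.CommandError("You didn't do anything yet.")
    runners.CommandRunner(win_id).run(
        runners.last_command[0],
        count if count is not None else runners.last_command[1])
```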
codereview_python_data_5988 | "timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
elif timeout is not None:
timeout = TimeoutSauce(connect=timeout, read=timeout)
Unfortunately, this change is breaking. You'll either need to remove the default timeout change, or move this PR to the 3.0 branch. You're also welcome to split this PR into two parts, the first bit making just the non-breaking changes against the master branch and the second bit making the breaking ones against 3.0.
"timeout tuple, or a single float to set "
"both timeouts to the same value".format(timeout))
raise ValueError(err)
+ elif isinstance(timeout, TimeoutSauce):
+ pass
elif timeout is not None:
timeout = TimeoutSauce(connect=timeout, read=timeout) |
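With the extra `isinstance(timeout, TimeoutSauce)` branch, an already-constructed urllib3 timeout object passes through untouched, while callers keep using the public forms — a single float or a (connect, read) tuple:

```python
import requests

# One float applies to both the connect and the read timeout.
requests.get('https://example.com', timeout=5)

# A (connect, read) tuple sets the two timeouts independently.
requests.get('https://example.com', timeout=(3.05, 27))
```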
codereview_python_data_5997 | return ret
return lhex(x)
-
-class ThreeBytesEnumField(EnumField, ThreeBytesField):
-
- def __init__(self, name, default, enum):
- EnumField.__init__(self, name, default, enum, "!I")
-
-
class _MultiEnumField(_EnumField):
def __init__(self, name, default, enum, depends_on, fmt = "H"):
Keep it in `lldp.uts`, as it is not used elsewhere.
return ret
return lhex(x)
class _MultiEnumField(_EnumField):
def __init__(self, name, default, enum, depends_on, fmt = "H"): |
codereview_python_data_6003 | @cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def reverse_selection(tab: apitypes.Tab) -> None:
- """Toggle caret selection mode."""
tab.caret.reverse_selection()
That seems wrong.
@cmdutils.register()
@cmdutils.argument('tab', value=cmdutils.Value.cur_tab)
def reverse_selection(tab: apitypes.Tab) -> None:
+ """Switch the stationary end of the current selection with the moving end."""
tab.caret.reverse_selection() |
codereview_python_data_6004 | locale=locale,
entity=t.entity,
translation=t,
- project=t.entity.resource.project,
) for t in Translation.objects.filter(pk__in=changed_translation_pks).prefetch_related('entity__resource')]
TranslationMemoryEntry.objects.bulk_create(memory_entries)
There's already `project` defined a few lines above; let's use it instead. It should save us a few DB hits.
locale=locale,
entity=t.entity,
translation=t,
+ project=project,
) for t in Translation.objects.filter(pk__in=changed_translation_pks).prefetch_related('entity__resource')]
TranslationMemoryEntry.objects.bulk_create(memory_entries) |
codereview_python_data_6009 | corpus_backup_dir = ('gs://{bucket}/corpus/libfuzzer/test2_fuzzer/')
gsutil.GSUtilRunner().run_gsutil([
'cp', (corpus_backup_dir +
- 'backup.zip').format(bucket=TEST_BUCKET_TEST2_BACKUP),
(corpus_backup_dir +
'%s.zip' % corpus_backup_date).format(bucket=self.backup_bucket)
])
So this code was previously wrong? Is that the reason for the bucket difference?
corpus_backup_dir = ('gs://{bucket}/corpus/libfuzzer/test2_fuzzer/')
gsutil.GSUtilRunner().run_gsutil([
'cp', (corpus_backup_dir +
+ 'backup.zip').format(bucket=TEST2_BACKUP_BUCKET),
(corpus_backup_dir +
'%s.zip' % corpus_backup_date).format(bucket=self.backup_bucket)
]) |
codereview_python_data_6012 | ]) == 0
-def on_master():
- return hash_for_name('HEAD') == merge_base('HEAD', 'origin/master')
-
-
def changelog():
with open(os.path.join(ROOT, 'docs', 'changes.rst')) as i:
return i.read()
> This updates the master check to use "git merge-base --is-ancestor"
]) == 0
def changelog():
with open(os.path.join(ROOT, 'docs', 'changes.rst')) as i:
return i.read() |
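The quoted note refers to replacing the hash comparison with `git merge-base --is-ancestor`. A sketch of what that check could look like (helper names here are illustrative; the git command's exit code carries the answer):

```python
import subprocess

def is_ancestor(commit, of):
    # `git merge-base --is-ancestor A B` exits 0 iff A is an ancestor of B.
    return subprocess.call(
        ['git', 'merge-base', '--is-ancestor', commit, of]) == 0

def on_master():
    # Same meaning as hash_for_name('HEAD') == merge_base('HEAD', 'origin/master').
    return is_ancestor('HEAD', 'origin/master')
```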