id | content |
---|---|
codereview_python_data_10541 | if os.path.isfile(ovf_file_path):
try:
ovf_env = OvfEnv(fileutil.read_file(ovf_file_path))
- self.report_event(message="{0}".format(ovf_env.provision_guest_agent),
is_success=True,
duration=0,
operation=WALAEventOperation.ProvisionGuestAgent)
Rather than using `"{0}".format(blah)` this way, just convert blah to a string, i.e. ```self.report_event(message=str(ovf_env.provision_guest_agent),```
if os.path.isfile(ovf_file_path):
try:
ovf_env = OvfEnv(fileutil.read_file(ovf_file_path))
+ self.report_event(message=ovf_env.provision_guest_agent,
is_success=True,
duration=0,
operation=WALAEventOperation.ProvisionGuestAgent) |
codereview_python_data_10543 | """Set up the standard AdK system in implicit solvent."""
- known_pos = np.array([3.94543672, -12.4060812, -7.26820087], dtype=np.float32)
-
@staticmethod
@pytest.fixture()
def universe():
`known_pos` is used only once within the class, so it can just live in this method
"""Set up the standard AdK system in implicit solvent."""
@staticmethod
@pytest.fixture()
def universe(): |
codereview_python_data_10546 | port or conf.broker_port,
transport=transport or conf.broker_transport,
ssl=self.either('broker_use_ssl', ssl),
- heartbeat=self.either('broker_heartbeat',heartbeat),
login_method=login_method or conf.broker_login_method,
failover_strategy=(
failover_strategy or conf.broker_failover_strategy
flake8 is complaining about a missing space after the `,`.
port or conf.broker_port,
transport=transport or conf.broker_transport,
ssl=self.either('broker_use_ssl', ssl),
+ heartbeat=heartbeat or self.conf.broker_heartbeat,
login_method=login_method or conf.broker_login_method,
failover_strategy=(
failover_strategy or conf.broker_failover_strategy |
codereview_python_data_10551 | class AttributeHandler(WriteRequestHandler):
def __init__(self, database_manager: DatabaseManager,
- write_request_validator: WriteRequestValidator):
super().__init__(database_manager, ATTRIB, DOMAIN_LEDGER_ID)
- self.write_request_validator = write_request_validator
def static_validation(self, request: Request):
self._validate_request_type(request)
please, rename write_request_validator to write_req_validator
class AttributeHandler(WriteRequestHandler):
def __init__(self, database_manager: DatabaseManager,
+ write_req_validator: WriteRequestValidator):
super().__init__(database_manager, ATTRIB, DOMAIN_LEDGER_ID)
+ self.write_req_validator = write_req_validator
def static_validation(self, request: Request):
self._validate_request_type(request) |
codereview_python_data_10554 | self.weight_mat = (col_mat - row_mat) ** 2
def call(self, y_true, y_pred):
- y_true = tf.cast(y_true, dtype=tf.keras.backend.floatx())
batch_size = tf.shape(y_true)[0]
cat_labels = tf.matmul(y_true, self.col_label_vec)
cat_label_mat = tf.tile(cat_labels, [1, self.num_classes])
Is this required?
self.weight_mat = (col_mat - row_mat) ** 2
def call(self, y_true, y_pred):
+ y_true = tf.cast(y_true, dtype=self.col_label_vec.dtype)
+ y_pred = tf.cast(y_pred, dtype=self.weight_mat)
batch_size = tf.shape(y_true)[0]
cat_labels = tf.matmul(y_true, self.col_label_vec)
cat_label_mat = tf.tile(cat_labels, [1, self.num_classes]) |
codereview_python_data_10556 | Example
-------
To group atoms with the same residue name and mass together::
>>> ag.groupby('resnames', 'masses')
Leave in the example with a single argument.
Example
-------
+
+
+ To group atoms with the same mass together::
+
+ >>> ag.groupby('masses')
+ {12.010999999999999: <AtomGroup with 462 atoms>,
+ 14.007: <AtomGroup with 116 atoms>,
+ 15.999000000000001: <AtomGroup with 134 atoms>}
+
To group atoms with the same residue name and mass together::
>>> ag.groupby('resnames', 'masses') |
codereview_python_data_10561 | for directed in [False, True]:
edge_counts = [[0] * 5 for row in range(5)]
for i in range(runs):
- G = nx.generators.random_graphs.fast_gnp_random_graph(
n, p, directed=directed
)
for (v, w) in G.edges:
As currently defined, this will run 4 times since it is inside the for-loop over random graph generators. The easiest way to fix this would be to move it into its own test function (or method in this case).
for directed in [False, True]:
edge_counts = [[0] * 5 for row in range(5)]
for i in range(runs):
+ G = generator(
n, p, directed=directed
)
for (v, w) in G.edges: |
codereview_python_data_10564 | self._idx += offset
self._idx %= len(self.items)
new = self.curitem()
- elif self._mode == self.Modes.exception: # pragma: no branch
raise
- elif self._mode == self.Modes.edge:
new_idx = self._idx + offset
right_edge = len(self._items) - 1
left_edge = 0
Can you move the `# pragma: no branch` line from above to here? This is to tell `coverage.py` to not mark the (not existing) "else:" branch of this if as not taken, as there's no way this could happen.
self._idx += offset
self._idx %= len(self.items)
new = self.curitem()
+ elif self._mode == self.Modes.exception:
raise
+ elif self._mode == self.Modes.edge: # pragma: no branch
new_idx = self._idx + offset
right_edge = len(self._items) - 1
left_edge = 0 |
codereview_python_data_10566 | import unittest
import warnings
from Bio._py3k import StringIO
-from Bio.PDB.PDBParser import PDBParser
try:
import numpy
This import needs to be after the check for whether NumPy is installed (so that ``run_tests.py`` can skip the whole test if NumPy is missing).
import unittest
import warnings
from Bio._py3k import StringIO
try:
import numpy |
codereview_python_data_10568 | cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
launch_cvd_path = os.path.join(cvd_bin_dir, 'launch_cvd')
- cvd_address = get_gce_address()
device_memory_mb = environment.get_value('DEVICE_MEMORY_MB',
DEFAULT_DEVICE_MEMORY_MB)
launch_cvd_command_line = (
'{launch_cvd_path} -daemon -memory_mb {device_memory_mb}'.format(
launch_cvd_path=launch_cvd_path, device_memory_mb=device_memory_mb))
- if cvd_address:
- launch_cvd_command_line = 'ssh %s %s' % (cvd_address, launch_cvd_command_line)
- execute_command(launch_cvd_command_line)
def stop_gce_device():
Can we quote the `launch_cvd_command_line` part?
cvd_dir = environment.get_value('CVD_DIR')
cvd_bin_dir = os.path.join(cvd_dir, 'bin')
launch_cvd_path = os.path.join(cvd_bin_dir, 'launch_cvd')
device_memory_mb = environment.get_value('DEVICE_MEMORY_MB',
DEFAULT_DEVICE_MEMORY_MB)
launch_cvd_command_line = (
'{launch_cvd_path} -daemon -memory_mb {device_memory_mb}'.format(
launch_cvd_path=launch_cvd_path, device_memory_mb=device_memory_mb))
+ execute_command(launch_cvd_command_line, on_cuttlefish_host=True)
def stop_gce_device(): |
codereview_python_data_10574 | self.func = lp.common_neighbors
def test_func(G, u, v, expected):
result = self.func(G, u, v)
- assert result == expected
self.test = test_func
def test_K5(self):
Use `assert_equal`, `ok_` and friends instead of `assert` to be consistent with the rest of the library.
self.func = lp.common_neighbors
def test_func(G, u, v, expected):
result = self.func(G, u, v)
+ assert_equal(result, expected)
self.test = test_func
def test_K5(self): |
codereview_python_data_10580 | # - make this selection based on qavg
from os.path import splitext
- warnings.warn("class ContactAnalysis1 will be deprecated, use Contacts instead", DeprecationWarning)
self.selection_strings = self._return_tuple2(kwargs.pop('selection', "name CA or name B*"), "selection")
self.references = self._return_tuple2(kwargs.pop('refgroup', None), "refgroup")
Very good to deprecate CA1. Just change the string to read "ContactAnalysis1 is deprecated and will be removed in 1.0/. Use Contacts instead. If there's a doc string, add the ``` rest .. deprecated: 0.14.0 ``` markup (or whatever version this goes in).
# - make this selection based on qavg
from os.path import splitext
+ warnings.warn("ContactAnalysis1 is deprecated and will be removed in 1.0/. Use Contacts instead.", DeprecationWarning)
self.selection_strings = self._return_tuple2(kwargs.pop('selection', "name CA or name B*"), "selection")
self.references = self._return_tuple2(kwargs.pop('refgroup', None), "refgroup") |
codereview_python_data_10585 | from helpers import RunOnceTask
import luigi
import luigi.scheduler
import luigi.worker
import luigi.rpc
I suppose it was too hard to make it work without using `time.sleep` right?
from helpers import RunOnceTask
import luigi
+import luigi.server
import luigi.scheduler
import luigi.worker
import luigi.rpc |
codereview_python_data_10589 | """Verify the file does not contain 20- or 40- length character strings,
which may be secret keys. Allow strings in the allow list in
https://github.com/awsdocs/aws-doc-sdk-examples/blob/master/scripts/checkin_tests.py,
- and in and in src/allow.list.xml in the AWSDocsChecklistCodeSampleCatalog GitFarm repo."""
error_count = 0
twenties = re.findall("[^A-Z0-9][A][ACGIKNPRS][A-Z]{2}[A-Z0-9]{16}[^A-Z0-9]",
file_contents)
"and in and in" -> "and in" (make same change to all occurrences in this doc).
"""Verify the file does not contain 20- or 40- length character strings,
which may be secret keys. Allow strings in the allow list in
https://github.com/awsdocs/aws-doc-sdk-examples/blob/master/scripts/checkin_tests.py,
+ and in src/allow.list.xml in the AWSDocsChecklistCodeSampleCatalog GitFarm repo."""
error_count = 0
twenties = re.findall("[^A-Z0-9][A][ACGIKNPRS][A-Z]{2}[A-Z0-9]{16}[^A-Z0-9]",
file_contents) |
codereview_python_data_10606 | import sys
import gflags as flags
-from ratelimiter import RateLimiter
# TODO: Investigate improving so we can avoid the pylint disable.
# pylint: disable=line-too-long
Can we add a link to where someone can see the usage example?
import sys
import gflags as flags
# TODO: Investigate improving so we can avoid the pylint disable.
# pylint: disable=line-too-long |
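The reviewer above asks for a pointer to a usage example for the `ratelimiter` import. A minimal sketch of typical usage, assuming the package's standard `RateLimiter(max_calls=..., period=...)` constructor and context-manager interface; `fetch_resource` is a hypothetical call being throttled:

```python
from ratelimiter import RateLimiter

# Allow at most 2 calls per 1-second window; additional calls block until
# the window has room again.
rate_limiter = RateLimiter(max_calls=2, period=1)

for item_id in range(10):
    with rate_limiter:
        fetch_resource(item_id)  # hypothetical throttled API call
```

The same object can usually also be applied as a decorator on the throttled function instead of using the `with` block.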
codereview_python_data_10608 | The change in system time between different frames. This can be set as an
attribute, but defaults to 1.0 ps.
``data``
- A dictionary contained all miscellaneous information for the
current Timestep.
``positions``
A numpy array of all positions in this Timestep, otherwise raises a
contained --> containing
The change in system time between different frames. This can be set as an
attribute, but defaults to 1.0 ps.
``data``
+ A dictionary containing all miscellaneous information for the
current Timestep.
``positions``
A numpy array of all positions in this Timestep, otherwise raises a |
codereview_python_data_10611 | assert not host_blocker._is_blocked(url)
-def blocklist_to_url(filename):
"""Get an example.com-URL with the given filename as path."""
- assert not pathlib.Path(filename).is_absolute(), filename
url = QUrl("http://example.com/")
- url.setPath("/" + filename)
assert url.isValid(), url.errorString()
return url
Instead of making this a `pathlib.Path` here, it should take a path object as an argument directly. Perhaps also rename the argument to `path`. Then `create_zipfile` and `create_blocklist` will need to be changed instead, so that they return a `pathlib.Path` object as well.
assert not host_blocker._is_blocked(url)
+def blocklist_to_url(path):
"""Get an example.com-URL with the given filename as path."""
+ assert not path.is_absolute(), path
url = QUrl("http://example.com/")
+ url.setPath("/" + str(path))
assert url.isValid(), url.errorString()
return url |
codereview_python_data_10613 | """
update cpp11 extensions that will run on versions of gcc >4.8
"""
- gcc_version = get_gcc_version()
if gcc_version is not None:
if float(gcc_version) > 4.8:
compile_flags, link_flags = flags
`cc` is undefined. You need to also move over the initial setup from the original function (CC/env/etc.).
"""
update cpp11 extensions that will run on versions of gcc >4.8
"""
+ gcc_version = get_gcc_version(ext.language)
if gcc_version is not None:
if float(gcc_version) > 4.8:
compile_flags, link_flags = flags |
codereview_python_data_10614 | from streamlink.plugin.api import useragents, validate
from streamlink.stream.stream import Stream
from streamlink.stream.stream import StreamIO
log = logging.getLogger(__name__)
```suggestion "password", sensitive=True, metavar="PASSWORD", ``` `PluginArgument`s don't need a plugin prefix. It gets added automatically.
from streamlink.plugin.api import useragents, validate
from streamlink.stream.stream import Stream
from streamlink.stream.stream import StreamIO
+from streamlink.utils.url import update_qsd
+
log = logging.getLogger(__name__) |
codereview_python_data_10618 | return model
-def _tabs(*, win_id_filter=lambda _win_id: True, add_win_id=True, cur_win_id=0):
"""Helper to get the completion model for tabs/other_tabs.
Args:
I think we still need to get a window ID from `data`, and only fall back to `cur_win_id` if the former is missing. Otherwise we might delete the wrong tab if there are multiple windows.
return model
+def _tabs(*, win_id_filter=lambda _win_id: True, add_win_id=True, cur_win_id=None):
"""Helper to get the completion model for tabs/other_tabs.
Args: |
codereview_python_data_10620 | proposal_list = proposals
for i in range(self.num_stages):
rcnn_train_cfg = self.train_cfg.rcnn[i]
lw = self.train_cfg.stage_loss_weights[i]
I suggest that we add a stage indicator like `self.current_stage = i`, thus it can be obtained in the sampler via `context.current_stage` instead of passing an additional argument here.
proposal_list = proposals
for i in range(self.num_stages):
+ self.current_stage = i
rcnn_train_cfg = self.train_cfg.rcnn[i]
lw = self.train_cfg.stage_loss_weights[i] |
codereview_python_data_10622 | stride=1,
padding=0,
dilation=1,
- num_deformable_groups=1):
super(DeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
To be consistent with the normal Conv2d, use `bias=False` instead of `no_bias=True`.
stride=1,
padding=0,
dilation=1,
+ deformable_groups=1):
super(DeformConv, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels |
codereview_python_data_10624 | yield ("* With this command, +;;+ is interpreted literally "
"instead of splitting off a second command.")
if cmd.no_replace_variables:
- yield "* This command does not replace variables like +\{url\}+."
def _get_action_metavar(action, nargs=1):
Please use a raw string (`r"..."`) here or double the backslashes (I'm guessing you're using them because `{` and `}` have some special meaning in asciidoc?)
yield ("* With this command, +;;+ is interpreted literally "
"instead of splitting off a second command.")
if cmd.no_replace_variables:
+ yield r"* This command does not replace variables like +\{url\}+."
def _get_action_metavar(action, nargs=1): |
codereview_python_data_10626 | coordinates ``reference[pairs[k, 0]]`` and
``configuration[pairs[k, 1]]``.
"""
- from .nsgrid import FastNS
-
if box is None:
# create a pseudobox
# define the max range
See above: would rather import at top of module.
coordinates ``reference[pairs[k, 0]]`` and
``configuration[pairs[k, 1]]``.
"""
if box is None:
# create a pseudobox
# define the max range |
codereview_python_data_10629 | )
def _wrap_partitions(self, partitions):
- if isinstance(partitions, self.instance_type):
- return [self.partition_type(partitions)]
- else:
return [
self.partition_type(
partitions[i],
When will this if statement be triggered?
)
def _wrap_partitions(self, partitions):
+ if not isinstance(partitions, self.instance_type):
return [
self.partition_type(
partitions[i], |
codereview_python_data_10631 | # When enable distributed training, the seed of each worker equals to
# num_worker * rank + worker_id + user_seed
# When non-distributed training, rank is 0
- rank = torch.distributed.get_rank() if dist else 0
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
`rank, world_size = get_dist_info()` can be moved outside, thus we can reuse the rank.
# When enable distributed training, the seed of each worker equals to
# num_worker * rank + worker_id + user_seed
# When non-distributed training, rank is 0
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed) |
codereview_python_data_10637 | yaml._save()
data = autoconfig.read()
- assert data['content.media.audio_capture']['global'] == val
- assert data['content.media.audio_video_capture']['global'] == val
- assert data['content.media.video_capture']['global'] == val
def test_empty_pattern(self, yaml, autoconfig):
valid_pattern = 'https://example.com/*'
Could probably make this a bit nicer with a for-loop: ```python for setting in ['content.media.audio_capture', 'content.media.audio_video_capture', 'content.media.video_capture']: assert data[setting]['global'] == val ```
yaml._save()
data = autoconfig.read()
+ for setting in ['content.media.audio_capture',
+ 'content.media.audio_video_capture',
+ 'content.media.video_capture']:
+ assert data[setting]['global'] == val
def test_empty_pattern(self, yaml, autoconfig):
valid_pattern = 'https://example.com/*' |
codereview_python_data_10639 | np.testing.assert_equal(tm["a"].numpy(), [100])
# Pybind always returns a "shallow copy" of the tensor. This is a copy since
- # the new variable points to a different tensor object. The copy is shallow
- # because the new tensor shares the same memory as the tensor in the map.
tm = o3d.t.geometry.TensorMap("positions")
tm["a"] = o3c.Tensor([100], device=device)
a_alias = tm["a"]
We can skip this test (`@skip`) since this doesn't test Open3D code.
np.testing.assert_equal(tm["a"].numpy(), [100])
# Pybind always returns a "shallow copy" of the tensor. This is a copy since
+ # the new variable points to a different tensor object, and thus the id() is
+ # different. The copy is shallow because the new tensor shares the same
+ # memory as the tensor in the map.
tm = o3d.t.geometry.TensorMap("positions")
tm["a"] = o3c.Tensor([100], device=device)
a_alias = tm["a"] |
codereview_python_data_10643 | cannot be executed. Will do a loop once through all abilities
enumerated in adversary.
"""
- itera = iter(range(0, len(self.adversary.atomic_ordering)))
while not self._is_atomic_closeable():
links = await services.get('planning_svc').get_links(self, buckets=['atomic'])
if links:
await self.wait_for_links_completion([await self.apply(links[-1])])
- self.last_ran = next(itera)
if await self.is_finished():
return
defaults to 0. Also `itera` is kinda non-descript... ability_iter maybe?
cannot be executed. Will do a loop once through all abilities
enumerated in adversary.
"""
+ ability_iter = iter(range(len(self.adversary.atomic_ordering)))
while not self._is_atomic_closeable():
links = await services.get('planning_svc').get_links(self, buckets=['atomic'])
if links:
await self.wait_for_links_completion([await self.apply(links[-1])])
+ self.last_ran = next(ability_iter)
if await self.is_finished():
return |
codereview_python_data_10651 | return js_data
- def simulate_all_and_collect_stats(self) -> int:
specs = {}
for methname in dir(self):
if not methname.startswith('test_'):
Yeah, just glancing at it I've no idea how to use it. Docstrings would be hugely appreciated!
return js_data
+ def simulate_all_and_collect_stats(self):
specs = {}
for methname in dir(self):
if not methname.startswith('test_'): |
codereview_python_data_10652 | channel = match.group("channel")
http.headers.update({"User-Agent": USER_AGENT})
- http.verify = False
hls_url = http.get(HUYA_URL % channel, schema=_hls_schema)
yield "live", HLSStream(self.session, hls_url)
Is there a specific reason for disabling tls verification?
channel = match.group("channel")
http.headers.update({"User-Agent": USER_AGENT})
+ #Some problem with SSL on huya.com now, do not use https
hls_url = http.get(HUYA_URL % channel, schema=_hls_schema)
yield "live", HLSStream(self.session, hls_url) |
codereview_python_data_10653 | except InvalidPage:
# Redirect to page one.
messages.error(request, _('The given page number was invalid.'))
- return HttpResponseRedirect(settings.OSCAR_HOMEPAGE)
return super().get(request, *args, **kwargs)
def get_search_handler(self, *args, **kwargs):
I don't think we should change this particular URL within the catalogue app. The catalogue app has its own index URL defined, and we should respect that - it's possible that the home page of a project is different from the catalogue root.
except InvalidPage:
# Redirect to page one.
messages.error(request, _('The given page number was invalid.'))
+ return redirect('catalogue:index')
return super().get(request, *args, **kwargs)
def get_search_handler(self, *args, **kwargs): |
codereview_python_data_10660 | import param
import bokeh
-from bokeh.io import push_notebook, Document
if LooseVersion(bokeh.__version__) >= LooseVersion('0.11'):
old_bokeh = False
- from bokeh.io import _CommsHandle
from bokeh.util.notebook import get_comms
else:
old_bokeh = True
How much can we trust that `_CommsHandle` is something we can rely on in future? From the name, it doesn't look like something we are expected to use directly...
import param
import bokeh
+from bokeh.io import Document
if LooseVersion(bokeh.__version__) >= LooseVersion('0.11'):
old_bokeh = False
+ from bokeh.io import push_notebook, _CommsHandle
from bokeh.util.notebook import get_comms
else:
old_bokeh = True |
codereview_python_data_10664 | @classmethod
def get_root(cls, session, inventory_index_id):
- """get the resource root from the inventory.
Args:
session (object): Database session.
nit: `get` should be capitalized: `Get`
@classmethod
def get_root(cls, session, inventory_index_id):
+ """Get the resource root from the inventory.
Args:
session (object): Database session. |
codereview_python_data_10667 | Minimum number of nodes for graphs
max_num_v: int
Maximum number of nodes for graphs
"""
def __init__(self, num_graphs, min_num_v, max_num_v, verbose=False, seed=None):
self.num_graphs = num_graphs
docstring for verbose and seed
Minimum number of nodes for graphs
max_num_v: int
Maximum number of nodes for graphs
+ verbose : bool
+ Whether to print out progress information
+ seed : int, default is None
+ Random seed for data generation
"""
def __init__(self, num_graphs, min_num_v, max_num_v, verbose=False, seed=None):
self.num_graphs = num_graphs |
codereview_python_data_10672 | <h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
- <p>Details: cache-sea4425-SEA 1645521828 1501263690</p>
<hr>
<p>Varnish cache server</p>
</body>
nit: Looks like lines 16-31 are duplicated 4 times in this class - can we either put this common logic into a util method, or simply combine the 4 test methods into a single test method? (I think testing create/get/list/delete in a single test method would make sense in this case..)
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4423-SEA 1645521828 4102039176</p>
<hr>
<p>Varnish cache server</p>
</body> |
codereview_python_data_10678 | did = uuid.uuid4().hex.upper()
#use new API key and modified MD5 algorithm
- sign = stupidMD5(("{0}{1}{2}{3}".format(channel, did, LAPI_SECRET, ts)).encode("utf-8"))
data = {
"cdn": "ws",
should be ```sign = stupidMD5("{0}{1}{2}{3}".format(channel, did, LAPI_SECRET, ts))```
did = uuid.uuid4().hex.upper()
#use new API key and modified MD5 algorithm
+ sign = stupidMD5(("{0}{1}{2}{3}".format(channel, did, LAPI_SECRET, ts)))
data = {
"cdn": "ws", |
codereview_python_data_10681 | metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
- outfile_prefix (str | None): The prefix of output file
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000.
Give an example of `outfile_prefix` and the complete filename.
metric (str | list[str]): Metrics to be evaluated.
logger (logging.Logger | str | None): Logger used for printing
related information during evaluation. Default: None.
+ outfile_prefix (str | None): The prefix of output file. It includes
+ the file path and the prefix of filename, e.g., "a/b/prefix".
+ If not specified, a temp file will be created. Default: None.
+ If results are evaluated with COCO protocol, it would be the
+ prefix of output json file. If results are evaluated with
+ cityscapes protocol, it would be the prefix of output txt/png
+ files.
classwise (bool): Whether to evaluating the AP for each class.
proposal_nums (Sequence[int]): Proposal number used for evaluating
recalls, such as recall@100, recall@1000. |
codereview_python_data_10682 | for record in records:
data = base64.b64decode(record['Data'])
obj_name = str(uuid.uuid4())
- obj_path = '%s%s%s' % (prefix, '/', obj_name)
try:
s3.Object(bucket, obj_path).put(Body=data)
except Exception as e:
Wondering what happens if `prefix` ends with a slash. For example, if `prefix` is `"my_prefix/"` and `obj_name` is `"test"`, then `obj_path` would result in `"my_prefix//test"` (which may lead to problems further down the line). Can we do this instead: ``` obj_path = '%s%s%s' % (prefix, '' if prefix.endswith('/') else '/', obj_name) ```
for record in records:
data = base64.b64decode(record['Data'])
obj_name = str(uuid.uuid4())
+ obj_path = '%s%s%s' % (prefix, '' if prefix.endswith('/') else '/', obj_name)
try:
s3.Object(bucket, obj_path).put(Body=data)
except Exception as e: |
codereview_python_data_10686 | results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
- results['flip'] = False
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict(
The random flip only does flip when `results` do not have the key `flip` at the beginning. This line should be removed, otherwise, the random flip will never be executed. Or you may need to pull the newest master since this bug is solved.
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
num_channels = 1 if len(img.shape) < 3 else img.shape[2]
results['img_norm_cfg'] = dict( |
codereview_python_data_10688 | def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
- The logger will be initilized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmdet".
initilized --> initialized.
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get the root logger.
+ The logger will be initialized if it has not been initialized. By default a
StreamHandler will be added. If `log_file` is specified, a FileHandler will
also be added. The name of the root logger is the top-level package name,
e.g., "mmdet". |
codereview_python_data_10691 | """
import matplotlib.pyplot as plt
plt.switch_backend('agg')
- cls._notebook = True
As the bokeh renderer also has a ``load_nb`` classmethod (and notebooks are explicitly mentioned in that method name), I would consider making ``_notebook`` into a constant ``notebook_context`` class parameter. The docstring can say it is set to True if the ``load_nb`` classmethod has been called.
"""
import matplotlib.pyplot as plt
plt.switch_backend('agg') |
codereview_python_data_10696 | client = Client()
num_cpus = sum(client.ncores().values())
elif execution_engine == "Multiprocess":
- num_cpus = 4 # todo: need to calc this
elif execution_engine != "Python":
raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
```suggestion import multiprocessing num_cpus = multiprocessing.cpu_count() ``` That's how ray gets the cpu count if none is provided
client = Client()
num_cpus = sum(client.ncores().values())
elif execution_engine == "Multiprocess":
+ import multiprocessing
+
+ num_cpus = multiprocessing.cpu_count()
elif execution_engine != "Python":
raise ImportError("Unrecognized execution engine: {}.".format(execution_engine)) |
codereview_python_data_10706 | HealthCheck.large_base_example
)
for i in hrange(self.cap):
if i not in zero_data.forced_indices:
break
Why does this fix the bug? (What was the bug?)
HealthCheck.large_base_example
)
+ # If the language starts with writes of length >= cap then there is
+ # only one string in it: Everything after cap is forced to be zero (or
+ # to be whatever value is written there). That means that once we've
+ # tried the zero value, there's nothing left for us to do, so we
+ # exit early here.
for i in hrange(self.cap):
if i not in zero_data.forced_indices:
break |
codereview_python_data_10711 | _DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]
-HostWorkers = namedtuple('HostWorkers', ['default', 'all'])
class _DatasetNames(Enum):
```suggestion _HostWorkers = namedtuple('HostWorkers', ['default', 'all']) ``` Can you please prefix this with a `_`? I don't want to encourage anyone to `from lightgbm.dask import HostWorkers`.
_DaskPart = Union[np.ndarray, pd_DataFrame, pd_Series, ss.spmatrix]
_PredictionDtype = Union[Type[np.float32], Type[np.float64], Type[np.int32], Type[np.int64]]
+_HostWorkers = namedtuple('HostWorkers', ['default', 'all'])
class _DatasetNames(Enum): |
codereview_python_data_10716 | 'Please check your path and filename and try '
'again. Error: %s', err)
- # Configuring log level for the application
- logger.set_logger_level_from_config(log_level)
-
- if enable_console_log:
- logger.enable_console_log()
-
# Setting up configurations
forseti_inventory_config = forseti_config.get('inventory', {})
inventory_config = InventoryConfig(
Can you just move this higher up in the method, so that we can capture all the logging to the console?
'Please check your path and filename and try '
'again. Error: %s', err)
# Setting up configurations
forseti_inventory_config = forseti_config.get('inventory', {})
inventory_config = InventoryConfig( |
codereview_python_data_10717 | self.unCommitted[0][1].items()])
self.unCommitted = self.unCommitted[1:]
else:
- logger.debug('{} is trying to commit a batch with state root {} '
- 'but no uncommitted found'.format(self, stateRoot))
def setVerkey(self, idr, verkey):
# This method acts as if guardianship is being terminated.
I think this should be a warning as it is
self.unCommitted[0][1].items()])
self.unCommitted = self.unCommitted[1:]
else:
+ logger.warning('{} is trying to commit a batch with state root {} '
+ 'but no uncommitted found'.format(self, stateRoot))
def setVerkey(self, idr, verkey):
# This method acts as if guardianship is being terminated. |
codereview_python_data_10721 | vidx = self.get_dimension_index(channels[0])
val_index = vidx - self.ndims
data = sliced.data[:,:, val_index]
- return Image(data, **dict(self.get_param_values(onlychanged=True),
vdims=[self.vdims[val_index]]))
elif len(channels) > 1:
raise KeyError("Channels can only be selected once in __getitem__")
Not your fault as this was already wrong but this isn't quite right, could use core.util.get_param_values instead of self.get_param_values here.
vidx = self.get_dimension_index(channels[0])
val_index = vidx - self.ndims
data = sliced.data[:,:, val_index]
+ return Image(data, **dict(util.get_param_values(self),
vdims=[self.vdims[val_index]]))
elif len(channels) > 1:
raise KeyError("Channels can only be selected once in __getitem__") |
codereview_python_data_10724 | # Don't upload uninteresting testcases (no crash) or if there is no log to
# correlate it with (not upload_output).
if upload_output:
- log_time = _get_testcase_time(file_path)
upload_testcase(file_path, log_time)
if upload_output:
# Include full output for uploaded logs (crash output, merge output, etc).
crash_result_full = CrashResult(return_code, crash_time, output)
-
- # To provide consistency between stats and logs, we use timestamp taken
- # from stats.
- log_time = _get_testcase_time(file_path)
log = prepare_log_for_upload(crash_result_full.get_stacktrace(),
return_code)
upload_log(log, log_time)
Can we save a _get_testcase_time call since it is called twice for upload_output branch
# Don't upload uninteresting testcases (no crash) or if there is no log to
# correlate it with (not upload_output).
if upload_output:
upload_testcase(file_path, log_time)
if upload_output:
# Include full output for uploaded logs (crash output, merge output, etc).
crash_result_full = CrashResult(return_code, crash_time, output)
log = prepare_log_for_upload(crash_result_full.get_stacktrace(),
return_code)
upload_log(log, log_time) |
codereview_python_data_10726 | return _create_compute_asset(item, 'compute.googleapis.com/ForwardingRule')
-def globalforwardingrule(item):
- return _create_compute_asset(item,
- 'compute.googleapis.com/GlobalForwardingRule')
-
-
def image(item):
return _create_compute_asset(item, 'compute.googleapis.com/Image')
This file shouldn't need to be changed, this is needed when going from the mock API to the CAI dump, not the other way around.
return _create_compute_asset(item, 'compute.googleapis.com/ForwardingRule')
def image(item):
return _create_compute_asset(item, 'compute.googleapis.com/Image') |
codereview_python_data_10732 | 'upgrade_group_objects_upgrade', 'username'])
self._add_option('exclude', self._get_option('excludepkgs'))
- self._add_option('config_file_path', PathOption("/etc/yum/yum.conf"))
self._add_option('plugins', BoolOption(False))
self._add_option('persistdir', PathOption("/var/lib/yum"))
self._add_option('system_cachedir', PathOption("/var/lib/yum"))
Sorry, I have found another problem with yum.conf. The yum config is at /etc/yum.conf on RHEL and Fedora, but we should keep the dnf config_file_path anyway because: dnf doesn't provide yum.conf; yum.conf is provided by the yum package (yum-deprecated); and maintaining two files is more difficult.
'upgrade_group_objects_upgrade', 'username'])
self._add_option('exclude', self._get_option('excludepkgs'))
self._add_option('plugins', BoolOption(False))
self._add_option('persistdir', PathOption("/var/lib/yum"))
self._add_option('system_cachedir', PathOption("/var/lib/yum")) |
codereview_python_data_10734 | self.nickname = self.config.get('nickname','')
self.team = self.config.get('team',0)
self.tutorial_run = True
- self.team_run == True
def work(self):
self.team_run = True?
self.nickname = self.config.get('nickname','')
self.team = self.config.get('team',0)
self.tutorial_run = True
+ self.team_run = True
def work(self): |
codereview_python_data_10738 | systemd_cmd = [
"systemd-run", "--unit={0}".format(logcollector.CGROUPS_UNIT),
- "--slice={0}".format(logcollector.CGROUPS_SLICE), "--scope"
]
# More info on resource limits properties in systemd here:
Instead of using a transient scope, I'd use a permanent slice. This has the advantage of allowing the user to override any of our settings with something that is more appropriate to their environment (you can see how the agent uses slices, or we can sync offline)
systemd_cmd = [
"systemd-run", "--unit={0}".format(logcollector.CGROUPS_UNIT),
+ "--slice={0}".format(logcollector.CGROUPS_SLICE)
]
# More info on resource limits properties in systemd here: |
codereview_python_data_10744 | if do_fork():
# TODO(metzman): Use `-fork=2` on Windows.
- arguments.append('%s%d' % (constants.FORK_FLAG, 1))
- fuzzing_strategies.append('%s_%d' % (strategy.FORK_STRATEGY, 1))
# Execute the fuzzer binary with original arguments.
fuzz_result = runner.fuzz(
What does the value of this argument mean? Would it be better to move it into another constant and use on both lines 801 and 802. Would be a bit more convenient when you enable `2`.
if do_fork():
# TODO(metzman): Use `-fork=2` on Windows.
+ num_fuzz_processes = 1
+ arguments.append('%s%d' % (constants.FORK_FLAG, num_fuzz_processes))
+ fuzzing_strategies.append('%s_%d' % (strategy.FORK_STRATEGY,
+ num_fuzz_processes))
# Execute the fuzzer binary with original arguments.
fuzz_result = runner.fuzz( |
codereview_python_data_10751 | links = []
if agent:
if await self._check_untrusted_agents_allowed(agent=agent, operation=operation,
- msg='no cleanup-link created'):
- links.extend(await self._generate_cleanup_links(operation=operation, agent=agent, link_status=link_status))
else:
for agent in operation.agents:
if await self._check_untrusted_agents_allowed(agent=agent, operation=operation,
This block and the block below in the else are identical blocks of code - should be a private function
links = []
if agent:
if await self._check_untrusted_agents_allowed(agent=agent, operation=operation,
+ msg='no cleanup-link created'):
+ links.extend(
+ await self._generate_cleanup_links(operation=operation, agent=agent, link_status=link_status)
+ )
else:
for agent in operation.agents:
if await self._check_untrusted_agents_allowed(agent=agent, operation=operation, |
codereview_python_data_10752 | yield (payload[i:i + chunk_size], True if i + chunk_size >= len(payload) else False)
for chunk, final in get_chunk(websocket_message.content):
- data = self.connections[other_conn].send(Message(data = chunk, message_finished = final))
other_conn.send(data)
if self.flow.stream:
- data = self.connections[other_conn].send(Message(data = event.data, message_finished = event.message_finished))
other_conn.send(data)
return True
same whitespace as above
yield (payload[i:i + chunk_size], True if i + chunk_size >= len(payload) else False)
for chunk, final in get_chunk(websocket_message.content):
+ data = self.connections[other_conn].send(Message(data=chunk, message_finished=final))
other_conn.send(data)
if self.flow.stream:
+ data = self.connections[other_conn].send(Message(data=event.data, message_finished=event.message_finished))
other_conn.send(data)
return True |
codereview_python_data_10755 | stub += f" : {self.target.name}({self.target.value})"
return stub
- def __init__(self, source=None, edge=None, target=None, score=1, origin=None):
super().__init__()
self.source = source
self.edge = edge
Much like with Facts, Relationships are defined as having, at the bare minimum, 1 source fact - without it, there's no way to actually interact with/search for the Relationship. Is there some error caused by this not having a default value that we've overlooked with the API upgrades?
stub += f" : {self.target.name}({self.target.value})"
return stub
+ def __init__(self, source, edge=None, target=None, score=1, origin=None):
super().__init__()
self.source = source
self.edge = edge |
codereview_python_data_10759 | def cleanup(self):
"""
- Allows defining cleanup actions to perform on plot deletion.
"""
@property
Why not raise ``NotImplementedError`` here?
def cleanup(self):
"""
+ Cleans up references to the plot on the attached Stream
+ subscribers.
"""
+ plots = self.traverse(lambda x: x, [GenericElementPlot])
+ for plot in plots:
+ for stream in set(plot.streams):
+ stream._subscribers = [
+ (p, subscriber) for p, subscriber in stream._subscribers
+ if get_method_owner(subscriber) not in plots]
@property |
codereview_python_data_10760 | else:
# Explicit count with a tab that doesn't exist.
return
- elif curtab.pin is True:
message.info("Tab is pinned!")
-
else:
curtab.openurl(cur_url)
Please remove the blank line here - it makes me think the if-block is over here, while an `else:` actually follows :wink:
else:
# Explicit count with a tab that doesn't exist.
return
+ elif curtab.data.pinned:
message.info("Tab is pinned!")
else:
curtab.openurl(cur_url) |
codereview_python_data_10764 | secretsmanager_client.delete_secret(SecretId=item)
-@pytest.fixture
-def only_localstack():
- if os.environ.get("TEST_TARGET") == "AWS_CLOUD":
- pytest.skip("test only applicable if run against localstack")
@pytest.fixture
Out of curiosity - did we make this change to allow dynamically assigning a value to `os.environ["TEST_TARGET"]` during test execution? I kind of liked the decorator style `@only_localstack` - makes the condition a bit more explicit. Looks like `skipif` also allows to specify a condition string, e.g. `pytest.mark.skipif('os.environ.get("TEST_TARGET") == "AWS_CLOUD"')` - could that be an option? (not sure if that gets lazily evaluated at runtime right before the execution of the annotated test method starts, though..)
secretsmanager_client.delete_secret(SecretId=item)
+only_localstack = pytest.mark.skipif(
+ os.environ.get("TEST_TARGET") == "AWS_CLOUD",
+ reason="test only applicable if run against localstack",
+)
@pytest.fixture |
codereview_python_data_10775 | pokemon.get('individual_stamina', 0)
]})
matched_pokemon.append(pokemon)
- #remove as egg and add as pokemon
- inventory.pokemons().remove(pokemon['id'])
- inventory.pokemons().add(inventory.Pokemon(pokemon))
continue
if "player_stats" in inv_data:
self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0)
@DeXtroTip it's better for 184 to go inside _apply_incubators (line 99), ie. once you get a reply that you successfully use an incubator on an egg, you remove it from the inventory. what do you think ?
pokemon.get('individual_stamina', 0)
]})
matched_pokemon.append(pokemon)
continue
if "player_stats" in inv_data:
self.km_walked = inv_data.get("player_stats", {}).get("km_walked", 0) |
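The reviewer above suggests moving the removed egg bookkeeping into `_apply_incubators`, right after a successful use-incubator reply. A rough sketch of that placement, reusing only the `inventory` calls from the removed lines; the surrounding code of `_apply_incubators` and the success check are assumptions, since they are not shown here:

```python
# Hypothetical placement inside _apply_incubators, after the use-incubator
# request returns a reply `ret`:
if ret.get('result', 0) == 1:  # assumed success code
    # Remove as egg and add as pokemon, mirroring the removed lines above.
    inventory.pokemons().remove(pokemon['id'])
    inventory.pokemons().add(inventory.Pokemon(pokemon))
```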
codereview_python_data_10787 | rel_dim = relation_dim * entity_dim
else:
rel_dim = relation_dim
- self.relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu() if args.mix_cpu_gpu and args.num_proc > 1 else device)
if model_name == 'TransE':
self.score_func = TransEScore(gamma)
should it be "or"? If it's mixed training, the external embedding is on CPU.
rel_dim = relation_dim * entity_dim
else:
rel_dim = relation_dim
+ self.relation_emb = ExternalEmbedding(args, n_relations, rel_dim, F.cpu() if args.mix_cpu_gpu else device)
if model_name == 'TransE':
self.score_func = TransEScore(gamma) |
codereview_python_data_10789 | ###############################################################################
# Working with multigraphs
# ~~~~~~~~~~~~~~~~~~~~~~~~
-# Many graph applications need parallel edges. By default class:`DGLGraph`
-# is already a multigraph
g_multi = dgl.DGLGraph()
g_multi.add_nodes(10)
Many graph applications need parallel edges, which class:`DGLGraph` supports by default.
###############################################################################
# Working with multigraphs
# ~~~~~~~~~~~~~~~~~~~~~~~~
+# Many graph applications need parallel edges,
+# which class:DGLGraph supports by default.
g_multi = dgl.DGLGraph()
g_multi.add_nodes(10) |
codereview_python_data_10796 | subsample_for_bin=50000, objective="regression",
min_split_gain=0, min_child_weight=5, min_child_samples=10,
subsample=1, subsample_freq=1, colsample_bytree=1,
- reg_alpha=0, reg_lambda=0, scale_pos_weight=1,
- is_unbalance=False, seed=0, nthread=-1, silent=True,
- sigmoid=1.0, huber_delta=1.0, max_position=20, label_gain=None,
drop_rate=0.1, skip_drop=0.5, max_drop=50,
uniform_drop=False, xgboost_dart_mode=False):
super(LGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves,
remove scale_pos_weight, is_unbalance, sigmoid, max_position and label_gain, they are not available for regression task.
subsample_for_bin=50000, objective="regression",
min_split_gain=0, min_child_weight=5, min_child_samples=10,
subsample=1, subsample_freq=1, colsample_bytree=1,
+ reg_alpha=0, reg_lambda=0,
+ seed=0, nthread=-1, silent=True,
+ huber_delta=1.0,
drop_rate=0.1, skip_drop=0.5, max_drop=50,
uniform_drop=False, xgboost_dart_mode=False):
super(LGBMRegressor, self).__init__(boosting_type=boosting_type, num_leaves=num_leaves, |
codereview_python_data_10816 | # now superseded by Tm_NN.
warnings.warn(
- "Tm_staluc may be depreciated in the future. Use Tm_NN instead.",
BiopythonDeprecationWarning,
)
if not rna:
If you're actually deprecating it, the message needs changing too.
# now superseded by Tm_NN.
warnings.warn(
+ "Tm_staluc is deprecated; please use Tm_NN instead.",
BiopythonDeprecationWarning,
)
if not rna: |
codereview_python_data_10821 | """Mark all notifications of the currently logged in user as read"""
request.user.notifications.mark_all_as_read()
- utm_source = request.GET.get("utm_source")
- if utm_source == "pontoon-addon-automation":
- log_ux_action(
- action_type="mark_all_notifications_as_read",
- experiment="Notifications 1.0",
- )
return JsonResponse({"status": True})
I think we should collect this ux action on all calls, and add `utm_source` as data.
"""Mark all notifications of the currently logged in user as read"""
request.user.notifications.mark_all_as_read()
+ log_ux_action(
+ action_type="mark_all_notifications_as_read",
+ experiment="Notifications 1.0",
+ data={"utm_source": request.GET.get("utm_source"),},
+ )
return JsonResponse({"status": True}) |
codereview_python_data_10826 | return False
-def is_outside_bio(name):
- name = name.split(".")
- if "Bio" in name[0]:
- return False
- return True
-
-
# The default verbosity (not verbose)
VERBOSITY = 0
I'd use ``name[0].startswith("Bio")`` or maybe even ``name[0] in ["Bio", "BioSQL"]`` to be explicit?
return False
# The default verbosity (not verbose)
VERBOSITY = 0 |
codereview_python_data_10833 | with_label=True,
test_mode=False,
extra_aug=None,
- keep_ratio_rescale=True,
- dataset_scale_factor=1.):
- # in test mode or not
- self.test_mode = test_mode
- # if scale the base dataset
- self.dataset_scale_factor = dataset_scale_factor
# load annotations (and proposals)
self.img_infos = self.load_annotations(ann_file)
if proposal_file is not None:
Implement this function as a wrapper dataset `RepeatDataset`.
with_label=True,
test_mode=False,
extra_aug=None,
+ keep_ratio_rescale=True):
# load annotations (and proposals)
self.img_infos = self.load_annotations(ann_file)
if proposal_file is not None: |
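The reviewer above suggests implementing the scaling behaviour as a wrapper dataset rather than a `dataset_scale_factor` argument on the base class. A minimal sketch of such a `RepeatDataset` wrapper, shown as an illustration of the idea rather than the actual implementation:

```python
class RepeatDataset(object):
    """Wrap another dataset and repeat it `times` times per epoch."""

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        self._ori_len = len(dataset)

    def __getitem__(self, idx):
        # Map the repeated index back into the wrapped dataset.
        return self.dataset[idx % self._ori_len]

    def __len__(self):
        return self.times * self._ori_len
```

The training loop then simply sees a dataset that is `times` as long as the wrapped one.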
codereview_python_data_10837 | @wraps(func)
def decorator(*args, **kwargs):
from listenbrainz.webserver.errors import APIServiceUnavailable
- if not current_app.config["SQLALCHEMY_TIMESCALE_URI"]:
raise APIServiceUnavailable("The listen database is momentarily offline. " +
"Please wait a few minutes and try again.")
return func(*args, **kwargs)
we don't need to make this change for today's release, but I think that this check would be better to verify if the timescale listenstore is None instead of looking at the config option. that way we can support bringing up LB with these error messages even if there is a URI set but the host is down (for example). I'll open a ticket after the review.
@wraps(func)
def decorator(*args, **kwargs):
from listenbrainz.webserver.errors import APIServiceUnavailable
+ if timescale_connection._ts is None:
raise APIServiceUnavailable("The listen database is momentarily offline. " +
"Please wait a few minutes and try again.")
return func(*args, **kwargs) |
codereview_python_data_10850 | def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
- Accepts all Seq objects and Strings as objects to be concatinated with the spacer
>>> spacer = MutableSeq('NNNNN')
- >>> seqlist = list([Seq("AAA"),Seq("TTT"),Seq("PPP")])
>>> concatenated = spacer.join(seqlist)
>>> concatenated
MutableSeq('AAANNNNNTTTNNNNNPPP')
Spaces after the commas to follow PEP8 style please. Also there is no reason to have ``list(...)`` here.
def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
+ Accepts all Seq objects and Strings as objects to be concatenated with the spacer
>>> spacer = MutableSeq('NNNNN')
+ >>> seqlist = [Seq("AAA"), Seq("TTT"), Seq("PPP")]
>>> concatenated = spacer.join(seqlist)
>>> concatenated
MutableSeq('AAANNNNNTTTNNNNNPPP') |
codereview_python_data_10856 | response.headers['Content-Length'] = str(len(response._content))
-# Replace localstack account id by moto account id
-def fix_account_id(resource_arn):
- search = r'arn{col}aws{col}([^:%]+){col}([^:%]*){col}{acc}{col}'.format(col=':', acc=TEST_AWS_ACCOUNT_ID)
- replace = r'arn{col}aws{col}\1{col}\2{col}{acc}{col}'.format(col=':', acc=MOTO_CLOUDFORMATION_ACCOUNT_ID)
- return re.sub(search, replace, resource_arn)
-
-
class ProxyListenerCloudFormation(ProxyListener):
def forward_request(self, method, path, data, headers):
if method == 'OPTIONS':
Looks like this function is duplicating functionality that we already have elsewhere. Can we please use `fix_account_id_in_arns()` from `aws_stack.py`, to avoid duplication. We can call it from line 226 as: ``` req_data['ChangeSetName'] = fix_account_id_in_arns(req_data['ChangeSetName'], existing=TEST_AWS_ACCOUNT_ID, replace=MOTO_CLOUDFORMATION_ACCOUNT_ID) ```
response.headers['Content-Length'] = str(len(response._content))
class ProxyListenerCloudFormation(ProxyListener):
def forward_request(self, method, path, data, headers):
if method == 'OPTIONS': |
codereview_python_data_10860 | LOGGER.debug('Sending inventory summary by email.')
email_summary_config = (
- self.notifier_config.get('inventory').get('email_summary')
- )
try:
if self.notifier_config.get('email_connector'):
Move this parenthesis back to the previous line.
LOGGER.debug('Sending inventory summary by email.')
email_summary_config = (
+ self.notifier_config.get('inventory').get('email_summary'))
try:
if self.notifier_config.get('email_connector'): |
codereview_python_data_10861 | """
# This copes with mixed strand features & all on reverse:
parts = [loc.extract(parent_sequence) for loc in self.parts]
- f_seq = "".join(parts)
return f_seq
This returns a string, not a Seq. How about: ```python return Seq("").join(parts) ```
"""
# This copes with mixed strand features & all on reverse:
parts = [loc.extract(parent_sequence) for loc in self.parts]
+ f_seq = Seq("").join(parts)
return f_seq |
codereview_python_data_10865 | return _callback
-def record_evaluation(eval_result: dict) -> CallbackWithAttributes:
"""Create a callback that records the evaluation history into ``eval_result``.
Parameters
```suggestion def record_evaluation(eval_result: Dict[str, Dict[str, List[Any]]]) -> CallbackWithAttributes: ``` I think this could be a bit more specific, and being more specific will get us more informative errors from `mypy`.
return _callback
+def record_evaluation(eval_result: Dict[str, Dict[str, List[Any]]]) -> CallbackWithAttributes:
"""Create a callback that records the evaluation history into ``eval_result``.
Parameters |
codereview_python_data_10866 | # BSD License
import networkx as nx
from networkx.utils import reverse_cuthill_mckee_ordering
-try:
- import numpy as np
-except ImportError:
- raise ImportError('rcm requires NumPy ', 'http://scipy.org/')
# build low-bandwidth numpy matrix
G = nx.grid_2d_graph(3, 3)
For the examples, I would just import things and let them fail. I don't feel strongly about this, but it seems like the examples should be what people normally would write in a script, and I don't think they should normally wrap the import in a try/except.
# BSD License
import networkx as nx
from networkx.utils import reverse_cuthill_mckee_ordering
+import numpy as np
# build low-bandwidth numpy matrix
G = nx.grid_2d_graph(3, 3) |
codereview_python_data_10872 | def find_cpp_operation_type(self, operator, operand_types=None):
if operand_types is not None:
- operands = operand_types # note that the first argument is not
- # necessarily "self" (non-member operators) so complete types
- # must be specified
else:
operands = [self]
# pos == None => no errors
```suggestion # Note that the first argument is not necessarily "self" # (with non-member operators), so complete types # must be specified. operands = operand_types ```
def find_cpp_operation_type(self, operator, operand_types=None):
if operand_types is not None:
+ # Note that the first argument is not necessarily "self"
+ # (with non-member operators), so complete types
+ # must be specified.
+ operands = operand_types
else:
operands = [self]
# pos == None => no errors |
codereview_python_data_10880 | >>> target = LocalTarget('~/some_file.txt')
>>> target = LocalTarget(pathlib.Path('~') / 'some_file.txt')
- >>> target.exists()
False
"""
This kind of docs will try to execute (and verify that you get `False`). Hence why Travis turns red. Please change the doc style to the non executing or make it side effect free. Then we can merge this. :)
>>> target = LocalTarget('~/some_file.txt')
>>> target = LocalTarget(pathlib.Path('~') / 'some_file.txt')
+ >>> LocalTarget('lol.txt').exists()
False
""" |
codereview_python_data_10886 | if average_size is not None:
from hypothesis._settings import note_deprecation
note_deprecation(
- 'The average_size argument has been disabled, is deprecated, '
- 'and will be removed in a future version. Upgrades since '
- 'Hypothesis 1.x mean we can generate useful data without this '
- 'hint. Please open an issue if the default distribution of '
- 'examples does not work for your tests.'
)
check_valid_size(min_size, 'min_size')
It might be worth making this message slightly more explicit about what the user should do: e.g. "You should remove this argument as it no longer has any effect"
if average_size is not None:
from hypothesis._settings import note_deprecation
note_deprecation(
+ 'You should remove the average_size argument, because it is '
+ 'deprecated and no longer has any effect. Please open an issue '
+ 'if the default distribution of examples does not work for you.'
)
check_valid_size(min_size, 'min_size') |
codereview_python_data_10888 | """
if len(pos_args) != 1:
return node
- # TODO Doubt: What's the case for it
if pos_args[0].is_sequence_constructor and not pos_args[0].args:
del pos_args[0]
elif isinstance(pos_args[0], ExprNodes.ListNode):
This transforms something like `frozenset([])` into `frozenset()`. It just avoids building an empty list/tuple/whatever.
"""
if len(pos_args) != 1:
return node
+ # If args is an empty sequence, remove it
if pos_args[0].is_sequence_constructor and not pos_args[0].args:
del pos_args[0]
elif isinstance(pos_args[0], ExprNodes.ListNode): |
codereview_python_data_10894 | model_name (str): Model name.
progress_queue (Queue): Progress queue.
"""
- # pylint: disable=broad-except
try:
self.scanner.run(model_name,
progress_queue,
self.service_config)
- except Exception as e:
LOGGER.error(e)
progress_queue.put('Error occurred during the scanning process.')
progress_queue.put(None)
Move this pylint disable to the same line as the except, so that this is not propagated down through this whole file. Fix above too. ``` except Exception as e: # pylint: disable=broad-except ```
model_name (str): Model name.
progress_queue (Queue): Progress queue.
"""
try:
self.scanner.run(model_name,
progress_queue,
self.service_config)
+ except Exception as e: # pylint: disable=broad-except
LOGGER.error(e)
progress_queue.put('Error occurred during the scanning process.')
progress_queue.put(None) |
codereview_python_data_10897 | learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
def fit_fc(learn:Learner, tot_epochs:int=None, lr:float=defaults.lr, moms:Tuple[float,float]=(0.95,0.85), start_pct:float=0.72,
- wd:float=None, callbacks:Optional[CallbackList]=None, show_curve:bool=False)->None:
"Fit a model with Flat Cosine Annealing"
max_lr = learn.lr_range(lr)
callbacks = listify(callbacks)
Some of the arguments here are not used (`show_curve` for instance)
learn.fit(cyc_len, max_lr, wd=wd, callbacks=callbacks)
def fit_fc(learn:Learner, tot_epochs:int=None, lr:float=defaults.lr, moms:Tuple[float,float]=(0.95,0.85), start_pct:float=0.72,
+ callbacks:Optional[CallbackList]=None)->None:
"Fit a model with Flat Cosine Annealing"
max_lr = learn.lr_range(lr)
callbacks = listify(callbacks) |
codereview_python_data_10903 | # needed.
return
- with open(self.suppress_file, 'r',
- encoding='utf-8', errors='ignore') as file_handle:
self.__suppress_info = suppress_file_handler.\
get_suppress_data(file_handle)
What kind of exception do we catch here?
# needed.
return
+ with open(self.suppress_file, 'r') as file_handle:
self.__suppress_info = suppress_file_handler.\
get_suppress_data(file_handle) |
codereview_python_data_10906 | self.data = (x, y, text, direction, points, arrowstyle)
- return self.__class__(*args, **settings)
-
def dimension_values(self, dimension, expanded=True, flat=True):
index = self.get_dimension_index(dimension)
if index == 0:
Maybe I'm confused by the diff but shouldn't this last line be removed as well?
self.data = (x, y, text, direction, points, arrowstyle)
def dimension_values(self, dimension, expanded=True, flat=True):
index = self.get_dimension_index(dimension)
if index == 0: |
codereview_python_data_10910 | weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
- Group/query data, only used for ranking task.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10]``, that means that you have 5 groups, where the first 10 records are in the first group, records 11-30 are in the second group, etc.
Repeated info. ```suggestion Group/query data. Only used in the learning-to-rank task. ```
weight : list, numpy 1-D array, pandas Series or None, optional (default=None)
Weight for each instance.
group : list, numpy 1-D array, pandas Series or None, optional (default=None)
+ Group/query data.
Only used in the learning-to-rank task.
sum(group) = n_samples.
For example, if you have a 100-document dataset with ``group = [10, 20, 40, 10, 10]``, that means that you have 5 groups, where the first 10 records are in the first group, records 11-30 are in the second group, etc.
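
Editor's note: a short usage sketch of the group parameter described in this docstring. The feature matrix, labels and training parameters below are invented for illustration; only the group semantics mirror the text above.

import numpy as np
import lightgbm as lgb

# 100 documents split into 5 queries of sizes 10, 20, 40, 10 and 10.
X = np.random.rand(100, 3)           # synthetic features
y = np.random.randint(0, 2, 100)     # synthetic relevance labels
group = [10, 20, 40, 10, 10]         # sum(group) == n_samples == 100

train_set = lgb.Dataset(X, label=y, group=group)
ranker = lgb.train({"objective": "lambdarank"}, train_set, num_boost_round=5)
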
codereview_python_data_10913 | -# This code is part of the Biopython distribution and governed by its
-# license. Please see the LICENSE file that should have been included
-# as part of this package.
"""Provide code to access the EBI tools."""
Can we have all of ``Bio.EBI`` dual licenced from the start please? See #898 and our current wording as used in ``Bio/SeqRecord.py``, ```python # This file is part of the Biopython distribution and governed by your # choice of the "Biopython License Agreement" or the "BSD 3-Clause License". # Please see the LICENSE file that should have been included as part of this # package. ```
+# Copyright 2017 by Berenice Batut (berenice.batut@gmail.com). All rights reserved.
+# Revision copyright 2017 by Francesco Gastaldello. All rights reserved.
+#
+# This file is part of the Biopython distribution and governed by your
+# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
+# Please see the LICENSE file that should have been included as part of this
+# package.
"""Provide code to access the EBI tools.""" |
codereview_python_data_10914 | self.assertEqual(len(alignments), 4)
self.assertEqual(len(alignments[0]), 2)
for a in alignments:
- self.assertEqual((len(a), a.get_alignment_length()))
for r in a:
print("%s %s %i" % (r.seq, r.id, r.annotations["original_length"]))
# print(a.annotations)
Too many brackets - you are passing a single argument (a tuple of 2 elements) to the ``assertEqual`` method. Try: ``` self.assertEqual(len(a), a.get_alignment_length()) ``` This is why TravisCI is currently failing, ``` ====================================================================== ERROR: test_example (test_AlignIO_FastaIO_unittest.TestSelf) ---------------------------------------------------------------------- Traceback (most recent call last): File "/home/travis/build/biopython/biopython/Tests/test_AlignIO_FastaIO_unittest.py", line 231, in test_example self.assertEqual((len(a), a.get_alignment_length())) TypeError: assertEqual() missing 1 required positional argument: 'second' ```
self.assertEqual(len(alignments), 4)
self.assertEqual(len(alignments[0]), 2)
for a in alignments:
+ self.assertEqual(len(a), a.get_alignment_length())
for r in a:
print("%s %s %i" % (r.seq, r.id, r.annotations["original_length"]))
# print(a.annotations)
codereview_python_data_10915 | from webserver.redis_connection import _redis
import db.user
-#TODO
-# Add rate limit support to scraper
-
# Using _ and not - here so I can re-use these keys for use in the g object
RATELIMIT_PER_TOKEN_KEY = "rate_limit_per_token_limit"
RATELIMIT_PER_IP_KEY = "rate_limit_per_ip_limit"
Is that done?
from webserver.redis_connection import _redis
import db.user
# Using _ and not - here so I can re-use these keys for use in the g object
RATELIMIT_PER_TOKEN_KEY = "rate_limit_per_token_limit"
RATELIMIT_PER_IP_KEY = "rate_limit_per_ip_limit"
codereview_python_data_10929 | """
suggested_filename = utils.sanitize_filename(suggested_filename)
- if url.toDisplayString()[:7] == "file://":
- return "FILE"
-
q = usertypes.Question(parent)
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format(
This "magic" value seems quite odd. Why not handle this in `handle_download` before even calling `get_filename_question`?
"""
suggested_filename = utils.sanitize_filename(suggested_filename)
q = usertypes.Question(parent)
q.title = "Save file to:"
q.text = "Please enter a location for <b>{}</b>".format( |
codereview_python_data_10930 | def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
- use the more explicit get() method instead. Caution: operation is O(n),
- not O(1)."""
return self._find_no_duplicates(name)
Switch `Caution:` to `.. warning::` here too?
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
+ use the more explicit get() method instead.
+
+ .. warning:: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
codereview_python_data_10932 | .. versionchanged:: 0.11.0
Keyword *suppress_progmet* and *progress_meter_freq* were removed.
'''
- try:
- from scipy import sparse
- except ImportError:
- print("scipy.sparse cannot be imported")
- return
-
if returntype == "numpy":
adj = (distance_array(coord, coord, box=box) < cutoff)
This can be put in the sparse return branch, and it needs to raise a proper ImportError rather than returning nothing
.. versionchanged:: 0.11.0
Keyword *suppress_progmet* and *progress_meter_freq* were removed.
'''
+
+ from scipy import sparse
if returntype == "numpy":
adj = (distance_array(coord, coord, box=box) < cutoff)
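
Editor's note: the merged code above imports scipy unconditionally, while the reviewer asked for the import to live in the sparse branch and fail with a proper ImportError instead of returning nothing. A hedged sketch of that suggestion (the surrounding function and the 'sparse' return-type name are assumed from the snippet):

if returntype == "sparse":
    try:
        from scipy import sparse
    except ImportError:
        raise ImportError("returntype='sparse' requires scipy; "
                          "install scipy or use returntype='numpy'")
    # ... build and return the sparse adjacency matrix here
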
codereview_python_data_10935 | asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
- asset = self.equity_for_id(sid)
elif asset_type == 'future':
- asset = self.futures_contract_for_id(sid)
else:
asset = None
Do we have any contexts in which this is False? We have it as True by default. The variable itself may just be a crufty artifact.
asset_type = self.asset_type_by_sid(sid)
if asset_type == 'equity':
+ asset = self._retrieve_equity(sid)
elif asset_type == 'future':
+ asset = self._retrieve_futures_contract(sid)
else:
asset = None
codereview_python_data_10939 | lgb_train,
num_boost_round=100,
valid_sets=lgb_eval,
- # you can use a list to represent multiple valid_datas/valid_names
- # don't use tuple, tuple is used to represent one dataset
early_stopping_rounds=10)
# save model to file
you can delete this line too
lgb_train,
num_boost_round=100,
valid_sets=lgb_eval,
early_stopping_rounds=10)
# save model to file
codereview_python_data_10950 | self.dbutils.execute(self.cursor, sql, args)
def executemany(self, sql, args):
- """Just execute an sql command.
"""
if os.name == "java":
sql = sql.replace("%s", "?")
Many commands? e.g. `Execute many SQL commands.`
self.dbutils.execute(self.cursor, sql, args)
def executemany(self, sql, args):
+ """Execute many sql commands.
"""
if os.name == "java":
sql = sql.replace("%s", "?") |
codereview_python_data_10951 | def get_pubkey_from_prv(self, file_name):
if not os.path.exists(file_name):
- raise IOError("File not found: {0}".format(file_name))
else:
cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd,
file_name)
Not the right way to raise IOError. While this sets the message, it doesn't set the errno member of the exception instance, it doesn't set the filename, and it puts too much information in strerror. If you have to do this, this is the right way to do it: ``` raise IOError(errno.ENOENT, "File not found", file_name) ```
def get_pubkey_from_prv(self, file_name):
if not os.path.exists(file_name):
+ raise IOError(errno.ENOENT, "File not found", file_name)
else:
cmd = "{0} rsa -in {1} -pubout 2>/dev/null".format(self.openssl_cmd,
file_name)
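
Editor's note: the three-argument form recommended above populates the errno, strerror and filename attributes of the exception, which the string-only form does not. A small self-contained check (the path is a made-up example):

import errno

try:
    raise IOError(errno.ENOENT, "File not found", "/tmp/missing.prv")
except IOError as e:
    assert e.errno == errno.ENOENT
    assert e.strerror == "File not found"
    assert e.filename == "/tmp/missing.prv"
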
codereview_python_data_10955 | comm_id=plot.comm.id,
plot_id=plot_id)
js = '\n'.join([js, comm_js])
- element_id = plot_id
- html = "<div id='%s' style='display: table; margin: 0 auto;'>%s</div>" % (element_id, html)
if not os.environ.get('HV_DOC_HTML', False) and js is not None:
- js = embed_js.format(element_id=element_id, plot_id=plot_id, html=html) + js
data['text/html'] = html
if js:
Why define ``element_id`` if you can just replace this with ``plot.id`` (or ``plot_id``)?
comm_id=plot.comm.id,
plot_id=plot_id)
js = '\n'.join([js, comm_js])
+ html = "<div id='%s' style='display: table; margin: 0 auto;'>%s</div>" % (plot_id, html)
if not os.environ.get('HV_DOC_HTML', False) and js is not None:
+ js = embed_js.format(widget_id=widget_id, plot_id=plot_id, html=html) + js
data['text/html'] = html
if js:
codereview_python_data_10960 | rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
- rpn_losses = self.rpn_head.loss(*rpn_loss_inputs)
losses.update(rpn_losses)
proposal_inputs = rpn_outs + (img_meta, self.test_cfg.rpn)
Could you explain the reasoning behind this modification?
rpn_outs = self.rpn_head(x)
rpn_loss_inputs = rpn_outs + (gt_bboxes, img_meta,
self.train_cfg.rpn)
+ rpn_losses = self.rpn_head.loss(
+ *rpn_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses.update(rpn_losses)
proposal_inputs = rpn_outs + (img_meta, self.test_cfg.rpn)
codereview_python_data_10961 | def populate_indices(self):
"""Populate keywords for fast test case list searching."""
- self.keywords = list(search_tokenizer.tokenize(self.name))
def _pre_put_hook(self):
"""Pre-put hook."""
actually, let's make it so that we can search by project as well.
def populate_indices(self):
"""Populate keywords for fast test case list searching."""
+ self.keywords = list(
+ search_tokenizer.tokenize(self.name)
+ | search_tokenizer.tokenize(self.project))
def _pre_put_hook(self):
"""Pre-put hook.""" |
codereview_python_data_10962 | update_state = 'Off' if state == 'On' else 'On'
print(f"Setting control state to '{update_state}'.")
response = update_routing_control_state(routing_control_arn, cluster_endpoints, update_state)
- status = response.get('ResponseMetadata', {}).get('HTTPStatusCode', 'Unknown')
- if status == 200:
print('Success!')
else:
- print(f'Something went wrong. Status: {status}.')
print('-'*88)
# snippet-end:[python.example_code.route53-recovery-cluster.Scenario_SetControlState]
We can just update it to be as follows: `if response: print("Success") else: print("Error")`
update_state = 'Off' if state == 'On' else 'On'
print(f"Setting control state to '{update_state}'.")
response = update_routing_control_state(routing_control_arn, cluster_endpoints, update_state)
+ if response:
print('Success!')
else:
+ print(f'Something went wrong.')
print('-'*88)
# snippet-end:[python.example_code.route53-recovery-cluster.Scenario_SetControlState]
codereview_python_data_10968 | props.merge(scenario.get("properties"))
props.merge(self.execution.get("properties"))
- junit_version = str(self.settings.get("junit-version", "5"))
if junit_version == "5":
props.merge({"junit_version": 5})
default should be JUnit 4
props.merge(scenario.get("properties"))
props.merge(self.execution.get("properties"))
+ junit_version = str(self.settings.get("junit-version", "4"))
if junit_version == "5":
props.merge({"junit_version": 5}) |
codereview_python_data_10974 | return mda.Universe(GSD)
def test_gsd_positions(GSD_U):
- assert_almost_equal(TXYZ_U.atoms.positions[0],
- [-5.40, -10.2, 10.2])
I'll try to figure out how to do this based on other examples in the testsuite
return mda.Universe(GSD)
def test_gsd_positions(GSD_U):
+ # first frame first particle
+ ts = GSD_U.trajectory[0]
+ assert_almost_equal(GSD_U.atoms.positions[0],
+ [ -5.4000001 , -10.19999981, -10.19999981])
+ # second frame first particle
+ ts = GSD_U.trajectory[1]
+ assert_almost_equal(GSD_U.atoms.positions[0],
+ [ -5.58348083, -9.98546982, -10.17657185])