id | content
---|---|
codereview_python_data_6013 | if status is 1:
self.api.get_inventory()
response_dict = self.api.call()
- if self.config.smart_catch:
id_cp_tuples = self.get_id_cp_tuples_for_pokemonid(pokemon['pokemon_data']['pokemon_id'],response_dict)
prev_id, prev_cp = (0,0)
for id_cp in id_cp_tuples:
I remember there's another function to get_inventory(), right?
if status is 1:
self.api.get_inventory()
response_dict = self.api.call()
+ if self.config.cp == "smart":
id_cp_tuples = self.get_id_cp_tuples_for_pokemonid(pokemon['pokemon_data']['pokemon_id'],response_dict)
prev_id, prev_cp = (0,0)
for id_cp in id_cp_tuples: |
codereview_python_data_6017 | """
minimum_size = self.minimumTabSizeHint(index)
height = config.get('tabs', 'tabbar-size')
if self.vertical:
confwidth = str(config.get('tabs', 'width'))
if confwidth.endswith('%'):
I think the setting should still have a value like `auto` by default, which then here still uses the font height - otherwise it'll be confusing for people when they change the font size and the font is just cut off (or worse - they already have a bigger font size, and suddenly the bar is smaller). If you need help with figuring out how to add a new config type for that (i.e. `auto` or an integer >= 8), please let me know and I'll help!
"""
minimum_size = self.minimumTabSizeHint(index)
height = config.get('tabs', 'tabbar-size')
+ if height == -1:
+ height = self.fontMetrics().height()
if self.vertical:
confwidth = str(config.get('tabs', 'width'))
if confwidth.endswith('%'): |
codereview_python_data_6024 | # Except we do want to notify on a multipart upload completion, which does use a query.
elif method == 'POST' and query.startswith('uploadId'):
return True
- else:
- return False
# instantiate listener
nitpick: `else` technically not needed
# Except we do want to notify on a multipart upload completion, which does use a query.
elif method == 'POST' and query.startswith('uploadId'):
return True
# instantiate listener |
codereview_python_data_6027 | config = copy.deepcopy(self.engine.config)
- for key in ("cli", "cli-aliases"):
- if key in config:
- del config[key]
-
provisioning = config.get(Provisioning.PROV)
self._filter_unused_modules(config, provisioning)
why not use existing blacklist config?
config = copy.deepcopy(self.engine.config)
provisioning = config.get(Provisioning.PROV)
self._filter_unused_modules(config, provisioning) |
codereview_python_data_6028 | ann_id = self.coco.get_ann_ids(img_ids=[i])
ann_ids.extend(ann_id)
assert len(set(ann_ids)) == len(
- ann_ids), "Annotation ids in '{}' are not unique!".format(ann_file)
return data_infos
def get_ann_info(self, idx):
use f string instead
ann_id = self.coco.get_ann_ids(img_ids=[i])
ann_ids.extend(ann_id)
assert len(set(ann_ids)) == len(
+ ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos
def get_ann_info(self, idx): |
codereview_python_data_6029 | self.assertTrue(os.path.exists(path))
JMeterExecutor.JMETER_DOWNLOAD_LINK = jmeter_link
JMeterExecutor.PLUGINS_DOWNLOAD_TPL = plugins_link
JMeterExecutor.JMETER_VER = jmeter_ver
This was intentional, to check that it does not install it again. And to cover other branches of the code.
self.assertTrue(os.path.exists(path))
+ obj = JMeterExecutor()
+ obj.engine = EngineEmul()
+ obj.settings.merge({"path": path})
+
+ obj.execution = BetterDict()
+ obj.execution.merge({"scenario": {"requests": ["http://localhost"]}})
+
+ obj.prepare()
+
JMeterExecutor.JMETER_DOWNLOAD_LINK = jmeter_link
JMeterExecutor.PLUGINS_DOWNLOAD_TPL = plugins_link
JMeterExecutor.JMETER_VER = jmeter_ver |
codereview_python_data_6031 | num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
- labels = anchors.new_zeros(
- num_valid_anchors, dtype=torch.long) + background_label
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds
We could change it to `labels = anchors.new_empty(num_valid_anchors, dtype=torch.long).fill_(background_label)`.
num_valid_anchors = anchors.shape[0]
bbox_targets = torch.zeros_like(anchors)
bbox_weights = torch.zeros_like(anchors)
+ labels = anchors.new_empty(
+ num_valid_anchors, dtype=torch.long).fill_(background_label)
label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)
pos_inds = sampling_result.pos_inds |
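The record above swaps `new_zeros(...) + constant` for `new_empty(...).fill_(constant)`, which avoids one temporary tensor. A minimal standalone sketch (tensor shapes and the label value are made up for illustration):

```python
import torch

anchors = torch.rand(8, 4)
background_label = 80  # assumed value, for illustration only

# original form: allocate zeros, then add the constant (creates a temporary)
labels_a = anchors.new_zeros(8, dtype=torch.long) + background_label
# suggested form: allocate once and fill in place
labels_b = anchors.new_empty(8, dtype=torch.long).fill_(background_label)

assert torch.equal(labels_a, labels_b)
```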
codereview_python_data_6032 | BACKBONES = Registry('backbone')
NECKS = Registry('neck')
-UPPER_NECKS = Registry('upper_neck')
ROI_EXTRACTORS = Registry('roi_extractor')
HEADS = Registry('head')
DETECTORS = Registry('detector')
All `upper_neck` can be renamed to `shared_head`, which may be more clear.
BACKBONES = Registry('backbone')
NECKS = Registry('neck')
+SHARED_HEADS = Registry('shared_head')
ROI_EXTRACTORS = Registry('roi_extractor')
HEADS = Registry('head')
DETECTORS = Registry('detector') |
codereview_python_data_6034 | schema_seq = int(tar.extractfile(member).read().strip())
if schema_seq != LISTENS_DUMP_SCHEMA_VERSION:
raise SchemaMismatchException('Incorrect schema version! Expected: %d, got: %d.'
- 'Please, get the latest version of the dump.'
% (LISTENS_DUMP_SCHEMA_VERSION, schema_seq))
elif file_name.endswith('.listens'):
A better message might be: "Please ensure that the data dump version matches the code version in order to import the data"
schema_seq = int(tar.extractfile(member).read().strip())
if schema_seq != LISTENS_DUMP_SCHEMA_VERSION:
raise SchemaMismatchException('Incorrect schema version! Expected: %d, got: %d.'
+ 'Please ensure that the data dump version matches the code version '
+ 'in order to import the data.'
% (LISTENS_DUMP_SCHEMA_VERSION, schema_seq))
elif file_name.endswith('.listens'): |
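The fix above relies on implicit concatenation of adjacent string literals, which inserts no separator, so each fragment needs its own trailing space. A tiny self-contained reminder (not code from the project):

```python
# Adjacent string literals are joined with no separator between them.
msg = ('Incorrect schema version! '
       'Please ensure that the data dump version matches the code version '
       'in order to import the data.')
print(msg)  # reads as one sentence because each fragment ends with a space
```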
codereview_python_data_6040 | except StopIteration:
raise ValueError("Problem in misc lines before sequence")
-
-if __name__ == "__main__":
- from Bio._utils import run_doctest
- run_doctest(verbose=0)
Are there any doctests in this file? If not, there is no reason to have these three lines of code.
except StopIteration:
raise ValueError("Problem in misc lines before sequence")
+ |
codereview_python_data_6042 | def search(text, output_format="tab", sort="score", oragnism="", columns=(),
isoform=False, compress=False, offset=0, limit=0):
"""Perform a query over the UniProt API.
-
More at: https://www.uniprot.org/help/api_queries
"""
cgi = "https://www.uniprot.org/uniprot/?"
We should probably provide a little more help here, specifically that columns should be a list/tuple of strings, and perhaps that isoform and compress are treated as booleans, with offset and limit expected to be integers.
def search(text, output_format="tab", sort="score", oragnism="", columns=(),
isoform=False, compress=False, offset=0, limit=0):
"""Perform a query over the UniProt API.
+
+ output_format: options:
+ html, tab, xls, fasta, gff, txt, xml, rdf, list, rss
+ columns: list/tuple of strings from the following options:
+ citation, clusters, comments, domains, domain, ec, id, entry name,
+ existence, families, features, genes, go, go-id, interactor,
+ keywords, last-modified, length, organism, organism-id, pathway,
+ protein names, reviewed, sequence, 3d, version, virus hosts
More at: https://www.uniprot.org/help/api_queries
"""
cgi = "https://www.uniprot.org/uniprot/?" |
codereview_python_data_6045 | build_revision = fuzzer_utils.get_build_revision()
job = environment.get_value('JOB_NAME')
# fuzzer name is filled by fuzz_task.
- testcase_run = fuzzer_stats.TestcaseRun('', job, build_revision,
current_timestamp())
testcase_run['command'] = fuzzer_command
Can we use None instead of ''
build_revision = fuzzer_utils.get_build_revision()
job = environment.get_value('JOB_NAME')
# fuzzer name is filled by fuzz_task.
+ testcase_run = fuzzer_stats.TestcaseRun(None, job, build_revision,
current_timestamp())
testcase_run['command'] = fuzzer_command |
codereview_python_data_6047 | async def upload(self, request):
try:
reader = await request.multipart()
- exfil_dir = await self.create_unique_exfil_sub_directory()
while True:
field = await reader.next()
if not field:
good idea. changes: - make this a private function (_) - put this function under a docstring called """ PRIVATE """ (look at the bottom of the operation_svc.py for an example of where the private functions are stored)
async def upload(self, request):
try:
reader = await request.multipart()
+ exfil_dir = await self._create_unique_exfil_sub_directory()
while True:
field = await reader.next()
if not field: |
codereview_python_data_6050 | Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
- # NOTE the batched image size information may be useful, e.g.
- # in DETR, this is needed for the construction of masks, which is
- # then used for the transformer_head.
- input_img_shape = tuple(img.size()[-2:])
- for img_meta in img_metas:
- img_meta['input_img_shape'] = input_img_shape
-
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
Are these modifications duplicated? Or should we move them into the base detector?
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
+ super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore) |
codereview_python_data_6052 | self._lines = None
def get_stock_info(self, product, options):
- "Hook for implementing strategies that depend on product options"
return self.strategy.fetch_for_product(product)
def add_product(self, product, quantity=1, options=None):
Let's properly decorate docstring with three double quotes.
self._lines = None
def get_stock_info(self, product, options):
+ """
+ Hook for implementing strategies that depend on product options
+ """
+ # The built-in strategies don't use options, so initially disregard
+ # them.
return self.strategy.fetch_for_product(product)
def add_product(self, product, quantity=1, options=None): |
codereview_python_data_6057 | super().__init__(database_manager)
self.write_req_validator = write_req_validator
- def additional_dynamic_validation(self, request: Request, req_pp_time: Optional[int]):
- pass
-
def authorize(self, request):
self.write_req_validator.validate(request,
[AuthActionEdit(txn_type=LEDGERS_FREEZE,
Maybe call super implementation, or even better omit this override?
super().__init__(database_manager)
self.write_req_validator = write_req_validator
def authorize(self, request):
self.write_req_validator.validate(request,
[AuthActionEdit(txn_type=LEDGERS_FREEZE, |
codereview_python_data_6058 | bool: Whether this action is a subset of the other action.
"""
return (self.action == other.action and
- (other.applies_to_all or
- self.any_value or
- other.any_value or not
other.expanded_rules or
all([
self.ports_are_subset(
I recommend keeping the order the same, so you always check self.any_value then other.any_value then (self|other).applies_to_all, then the expanded rules checks.
bool: Whether this action is a subset of the other action.
"""
return (self.action == other.action and
+ (self.any_value or
+ other.any_value or
+ other.applies_to_all or not
other.expanded_rules or
all([
self.ports_are_subset( |
codereview_python_data_6059 | else:
self[self.SUCCESSES] += 1
- self[self.RESP_TIMES][r_time] += 1
- self[self.RESP_TIMES_HDR].record_value(int(round(r_time, 3) * 1000))
if byte_count is not None:
self[self.BYTE_COUNT] += byte_count
multiply before rounding?
else:
self[self.SUCCESSES] += 1
+ self[self.RESP_TIMES_HDR].record_value(int(round(r_time * 1000, 3)))
if byte_count is not None:
self[self.BYTE_COUNT] += byte_count |
codereview_python_data_6061 | ):
try:
val = configuration.find(attrname)
- vals = [mapper(el) for el in val.text.strip().split('\n')]
except:
pass
else:
Wow that fixed it? What was wrong with splitting on any white space? I thought the problem was not doing `.split()[1:]`
):
try:
val = configuration.find(attrname)
+ vals = [mapper(el) for el in val.text.strip().split()]
except:
pass
else: |
codereview_python_data_6062 | # Internal Cell
@Normalize
def setups(self, to:Tabular):
- store_attr(means=dict(getattr(to, 'train', to).conts.mean()),
- stds=dict(getattr(to, 'train', to).conts.std(ddof=0)+1e-7),
- but='to')
return self(to)
@Normalize
Move the `but=`s to the first line of the function - no need to use xtra vertical space for that little thing! :)
# Internal Cell
@Normalize
def setups(self, to:Tabular):
+ store_attr(but='to', means=dict(getattr(to, 'train', to).conts.mean()),
+ stds=dict(getattr(to, 'train', to).conts.std(ddof=0)+1e-7))
return self(to)
@Normalize |
codereview_python_data_6071 | recipient, recipient=recipient, verb="", description=description
)
- self.stdout.write(
- "Suggestion notifications sent to {count} users.".format(count=len(data))
- )
Nitpick/question: what do you think about using f-strings?
recipient, recipient=recipient, verb="", description=description
)
+ self.stdout.write(f"Suggestion notifications sent to {len(data)} users.") |
codereview_python_data_6072 | '''
src_data, tgt_data = self.src[mode], self.tgt[mode]
n = len(src_data)
- # make sure all devices have the same number of batches
n = n // ndev * ndev
# XXX: is partition then shuffle equivalent to shuffle then partition?
I don't think so, but in order to make sure all training data are accessible during training, it seems we should divide first then shuffle.
'''
src_data, tgt_data = self.src[mode], self.tgt[mode]
n = len(src_data)
+ # make sure all devices have the same number of batch
n = n // ndev * ndev
# XXX: is partition then shuffle equivalent to shuffle then partition? |
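The reviewer's point hinges on the `n // ndev * ndev` idiom, which truncates `n` down to the nearest multiple of `ndev` so every device receives the same number of samples. A quick worked example (values invented):

```python
n, ndev = 1003, 4
n_trimmed = n // ndev * ndev  # integer division floors, so this is 1000
assert n_trimmed % ndev == 0
print(n_trimmed, n - n_trimmed)  # 1000 samples kept, 3 dropped
```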
codereview_python_data_6077 | return comment + GENERIC_INCORRECT_COMMENT.format(label=wrong_label) + suffix
-def _label_display_name(issue):
- """Return label display for a issue (based on its issue tracker type)."""
- issue_tracker_name = issue.issue_tracker.name
- if issue_tracker_name == 'monorail':
- return 'label'
- elif issue_tracker_name == 'buganizer':
- return 'hotlist ID'
-
- raise Exception('Unknown issue tracker.')
-
-
def job_platform_to_real_platform(job_platform):
"""Get real platform from job platform."""
for platform in data_types.PLATFORMS:
We shouldn't hardcode the issue tracker names here, as the cleanup code should have no knowledge of which issue tracker implementations are provided. a better alternative would be to have a ```python @property def label_type(self): return 'label' # default ``` in the IssueTracker.
return comment + GENERIC_INCORRECT_COMMENT.format(label=wrong_label) + suffix
def job_platform_to_real_platform(job_platform):
"""Get real platform from job platform."""
for platform in data_types.PLATFORMS: |
codereview_python_data_6078 | eval_hook(val_dataloader, **eval_cfg), priority='LOW')
resume_from = None
- if cfg.resume_from is None and cfg.auto_resume:
resume_from = find_latest_checkpoint(cfg.work_dir)
if resume_from is not None:
cfg.resume_from = resume_from
```python resume_from=None if cfg.resume_from is not None and cfg.get('auto_resume',False): warnings.warn(..) resume_from=cfg.resume_from elif cfg.resume_from is not None: resume_from=cfg.resume_from elif cfg.get('auto_resume',False) is True: resume_from=find_latest_checkpoint() if resume_from: ... ``` ...
eval_hook(val_dataloader, **eval_cfg), priority='LOW')
resume_from = None
+ if cfg.resume_from is None and cfg.get('auto_resume'):
resume_from = find_latest_checkpoint(cfg.work_dir)
if resume_from is not None:
cfg.resume_from = resume_from |
codereview_python_data_6084 | self.universes = [self.universe1, self.universe2, self.universe_rev]
self.psa = MDAnalysis.analysis.psa.PSAnalysis(self.universes, \
path_select='name CA', \
- targetdir=tempdir.TempDir())
self.psa.generate_paths(align=True)
self.psa.paths[-1] = self.psa.paths[-1][::-1,:,:] # reverse third path
self._run()
`TempDir()` is a context manager and I think if you use it like `TempDir().name` (which would give you the directory name) then the directory will be created and immediately destroyed. Either wrap the whole thing in ``` python with tempdir.in_tempdir(): ``` or use `tempfile.mkdtemp()` explicitly together with explicit clean up in `tearDown()`.
self.universes = [self.universe1, self.universe2, self.universe_rev]
self.psa = MDAnalysis.analysis.psa.PSAnalysis(self.universes, \
path_select='name CA', \
+ targetdir=self.outdir)
self.psa.generate_paths(align=True)
self.psa.paths[-1] = self.psa.paths[-1][::-1,:,:] # reverse third path
self._run() |
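The review suggests `tempfile.mkdtemp()` plus explicit cleanup in `tearDown()` instead of `TempDir()`, whose directory can vanish as soon as the object is garbage collected. A minimal sketch of that pattern using only standard-library modules (class and attribute names are illustrative):

```python
import os
import shutil
import tempfile
import unittest

class TestWithTempDir(unittest.TestCase):
    def setUp(self):
        # mkdtemp() creates a real directory that persists until we remove it
        self.outdir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.outdir, ignore_errors=True)

    def test_dir_exists(self):
        self.assertTrue(os.path.isdir(self.outdir))
```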
codereview_python_data_6088 | _define_word.return_value,
]
assert _define_words_in_sentence(fakes.course1, "{foo bar} {baz quux}", True) == [
_define_word.return_value,
_define_word.return_value,
please introduce a new test case for this one
_define_word.return_value,
]
+ @patch("librelingo_json_export.dictionary._define_word")
+ def test_defines_every_word_that_has_space(self, _define_word):
+ _define_word.return_value = fakes.fake_value()
assert _define_words_in_sentence(fakes.course1, "{foo bar} {baz quux}", True) == [
_define_word.return_value,
_define_word.return_value, |
codereview_python_data_6092 | """Handler that schedules ML train jobs."""
from base import tasks
-from base import utils
from datastore import data_types
from datastore import fuzz_target_utils
from handlers import base_handler
Should we rename gradientfuzz to something like gradient_generator or something similar to rnn_generator ? Same for train_gradientfuzz files ?
"""Handler that schedules ML train jobs."""
from base import tasks
from datastore import data_types
from datastore import fuzz_target_utils
from handlers import base_handler |
codereview_python_data_6114 | self.client.timeout = dehumanize_time(self.settings.get("timeout", self.client.timeout))
self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
- self.monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit",
- self.DEFAULT_MONITORING_BUFFER_LIMIT)
self.browser_open = self.settings.get("browser-open", self.browser_open)
token = self.settings.get("token", "")
if not token:
Usual style is to put original field value as default, especially when you set it to constant already
self.client.timeout = dehumanize_time(self.settings.get("timeout", self.client.timeout))
self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
+ self.monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", self.monitoring_buffer_limit)
self.browser_open = self.settings.get("browser-open", self.browser_open)
token = self.settings.get("token", "")
if not token: |
codereview_python_data_6120 | -h, --help Show this message and exit.
Commands:
- codemod `hypothesis codemod` refactors deprecated or inefficent code.
fuzz [hypofuzz] runs tests with an adaptive coverage-guided fuzzer.
write `hypothesis write` writes property-based tests for you!
```suggestion codemod `hypothesis codemod` refactors deprecated or inefficient code. ```
-h, --help Show this message and exit.
Commands:
+ codemod `hypothesis codemod` refactors deprecated or inefficient code.
fuzz [hypofuzz] runs tests with an adaptive coverage-guided fuzzer.
write `hypothesis write` writes property-based tests for you! |
codereview_python_data_6126 | :returns tuple (number_of_files_copied, total_size_copied_in_bytes)
"""
- (dst_bucket, dst_key) = self._path_to_bucket_and_key(destination_path)
-
# don't allow threads to be less than 3
threads = 3 if threads < 3 else threads
from boto3.s3.transfer import TransferConfig
If we're going to move the split of `source_path` into src_bucket/src_key here, why not also do the same with `destination_path`? That way, the only two required args for `_copy_*()` are `source_path` and `destination_path`. That also mirrors the requirements of `copy()`
:returns tuple (number_of_files_copied, total_size_copied_in_bytes)
"""
# don't allow threads to be less than 3
threads = 3 if threads < 3 else threads
from boto3.s3.transfer import TransferConfig |
codereview_python_data_6135 | .. deprecated:: 1.0.0
- :func: `notwithin_coordinates_factory` is no longer supported and will be removed in 2.0.0. as part
- of the removal of the :func:`density_from_PDB` function.
"""
# Benchmark of FABP system (solvent 3400 OH2, protein 2100 atoms) on G4 powerbook, 500 frames
# cpu/s relative speedup use_kdtree
Two things here: - As far as I'm aware, there shouldn't be a space between :func: and `notwithin_coordinates_factory`. - The sentence seems to exceed the PEP8 80 character limit.
.. deprecated:: 1.0.0
+ :func:`notwithin_coordinates_factory` is no longer supported and will be
+ removed in 2.0.0.
"""
# Benchmark of FABP system (solvent 3400 OH2, protein 2100 atoms) on G4 powerbook, 500 frames
# cpu/s relative speedup use_kdtree |
codereview_python_data_6145 | B : Numpy matrix
The modularity matrix of G.
- Notes
- -----
- For MultiGraph/MultiDiGraph, the edges weights are summed.
-
See Also
--------
to_numpy_matrix
Given the `@not_implemented_for` decorators, what does this note mean. Similarly with `modularity_spectrum`?
B : Numpy matrix
The modularity matrix of G.
See Also
--------
to_numpy_matrix |
codereview_python_data_6147 | # to the lock file and fail because the file is already locked by the previous process.
# The -n flag in flock will fail the process right away when the process is not able to acquire the lock so we won't
# queue up the jobs.
-(echo "{run_frequency} /usr/bin/flock -n /tmp/forseti_cron_runner.lock $FORSETI_HOME/setup/gcp/scripts/run_forseti.sh") | crontab -u $USER -
echo "Added the run_forseti.sh to crontab under user $USER"
echo "Execution of startup script finished"
Does this resolve the scenario when the user-triggered forseti process is running, and it would be killed by the cron job restarting the server?
# to the lock file and fail because the file is already locked by the previous process.
# The -n flag in flock will fail the process right away when the process is not able to acquire the lock so we won't
# queue up the jobs.
+# If the cron job failed the acquire lock on the process, it will log a warning message to syslog.
+(echo "{run_frequency} /usr/bin/flock -n /tmp/forseti_cron_runner.lock $FORSETI_HOME/setup/gcp/scripts/run_forseti.sh ||
+ echo "Warning: previous cron job 'run_forseti.sh' is still running and the new cron job is attempting to run,
+ exiting..." 2>&1 | logger) | crontab -u $USER -
echo "Added the run_forseti.sh to crontab under user $USER"
echo "Execution of startup script finished" |
codereview_python_data_6150 | display_name = self._GetRowValue(query_hash, row, 'given_displayname')
fullname = self._GetRowValue(query_hash, row, 'fullname')
username = '{0!s} <{1!s}>'.format(fullname, display_name)
event_data = SkypeAccountEventData()
why the change, what other type can fullname, display_name be?
display_name = self._GetRowValue(query_hash, row, 'given_displayname')
fullname = self._GetRowValue(query_hash, row, 'fullname')
+
+ # TODO: Move this to the formatter, and ensure username is rendered
+ # properly when fullname and/or display_name is None.
username = '{0!s} <{1!s}>'.format(fullname, display_name)
event_data = SkypeAccountEventData() |
codereview_python_data_6155 | @pytest.mark.parametrize('smi,chirality', [
- ('C[C@@H](C(=O)O)N', 'R'),
- ('C[C@H](C(=O)O)N', 'S'),
])
def test_chirality(smi, chirality):
Chem = pytest.importorskip('rdkit.Chem', reason='requires rdkit')
Isn't this a bit redundant? Or you want a specific test for the dtype?
@pytest.mark.parametrize('smi,chirality', [
+ ('C[C@@H](C(=O)O)N', 'S'),
+ ('C[C@H](C(=O)O)N', 'R'),
])
def test_chirality(smi, chirality):
Chem = pytest.importorskip('rdkit.Chem', reason='requires rdkit') |
codereview_python_data_6157 | self.shared_roi_extractor = False
else:
self.shared_roi_extractor = True
self.mask_head = builder.build_head(mask_head)
self.train_cfg = train_cfg
`self.mask_roi_extractor = self.bbox_roi_extractor` can be added here, which will make the inference more simple.
self.shared_roi_extractor = False
else:
self.shared_roi_extractor = True
+ self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = builder.build_head(mask_head)
self.train_cfg = train_cfg |
codereview_python_data_6158 | Args:
message (str): error message
"""
- LOGGER.error(message)
super(Error, self).__init__(message)
It would be better for the raiser or the catcher to log the error, rather than log it here where you can't tell where the exception is coming from.
Args:
message (str): error message
"""
super(Error, self).__init__(message) |
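The advice above is to log where the exception is raised or caught rather than inside the exception's constructor, so the log entry points at the real call site. A small illustrative sketch (not the project's code):

```python
import logging

LOGGER = logging.getLogger(__name__)

class Error(Exception):
    """Base error class; it no longer logs in __init__."""

def do_work():
    raise Error('something went wrong')

try:
    do_work()
except Error:
    # the catcher logs, so the traceback shows the failing call
    LOGGER.exception('do_work failed')
```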
codereview_python_data_6161 | """
itemcount = 1
for item in self._data:
- itemcount += self.count_for(item)
return itemcount
def get_space_left(self):
if `self._data[item_id].get('count', False)` fails and returns `False`, then this line raises an error. It should only add if `self.count_for(item)` is an instance of int.
"""
itemcount = 1
for item in self._data:
+ current_item_count = self.count_for(item)
+ if current_item_count:
+ itemcount += current_item_count
return itemcount
def get_space_left(self): |
codereview_python_data_6167 | )
def _write_flows(self, path, flows):
- f = open(path, "wb")
- fw = io.FlowWriter(f)
- for i in flows:
- fw.add(i)
- f.close()
def save_one_flow(self, path, flow):
return self._write_flows(path, [flow])
We should use `with open(...) as f:` more.
)
def _write_flows(self, path, flows):
+ with open(path, "wb") as f:
+ fw = io.FlowWriter(f)
+ for i in flows:
+ fw.add(i)
def save_one_flow(self, path, flow):
return self._write_flows(path, [flow]) |
codereview_python_data_6174 | if residual:
if indim != hiddendim:
self.residual_fc = nn.Linear(indim, hiddendim, bias=False)
- nn.init.xavier_normal_(self.residual_fc.weight.data, gain=1.414)
def forward(self, nodes):
ret = nodes.data['accum']
Would it be better to change to `gain=nn.init.calculate_gain('relu')`, which is equivalent to `gain=1.414`?
if residual:
if indim != hiddendim:
self.residual_fc = nn.Linear(indim, hiddendim, bias=False)
+ nn.init.xavier_normal_(self.residual_fc.weight.data, gain=nn.init.calculate_gain('relu'))
def forward(self, nodes):
ret = nodes.data['accum'] |
codereview_python_data_6177 | flipped[..., 1::4] = h - bboxes[..., 3::4]
flipped[..., 3::4] = h - bboxes[..., 1::4]
else:
- raise ValueError(f'Invalid flipping direction "{direction}"')
return flipped
def __call__(self, results):
Use single quote to wrap the str.
flipped[..., 1::4] = h - bboxes[..., 3::4]
flipped[..., 3::4] = h - bboxes[..., 1::4]
else:
+ raise ValueError(f"Invalid flipping direction '{direction}'")
return flipped
def __call__(self, results): |
codereview_python_data_6187 | selection = trans_u.residues[0].atoms
center_pos = selection.center_of_mass()
matrix = rotation_matrix(np.deg2rad(angle), vector, center_pos)
- rotation = matrix[:3, :3]
- translation = matrix[:3, 3]
- ref.positions = np.dot(ref.positions, rotation) + translation
- transformed = rotateby(angle, vector, ag=selection, center='mass')(trans)
assert_array_almost_equal(transformed.positions, ref.positions, decimal=6)
def test_rotateby_atomgroup_cog_pbc(rotate_universes):
Can you use `ag.transform(matrix)` for the test. It's an atom group method that accepts the 4x4 matrix for translation and rotation.
selection = trans_u.residues[0].atoms
center_pos = selection.center_of_mass()
matrix = rotation_matrix(np.deg2rad(angle), vector, center_pos)
+ ref_u.atoms.transform(matrix)
+ transformed = rotateby(angle, vector, ag=selection, center_of='mass')(trans)
assert_array_almost_equal(transformed.positions, ref.positions, decimal=6)
def test_rotateby_atomgroup_cog_pbc(rotate_universes): |
codereview_python_data_6189 | yield "this is a generator" # pragma: no cover
def send_response(self, response):
- if response.content == None:
- raise HttpException("Cannot assemble flow with None content")
self.send_response_headers(response)
self.send_response_body(response, [response.content])
...Cannot assemble flow with **missing content**
yield "this is a generator" # pragma: no cover
def send_response(self, response):
+ if response.content is None:
+ raise HttpException("Cannot assemble flow with missing content")
self.send_response_headers(response)
self.send_response_body(response, [response.content]) |
codereview_python_data_6194 | def check_import(self, lib_path):
import tensorflow as tf
- import nvidia.dali as dali # DALI symbols need to be loaded
print("Importing the TF library to check for errors")
try:
tf.load_op_library(lib_path)
So we cannot install DALI and plugin in one go as DALI needs to be fully available here, right?
def check_import(self, lib_path):
import tensorflow as tf
+ assert can_import_dali() # DALI symbols need to be loaded
print("Importing the TF library to check for errors")
try:
tf.load_op_library(lib_path) |
codereview_python_data_6197 | keep_partitioning=True,
lengths=None,
enumerate_partitions=True,
- max_retries=0,
)
# pending completion
This is a separate change. Please limit this PR to only adding _ability_ to pass extra keywords, and use them separately (you could use that in the `to_csv`-hang-fixing PR after this one is merged).
keep_partitioning=True,
lengths=None,
enumerate_partitions=True,
)
# pending completion |
codereview_python_data_6209 | return x * out
-class DYReLU(BaseModule):
- """Dynamic ReLU (DY-ReLU) module.
See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
Current implementation is specialized for task-aware attention in DyHead.
Shall we keep using DyReLU? It is consistent with DyHead and DyDCNv2
return x * out
+class DyReLU(BaseModule):
+ """Dynamic ReLU (DyReLU) module.
See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
Current implementation is specialized for task-aware attention in DyHead. |
codereview_python_data_6210 | """Schedule computing new batch from source callback by the parallel pool."""
dst_chunk_i = (self.flat_iter_idx + lead) % pool.contexts[context_i].queue_depth
if self.batch:
- pool.schedule_batch(context_i, dst_chunk_i, TaskArgs.make_batch(
self.callback_args(None, epoch_idx, lead=lead)))
else:
sample_range_start = self.current_sample + batch_size * lead
sample_range_end = sample_range_start + batch_size
iteration = self.current_iter + lead
- work_batch = TaskArgs.make_sample(
sample_range_start, sample_range_end, iteration, epoch_idx)
pool.schedule_batch(context_i, dst_chunk_i, work_batch)
I feel like this isn't really the best place to calculate the dst_chunk_id, which is kinda internal to the implementation of the pool - imo it should be hidden, with just the batch and epoch id being enough to indicate which one we request and which one we get.
"""Schedule computing new batch from source callback by the parallel pool."""
dst_chunk_i = (self.flat_iter_idx + lead) % pool.contexts[context_i].queue_depth
if self.batch:
+ pool.schedule_batch(context_i, dst_chunk_i, _TaskArgs.make_batch(
self.callback_args(None, epoch_idx, lead=lead)))
else:
sample_range_start = self.current_sample + batch_size * lead
sample_range_end = sample_range_start + batch_size
iteration = self.current_iter + lead
+ work_batch = _TaskArgs.make_sample(
sample_range_start, sample_range_end, iteration, epoch_idx)
pool.schedule_batch(context_i, dst_chunk_i, work_batch) |
codereview_python_data_6213 | _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
- mask_roi_extractor = self.mask_roi_extractor[-1]
- mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
for i in range(self.num_stages):
mask_roi_extractor = self.mask_roi_extractor[i]
Thanks for your work. These two statements seem redundant.
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
+ mask_feats = self.mask_roi_extractor[-1](
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
for i in range(self.num_stages):
mask_roi_extractor = self.mask_roi_extractor[i] |
codereview_python_data_6220 | mode=self.upsample_method,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample_method
- _, self.upsample = build_upsampler_layer(upsampler_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
Also add `**self.upsample_cfg` for other options.
mode=self.upsample_method,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample_method
+ _, self.upsample = build_upsample_layer(upsampler_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = ( |
codereview_python_data_6228 | 'FUCHSIA_DIR', os.path.join(self.build_dir, self.FUCHSIA_DIR_REL_PATH))
environment.set_value('FUCHSIA_RESOURCES_DIR', self.build_dir)
- # Does not support partial unpack.
- assert environment.get_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES')
result = super(FuchsiaBuild, self).setup()
if not result:
return result
Can we add a message after ","
'FUCHSIA_DIR', os.path.join(self.build_dir, self.FUCHSIA_DIR_REL_PATH))
environment.set_value('FUCHSIA_RESOURCES_DIR', self.build_dir)
+ assert environment.get_value('UNPACK_ALL_FUZZ_TARGETS_AND_FILES'), \
+ 'Fuchsia does not support partial unpacks'
result = super(FuchsiaBuild, self).setup()
if not result:
return result |
codereview_python_data_6241 | np.testing.assert_allclose(output, expected_output)
def test_all_ones():
image = tf.ones([10, 10, 1], tf.uint8)
output = dist_ops.euclidean_dist_transform(image)
Do you think that we could test the new algo against `scipy.ndimage.distance_transform_edt`?
np.testing.assert_allclose(output, expected_output)
+@pytest.mark.with_device(["cpu", "gpu"])
def test_all_ones():
image = tf.ones([10, 10, 1], tf.uint8)
output = dist_ops.euclidean_dist_transform(image) |
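The reviewer proposes checking the new op against SciPy's reference implementation. A hedged sketch of how the expected values could be produced (array contents are arbitrary; the actual test would compare them to the op's output):

```python
import numpy as np
from scipy import ndimage

image = np.ones((10, 10), dtype=np.uint8)
image[4, 4] = 0  # a single background pixel
# distance from every foreground pixel to the nearest zero pixel
expected = ndimage.distance_transform_edt(image)
print(expected.shape, expected.max())
```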
codereview_python_data_6244 | def _count_diff_NG86(codon1, codon2, codon_table=default_codon_table):
- """Count differences between two codons, three-letter string; (PRIVATE).
The function will take multiple pathways from codon1 to codon2
into account.
The original was fine as it was. If you want to end with ``(PRIVATE)`` I would remove the semi-colon here.
def _count_diff_NG86(codon1, codon2, codon_table=default_codon_table):
+ """Count differences between two codons, three-letter string (PRIVATE).
The function will take multiple pathways from codon1 to codon2
into account. |
codereview_python_data_6246 | from . import backend
from .set_default_backend import set_default_backend
-from .pytorch.sparse import gspmm_hetero
-from .pytorch.sparse import gsddmm_hetero
_enabled_apis = set()
we should import any function in pytorch backend here.
from . import backend
from .set_default_backend import set_default_backend
_enabled_apis = set() |
codereview_python_data_6247 | return [self.app, pickle_loc] + self.app_options()
def run(self):
- name = re.sub(r'[^\w]', '_', self.name)
- self.run_path = tempfile.mkdtemp(prefix=name)
- self.run_pickle = os.path.join(self.run_path, '.'.join([name, 'pickle']))
with open(self.run_pickle, 'wb') as fd:
# Copy module file to run path.
module_path = os.path.abspath(inspect.getfile(self.__class__))
I'd rename variable `name` to something like `run_path_prefix` (maybe some better option) due to two reasons: - it is actually a path, not a name anymore - we must be having a hard time dealing with `name` and `self.name` in the same place
return [self.app, pickle_loc] + self.app_options()
def run(self):
+ path_name_fragment = re.sub(r'[^\w]', '_', self.name)
+ self.run_path = tempfile.mkdtemp(prefix=path_name_fragment)
+ self.run_pickle = os.path.join(self.run_path, '.'.join([path_name_fragment, 'pickle']))
with open(self.run_pickle, 'wb') as fd:
# Copy module file to run path.
module_path = os.path.abspath(inspect.getfile(self.__class__)) |
codereview_python_data_6248 | self._init_key_label()
def _check_save_support(self, save):
- if self.question.option is None:
raise Error("No setting available to save the answer for this "
"question.")
Should this be `if save and self.question.option is None:`?
self._init_key_label()
def _check_save_support(self, save):
+ if save and self.question.option is None:
raise Error("No setting available to save the answer for this "
"question.") |
codereview_python_data_6249 | (test_Y == argmax_Y.float()).sum().item() / len(test_Y) * 100))
###############################################################################
-# The figure here is an animation where you plot graphs with the probability that a trained model
# assigns its Amazon SageMaker ground truth label to it.
#
# .. image:: https://s3.us-east-2.amazonaws.com/dgl.ai/tutorial/batch/test_eval4.gif
As @yzh119 has explained, the label is not from Amazon SageMaker. It is a categorical variable from the dataset itself indicating the graph type of each sample. How about "The animation here plots the probability a trained model predicts the correct graph type."?
(test_Y == argmax_Y.float()).sum().item() / len(test_Y) * 100))
###############################################################################
+# The animation here plots the probability that a trained model predicts the correct graph type.
# assigns its Amazon SageMaker ground truth label to it.
#
# .. image:: https://s3.us-east-2.amazonaws.com/dgl.ai/tutorial/batch/test_eval4.gif |
codereview_python_data_6252 | DBA = dual_barabasi_albert_graph(100, m1, m2, p, seed, initial=initial)
BA1 = barabasi_albert_graph(100, m1, seed, initial=initial)
- BA2 = barabasi_albert_graph(100, m1, seed, initial=initial)
assert (
min(BA1.size(), BA2.size()) <= DBA.size() <= max(BA1.size(), BA2.size())
)
Isn't `initial` mutated and returned? So DBA and BA1 and BA2 will end up all the same object? If true, we will need to make the mutating nature very clear -- or change to `G = initial.copy()`
DBA = dual_barabasi_albert_graph(100, m1, m2, p, seed, initial=initial)
BA1 = barabasi_albert_graph(100, m1, seed, initial=initial)
+ BA2 = barabasi_albert_graph(100, m2, seed, initial=initial)
assert (
min(BA1.size(), BA2.size()) <= DBA.size() <= max(BA1.size(), BA2.size())
) |
codereview_python_data_6254 | @register_query(LocalMongoDBConnection)
-def put_pre_commit_state(conn, state):
commit_id = state['commit_id']
return conn.run(
conn.collection('pre_commit')
All functions to save data have "store" as prefix, would it make sense to use it here as well so we are consistent?
@register_query(LocalMongoDBConnection)
+def store_pre_commit_state(conn, state):
commit_id = state['commit_id']
return conn.run(
conn.collection('pre_commit') |
codereview_python_data_6260 | else:
candidates.append((func, func_type))
- # Optimize the most common case of no overloading...
- if len(candidates) == 1 and not validate_types_fully:
- return candidates[0][0]
- elif len(candidates) == 0:
if pos is not None:
func, errmsg = errors[0]
if len(errors) == 1 or [1 for func, e in errors if e == errmsg]:
This looks funny and seems worth explaining - why continue if we only have a single candidate anyway?
else:
candidates.append((func, func_type))
+ if len(candidates) == 0:
if pos is not None:
func, errmsg = errors[0]
if len(errors) == 1 or [1 for func, e in errors if e == errmsg]: |
codereview_python_data_6263 | if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
- "--export_path",
dest="export_path",
type=str,
required=False,
```suggestion "--export-path", ``` plus note that you should change the calling line in `conf.py` now, otherwise the variables' table won't be regenerated properly
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
+ "--export-path",
dest="export_path",
type=str,
required=False, |
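The suggestion keeps the CLI flag dashed while the parsed attribute stays underscored, which `argparse` supports via `dest`. A standalone example (the path is made up):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--export-path", dest="export_path", type=str, required=False)

args = parser.parse_args(["--export-path", "/tmp/vars"])
print(args.export_path)  # /tmp/vars
```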
codereview_python_data_6265 | from urwid import BaseScreen
from bzt import TaurusInternalException, TaurusNetworkError, ToolError
-from bzt.six import string_types, iteritems, binary_type, text_type, b, integer_types, request, file_type, etree, parse
-def get_output(args, env=None):
- return subprocess.check_output(args, env=env, stderr=subprocess.STDOUT)
def get_full_path(path, default=None, step_up=0):
The name does not reflect starting subprocess. It is confusing now...
from urwid import BaseScreen
from bzt import TaurusInternalException, TaurusNetworkError, ToolError
+from bzt.six import stream_decode, file_type, etree, parse
+from bzt.six import string_types, iteritems, binary_type, text_type, b, integer_types, request
+def sync_run(args, env=None):
+ output = subprocess.check_output(args, env=env, stderr=subprocess.STDOUT)
+ return stream_decode(output).rstrip()
def get_full_path(path, default=None, step_up=0): |
codereview_python_data_6270 | def _list_from_csv(csv_string, caster=None):
- """Transform the given comma-separated string into a list.
:param csv_string: comma-separated input string
:type csv_string: string
Can you make this end ``... into a list (PRIVATE).`` too please.
def _list_from_csv(csv_string, caster=None):
+ """Transform the given comma-separated string into a list (PRIVATE).
:param csv_string: comma-separated input string
:type csv_string: string |
codereview_python_data_6276 | CHANGED_OPTIONS = {
('content', 'cookies-accept'):
_get_value_transformer('default', 'no-3rdparty'),
- ('storage', 'download-directory'):
- _get_value_transformer('', '%'),
}
changed = pyqtSignal(str, str)
I think this will change `''` to `'%'` unconditionally, i.e. there'll be no way to set `''` anymore. This should really only be used for values which make no sense anymore.
CHANGED_OPTIONS = {
('content', 'cookies-accept'):
_get_value_transformer('default', 'no-3rdparty'),
}
changed = pyqtSignal(str, str) |
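The objection is that mapping `''` to `'%'` would rewrite every empty value, not just stale defaults. A hypothetical sketch of what such a value transformer typically does (the real helper in the project may differ):

```python
def _get_value_transformer(old, new):
    """Return a function that rewrites one legacy value and passes others through."""
    def transformer(value):
        return new if value == old else value
    return transformer

migrate = _get_value_transformer('', '%')
print(migrate(''))      # '%'   -- every empty value gets rewritten,
print(migrate('~/dl'))  # '~/dl'   which is why the mapping was dropped
```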
codereview_python_data_6277 | import numpy as np
from Bio.KDTree import _CKDTree
-# Bio 1.71 API different from previous :(
-from distutils import version
-import Bio
-_NEW_BIO_KDTREE = version.LooseVersion(Bio.__version__) >= version.LooseVersion('1.7.1')
-del Bio
-del version
-
-
from MDAnalysis.lib.distances import _box_check, _check_array, apply_PBC
from MDAnalysis.lib.mdamath import norm, triclinic_vectors, triclinic_box
I think you mean Bio version 1.71
import numpy as np
from Bio.KDTree import _CKDTree
from MDAnalysis.lib.distances import _box_check, _check_array, apply_PBC
from MDAnalysis.lib.mdamath import norm, triclinic_vectors, triclinic_box |
codereview_python_data_6283 | # Requires Python 2.4+ and Openssl 1.0+
#
-import azurelinuxagent.utils.shellutil as shellutil
-from azurelinuxagent.distro.default.osutil import DefaultOSUtil
class AlpineOSUtil(DefaultOSUtil):
def __init__(self):
looks like you just need to update imports to `azurelinuxagent.common.utils.shellutil` and `azurelinuxagent.common.osutil.default`
# Requires Python 2.4+ and Openssl 1.0+
#
+import azurelinuxagent.common.utils.shellutil as shellutil
+from azurelinuxagent.common.osutil.default import DefaultOSUtil
class AlpineOSUtil(DefaultOSUtil):
def __init__(self): |
codereview_python_data_6288 | def get_config(self):
config = {
"margin": self.margin,
}
base_config = super().get_config()
return {**base_config, **config}
Should we also serialize `self.soft` here?
def get_config(self):
config = {
"margin": self.margin,
+ "soft": self.soft,
}
base_config = super().get_config()
return {**base_config, **config} |
codereview_python_data_6293 | Parameters
----------
- diha : list of 4 np.int64 element tuples
- The dihedrals not involving hydrogens
- dihh : list of 4 np.int64 element tuples
- The dihedrals involving hydrogens
Returns
-------
nitpicky, but here you could just have `if not vals in dihed:` and not have the `continue` branch
Parameters
----------
+ diha : list of tuples
+ The atom ids of dihedrals not involving hydrogens
+ dihh : list of tuples
+ The atom ids of dihedrals involving hydrogens
Returns
------- |
codereview_python_data_6300 | usecols=usecols,
**kwargs
)
-
pandas_df = parser.read().dropna(how="all")
# Since we know the number of rows that occur before this partition, we can
# correctly assign the index in cases of RangeIndex. If it is not a RangeIndex,
What is this change for?
usecols=usecols,
**kwargs
)
+ # In excel if you create a row with only a border (no values), this parser will
+ # interpret that as a row of NaN values. Pandas discards these values, so we
+ # also must discard these values.
pandas_df = parser.read().dropna(how="all")
# Since we know the number of rows that occur before this partition, we can
# correctly assign the index in cases of RangeIndex. If it is not a RangeIndex, |
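The added comment explains why `dropna(how="all")` is needed; a toy illustration of that call (data invented):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame([[1, 2], [np.nan, np.nan], [3, np.nan]])
# how="all" drops only rows where every value is NaN, e.g. border-only Excel rows
print(df.dropna(how="all"))  # keeps rows 0 and 2, discards the all-NaN row
```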
codereview_python_data_6301 | Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
-from .compat import json
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
Why did you change this?
Callable, Mapping,
cookielib, urlunparse, urlsplit, urlencode, str, bytes,
is_py2, chardet, builtin_str, basestring)
+from .compat import json as complexjson
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically |
codereview_python_data_6305 | }
-# @test_utils.run_all_in_graph_and_eager_modes
-# class TranslateOpTest(tf.test.TestCase):
-
-
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_translate(dtype):
I think you can remove this.
}
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", _DTYPES)
def test_translate(dtype): |
codereview_python_data_6311 | Notes
-----
Graphs may have node labels, node attributes, edge labels, and edge attributes,
- varing from different dataset.
"""
_url = r"https://www.chrsmrrs.com/graphkerneldatasets/{}.zip"
I suggest we clearly describe the changes here, so that people will know instantly by reading the doc. Ideally people would know how to adapt their own code just by reading this doc.
Notes
-----
Graphs may have node labels, node attributes, edge labels, and edge attributes,
+ varing from different dataset.
+
+ Labels are mapped to :math:`\lbrace 0,\cdots,n-1 \rbrace` where :math:`n` is the
+ number of labels (some datasets have raw labels :math:`\lbrace -1, 1 \rbrace` which
+ will be mapped to :math:`\lbrace 0, 1 \rbrace`). In previous versions, the minimum
+ label was added so that :math:`\lbrace -1, 1 \rbrace` was mapped to
+ :math:`\lbrace 0, 2 \rbrace`.
"""
_url = r"https://www.chrsmrrs.com/graphkerneldatasets/{}.zip" |
codereview_python_data_6316 | Enum for defining the Feature Names for all internal and CRP features
"""
MultiConfig = "MultipleExtensionsPerHandler"
- ExtensionTelemetryPipeline = "ExtensionTelemetryPipeline"
class AgentFeature(object):
we should move this class to its own file we also need to clarify its intent (and maybe rename it?) "new features that the Linux Guest Agent supports" seems too vague
Enum for defining the Feature Names for all internal and CRP features
"""
MultiConfig = "MultipleExtensionsPerHandler"
class AgentFeature(object): |
codereview_python_data_6317 | calculated_md5 = dirhash.dirhash(path, 'md5', ignore=ignore, match=included_extensions)
if md5 == calculated_md5:
return version
- return 'unknown'
class Access(Enum):
APP = 0
I feel like this should return a tuple-- the version (number), and the hash, separately. we will display the former on the GUI whereas the latter is for validation only
calculated_md5 = dirhash.dirhash(path, 'md5', ignore=ignore, match=included_extensions)
if md5 == calculated_md5:
return version
+ return None
class Access(Enum):
APP = 0 |
codereview_python_data_6323 | class StringSelection(Selection):
"""Selections based on text attributes
- .. versionchanged:: 0.21
- Supports multiple wildcards, based on fnmatch
"""
def __init__(self, parser, tokens):
vals = grab_not_keywords(tokens)
@Iv-Hristov the text entry in a `versionchanged` needs to be indented (this is what is causing Travis to fail). So: ``` .. versionchanged:: 1.0.0 Supports... ```
class StringSelection(Selection):
"""Selections based on text attributes
+ .. versionchanged:: 0.21.0
+ Supports multiple wildcards, based on fnmatch
"""
def __init__(self, parser, tokens):
vals = grab_not_keywords(tokens) |
codereview_python_data_6327 | class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
-class StreamConsumedError(RequestException):
"""The content for this response was already consumed"""
This needs to doubly inherit, like so: ``` python class StreamConsumedError(RequestException, TypeError): ```
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
+class StreamConsumedError(RequestException, TypeError):
"""The content for this response was already consumed""" |
codereview_python_data_6331 | import collections
import os
-
-import shade
-import paramiko
import time
import tempfile
from molecule import util
from molecule.driver import basedriver
Since `time` and `tempfile` are part of the standard library, they should be imported with `os` (above), in alphabetical order. `Paramiko` should stay with `shade`; however, it should go before it.
import collections
import os
import time
import tempfile
+import paramiko
+import shade
+
from molecule import util
from molecule.driver import basedriver |
codereview_python_data_6335 | Tries harder: if the thing to inspect is a class but typing.get_type_hints
raises an error or returns no hints, then this function will try calling it
- on the __init__ method. This second step often helps with user-defined
- classes on older versions of Python.
Never errors: instead of raising TypeError for uninspectable objects, or
NameError for unresolvable forward references, just return an empty dict.
Please also update the docstring of this function to describe what it does now, and why we need the additional logic for classes.
Tries harder: if the thing to inspect is a class but typing.get_type_hints
raises an error or returns no hints, then this function will try calling it
+ on the __init__ method. This second step often helps with user-defined
+ classes on older versions of Python. The third step we take is trying
+ to fetch types from the __signature__ property.
+ They override any other ones we found earlier.
Never errors: instead of raising TypeError for uninspectable objects, or
NameError for unresolvable forward references, just return an empty dict. |
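A rough, hypothetical sketch of the three-step lookup the docstring now describes; the real Hypothesis helper differs in details and error handling:

```python
import inspect
import typing

def get_hints(thing):
    hints = {}
    try:
        hints = dict(typing.get_type_hints(thing))
    except Exception:
        pass
    if not hints and inspect.isclass(thing):
        try:  # step two: fall back to the constructor's annotations
            hints = dict(typing.get_type_hints(thing.__init__))
        except Exception:
            pass
    if hasattr(thing, "__signature__"):
        try:  # step three: __signature__ annotations override earlier hints
            sig = inspect.signature(thing)
            hints.update({
                name: p.annotation
                for name, p in sig.parameters.items()
                if p.annotation is not inspect.Parameter.empty
            })
        except Exception:
            pass
    return hints
```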
codereview_python_data_6336 | async def load_ability_file(self, filename, access):
for entries in self.strip_yml(filename):
for ab in entries:
- ability_id = ab.pop('id') if 'id' in ab else ab.pop('ability_id', None)
name = ab.pop('name', '')
description = ab.pop('description', '')
tactic = ab.pop('tactic', None)
should the default value of `ab.pop()` be `[]` rather than an empty dict? It looks like `load_executors_from_list` wants the `executors` parameter to be a list
async def load_ability_file(self, filename, access):
for entries in self.strip_yml(filename):
for ab in entries:
+ ability_id = ab.pop('id', None)
name = ab.pop('name', '')
description = ab.pop('description', '')
tactic = ab.pop('tactic', None) |
codereview_python_data_6346 | please use format(motif, format_spec).
"""
warnings.warn(
- """\
-Motif.format has been deprecated, and we intend to remove it in a future
-release of Biopython. Instead of motif.format(format_spec), please use
-format(motif, format_spec).
-""",
BiopythonDeprecationWarning,
)
return self.__format__(format_spec)
I think this slash is not needed.
please use format(motif, format_spec).
"""
warnings.warn(
+ "Motif.format has been deprecated, and we intend to remove it in a "
+ "future release of Biopython. Instead of motif.format(format_spec), "
+ "please use format(motif, format_spec).",
BiopythonDeprecationWarning,
)
return self.__format__(format_spec) |
codereview_python_data_6348 | ) -> None:
lines = text.splitlines()
for action, full_insert in zip(*[iter(lines)] * 2):
- # if '0x71F85B2E46976bD21302B64329868fd15eb0D127' in action:
- # __import__("pdb").set_trace()
- # a = 1
-
if full_insert == '*':
full_insert = action
Is this a leftover?
) -> None:
lines = text.splitlines()
for action, full_insert in zip(*[iter(lines)] * 2):
if full_insert == '*':
full_insert = action |
codereview_python_data_6352 | Slice object to check.
sequence_len : int, optional
Length of the sequence to index with the passed `slc`.
- If not specified the function won't consider `slc` to
- be a full-grab if its ``.stop`` attribute is equal or
- greater than the sequence length.
Returns
-------
wait, how the function can know sequence length if it's `None` to compare `slc.stop` to it?.. something's wrong with this docstring, pls fix
Slice object to check.
sequence_len : int, optional
Length of the sequence to index with the passed `slc`.
+ If not specified the function won't be able to check whether
+ ``slc.stop`` is equal or greater than the sequence length to
+ consider `slc` to be a full-grab, and so, only slices with
+ ``.stop is None`` are considered to be a full-grab.
Returns
------- |
codereview_python_data_6360 | """
def _get_new_resampler(key):
- return Resampler(self._dataframe[key], **self.resample_args)
from .series import Series
I think @YarShev was against my simplification - this indeed makes it harder to make a subclass if we create `Resampler` explicitly
"""
def _get_new_resampler(key):
+ subset = self._dataframe[key]
+ resampler = type(self)(subset, **self.resample_kwargs)
+ return resampler
from .series import Series |
codereview_python_data_6371 | for i in range(0, 20):
al = [Atom() for j in range(100)]
ns = NeighborSearch(al)
- for i in [0, 1, 2, 3, 4, 5, 6]:
self.assertEqual(i, len(ns.search_all(5.0)))
Why not use ``range(7)`` which gives 0, 1, 2, 3, 4, 5 ,6?
for i in range(0, 20):
al = [Atom() for j in range(100)]
ns = NeighborSearch(al)
+ for i in range(7):
self.assertEqual(i, len(ns.search_all(5.0))) |
codereview_python_data_6375 | RUBYGEMS_API_KEY = os.path.join(SECRETS, 'api_key.yaml')
def decrypt_secrets():
subprocess.check_call([
'openssl', 'aes-256-cbc',
minor: why is this defined in the top-level tooling file, if it's a Ruby-specific constant?
RUBYGEMS_API_KEY = os.path.join(SECRETS, 'api_key.yaml')
+SECRET_FILES = [
+ DEPLOY_KEY, PYPIRC, RUBYGEMS_API_KEY
+]
+
+
def decrypt_secrets():
subprocess.check_call([
'openssl', 'aes-256-cbc', |
codereview_python_data_6380 | def __init__(self, learn:Learner, n_step:int = 1, drop_last:bool = False):
super().__init__(learn)
- self.n_step = n_step
- self.drop_last = drop_last
def on_train_begin(self, **kwargs):
"check if loss is reduction"
Both on one line (fastai style guide)
def __init__(self, learn:Learner, n_step:int = 1, drop_last:bool = False):
super().__init__(learn)
+ self.n_step, self.drop_last = n_step, drop_last
def on_train_begin(self, **kwargs):
"check if loss is reduction" |
codereview_python_data_6383 | """
index = count if count is not None else index
- if index in ('last', self._current_index() + 1):
self._tab_focus_last()
return
-
- if index is None:
self.tab_next()
return
As for not raising the error in the `self._current_index() + 1` case - what about this? ```python if index == 'last': self._tab_focus_last() return elif index == self._current_index() + 1: self._tab_focus_last(show_error=False) return elif index is None: # from below self.tab_next() return ``` Then in `_tab_focus_last`, add a `show_error` argument, and just return instead of raising `CommandError` if it's false.
"""
index = count if count is not None else index
+ if index == 'last':
self._tab_focus_last()
return
+ elif index == self._current_index() + 1:
+ self._tab_focus_last(show_error=False)
+ return
+ elif index is None:
self.tab_next()
return |
codereview_python_data_6385 | <h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
- <p>Details: cache-sea4478-SEA 1645521665 3876263177</p>
<hr>
<p>Varnish cache server</p>
</body>
nit: We could use `select_attributes(..)` from `utils/common.py` here, to make this a bit shorter: ``` attrs = ('filterName', 'logGroupName', 'filterPattern', 'destinationArn', 'roleArn') entry = select_attributes(data, attrs) entry['creationTime'] = now_utc() subscription_filters.append(entry) ```
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4465-SEA 1645521665 174095590</p>
<hr>
<p>Varnish cache server</p>
</body> |
codereview_python_data_6390 | Multiplier. Default: 2
avg_deg : int, optional
Average degree. Default: 3
- pq : list of pair of nonnegative float or str, optional
- Random densities. Default: Appendix_C
rng : numpy.random.RandomState, optional
Random number generator. Default: None
- Raises
- ------
- RuntimeError is raised if pq is not a list or string.
-
Examples
--------
>>> data = SBMMixtureDataset(n_graphs=16, n_nodes=10000, n_communities=2)
I'm confused by this `Appendix_C`. What are the options for `str` input, and what does a float input mean?
Multiplier. Default: 2
avg_deg : int, optional
Average degree. Default: 3
rng : numpy.random.RandomState, optional
Random number generator. Default: None
Examples
--------
>>> data = SBMMixtureDataset(n_graphs=16, n_nodes=10000, n_communities=2) |
codereview_python_data_6392 | sig.freeze(self.request.id)
if self.request.is_eager:
- return sig.apply().get()
- # task_result = sig.apply()
- # with allow_join_result():
- # return task_result.get()
else:
sig.delay()
raise Ignore('Replaced by new task')
This is probably what you meant. ```suggestion task_result = sig.apply() with allow_join_result(): return task_result.get() ```
sig.freeze(self.request.id)
if self.request.is_eager:
+ task_result = sig.apply()
+ with allow_join_result():
+ return task_result.get()
else:
sig.delay()
raise Ignore('Replaced by new task') |
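For context on why the accepted version wraps the `.get()` call: Celery guards against joining a result from inside a running task, and `allow_join_result()` is the escape hatch used for the eager path. A small sketch of the pattern follows — the task bodies and names are made up for illustration.

```python
from celery import Celery
from celery.result import allow_join_result

app = Celery("example")
app.conf.task_always_eager = True  # keep everything in-process for this sketch


@app.task
def add(x, y):
    return x + y


@app.task(bind=True)
def caller(self, x, y):
    sig = add.s(x, y)
    if self.request.is_eager:
        # Joining a result inside a task normally raises; allow_join_result()
        # explicitly permits it for the eager/testing code path.
        task_result = sig.apply()
        with allow_join_result():
            return task_result.get()
    sig.delay()


print(caller.delay(2, 3).get())  # -> 5 when running eagerly
```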
codereview_python_data_6393 | if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
- REQUIRED_PACKAGES.append('tensorflow-gpu >= 2.0.0-rc0')
else:
- REQUIRED_PACKAGES.append('tensorflow >= 2.0.0-rc0')
elif project_name == TFA_NIGHTLY:
# TODO: remove if-else condition when tf-nightly supports package consolidation.
if platform.system() == 'Linux':
Is the version requirement `>=` correct? For example, if a user froze `tensorflow_addons` to a version built against TensorFlow 2.0 and then updates to TensorFlow 2.1, will it work correctly?
if project_name == TFA_RELEASE:
# TODO: remove if-else condition when tf supports package consolidation.
if platform.system() == 'Linux':
+ REQUIRED_PACKAGES.append('tensorflow-gpu == 2.0.0-rc0')
else:
+ REQUIRED_PACKAGES.append('tensorflow == 2.0.0-rc0')
elif project_name == TFA_NIGHTLY:
# TODO: remove if-else condition when tf-nightly supports package consolidation.
if platform.system() == 'Linux': |
codereview_python_data_6395 | if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
- r = struct.__class__()
- for k, v in six.iteritems(struct):
- r[k] = getpaths(v)
- return r
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else:
What's this? The constructor for the dict subtype? Will this work if it's an immutable type?
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
+ return struct.__class__((k, getpaths(v)) for k, v in six.iteritems(struct))
elif isinstance(struct, (list, tuple)):
return struct.__class__(getpaths(r) for r in struct)
else: |
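The accepted one-liner rebuilds the mapping through its own constructor from key/value pairs, which is the part the reviewer is probing: it avoids item assignment, so it also works for dict subclasses that forbid mutation after construction. Here is a self-contained toy version of the same recursion — the string-leaf handling stands in for Luigi's `Task`/`output()` case.

```python
from collections import OrderedDict


def getpaths(struct):
    # Toy stand-in: a str leaf is "resolved" by upper-casing it, where the
    # real getpaths() would call task.output() on Task leaves.
    if isinstance(struct, str):
        return struct.upper()
    elif isinstance(struct, dict):
        # Rebuild via the constructor from (key, value) pairs instead of
        # assigning items on an empty instance.
        return struct.__class__((k, getpaths(v)) for k, v in struct.items())
    elif isinstance(struct, (list, tuple)):
        return struct.__class__(getpaths(r) for r in struct)
    else:
        return struct


result = getpaths(OrderedDict(a="x", b=["y", ("z",)]))
print(type(result).__name__, dict(result))  # OrderedDict {'a': 'X', 'b': ['Y', ('Z',)]}
```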
codereview_python_data_6398 | images, bb, labels = self.input(name="Reader")
return self.base_define_graph(images, labels, bb)
-test_data = {\
COCOReaderPipeline: [["/data/coco/coco-2017/coco2017/train2017", "/data/coco/coco-2017/coco2017/annotations/instances_train2017.json"],
["/data/coco/coco-2017/coco2017/val2017", "/data/coco/coco-2017/coco2017/annotations/instances_val2017.json"]]
}
is that `\` intentional?
images, bb, labels = self.input(name="Reader")
return self.base_define_graph(images, labels, bb)
+test_data = {
COCOReaderPipeline: [["/data/coco/coco-2017/coco2017/train2017", "/data/coco/coco-2017/coco2017/annotations/instances_train2017.json"],
["/data/coco/coco-2017/coco2017/val2017", "/data/coco/coco-2017/coco2017/annotations/instances_val2017.json"]]
} |
codereview_python_data_6401 | dim_names = [d.name for d in dimensions]
if dynamic:
- group_dims = [d.name for d in self.kdims if d not in dimensions]
- kdims = [self.get_dimension(d) for d in group_dims]
group_kwargs = dict(util.get_param_values(self), kdims=kdims)
group_kwargs.update(kwargs)
- kdims = group_kwargs['kdims']
- drop_dim = len(kdims) != len(group_kwargs['kdims'])
def load_subset(*args):
constraint = dict(zip(dim_names, args))
group = self.select(**constraint)
``kdims`` is shadowed a number of times. If ``len(kdims)`` is the same for line 527 and 530, I would inline the access on ``group_kwargs`` (which doesn't seem to be mutated anywhere). i.e: ``data = group.reindex(group_kwargs['kdims'])`` If the lengths are different, I am happy to discard this suggestion.
dim_names = [d.name for d in dimensions]
if dynamic:
+ group_dims = [kd for kd in self.kdims if kd not in dimensions]
+ kdims = [self.get_dimension(d) for d in kwargs.pop('kdims', group_dims)]
+ drop_dim = len(group_dims) != len(kdims)
group_kwargs = dict(util.get_param_values(self), kdims=kdims)
group_kwargs.update(kwargs)
def load_subset(*args):
constraint = dict(zip(dim_names, args))
group = self.select(**constraint) |
codereview_python_data_6408 | if not pass_through:
broadcast_finished = tf.broadcast_to(
tf.expand_dims(finished, axis=-1), new.shape)
- return new if pass_through else tf.where(
- broadcast_finished, cur, new)
if impute_finished:
next_state = tf.nest.map_structure(_maybe_copy_state,
Let's merge this `if` statement with the one above, e.g: ```python if not pass_through: ... return tf.where(...) else: return new ```
if not pass_through:
broadcast_finished = tf.broadcast_to(
tf.expand_dims(finished, axis=-1), new.shape)
+ return tf.where(broadcast_finished, cur, new)
+ else:
+ return new
if impute_finished:
next_state = tf.nest.map_structure(_maybe_copy_state, |
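For readers skimming the merged branch: the idea is to stretch the per-batch `finished` flags across the feature axis and then keep the old state wherever decoding has finished. A standalone toy example of that masking step, with shapes chosen only for illustration:

```python
import tensorflow as tf

finished = tf.constant([True, False])  # one flag per batch element
cur = tf.zeros([2, 3])                 # state to keep for finished elements
new = tf.ones([2, 3])                  # freshly computed state

broadcast_finished = tf.broadcast_to(
    tf.expand_dims(finished, axis=-1), new.shape)
next_state = tf.where(broadcast_finished, cur, new)

print(next_state.numpy())
# [[0. 0. 0.]   <- finished: keeps the old state
#  [1. 1. 1.]]  <- still decoding: takes the new state
```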
codereview_python_data_6416 | naive_dice=False,
loss_weight=1.0,
eps=1e-3):
- """`Dice Loss, there are two forms of dice loss is supported:
- the one proposed in `V-Net: Fully Convolutional Neural
Networks for Volumetric Medical Image Segmentation
'`' is unnecessary?
naive_dice=False,
loss_weight=1.0,
eps=1e-3):
+ """Dice Loss, there are two forms of dice loss is supported:
- the one proposed in `V-Net: Fully Convolutional Neural
Networks for Volumetric Medical Image Segmentation |
codereview_python_data_6417 | def get(self, k, d=None):
"""Return the value in the dictionary.
-
- If key not found the second
- attribute is returned. By default it is None.
"""
try:
return self.__getitem__(k)
That's odd line breaking. We could explain the shorthand k=key and d=default (which follows the Python built-in dictionary docstring). How about: ``` If the key (k) is not found, this returns None unless a specified default (d) is specified. ```
def get(self, k, d=None):
"""Return the value in the dictionary.
+
+ If the key (k) is not found, this returns None unless a specified
+ default (d) is specified.
"""
try:
return self.__getitem__(k) |
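The hunk cuts off before the `except` clause, so for completeness here is a minimal self-contained version of the get-with-default pattern being documented; the wrapper class is invented for the example.

```python
class Record:
    """Tiny mapping-like wrapper used only to illustrate get()."""

    def __init__(self, data=None):
        self._data = dict(data or {})

    def __getitem__(self, k):
        return self._data[k]

    def get(self, k, d=None):
        """Return the value for k, or the default d if k is not found."""
        try:
            return self.__getitem__(k)
        except KeyError:
            return d


r = Record({"id": 7})
print(r.get("id"), r.get("missing"), r.get("missing", 0))  # 7 None 0
```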
codereview_python_data_6419 | # this is important for some web applications that store authentication-related info in cookies (it took a long time to figure out)
if response2.headers.get('set-cookie'):
- headers['Cookie'] = response2.headers.get('set-cookie')
# get the challenge
auth_header_value = response2.headers[auth_header_field]
I might be missing something, but where is the `headers` dict defined here?
# this is important for some web applications that store authentication-related info in cookies (it took a long time to figure out)
if response2.headers.get('set-cookie'):
+ response2.headers['Cookie'] = response2.headers.get('set-cookie')
# get the challenge
auth_header_value = response2.headers[auth_header_field] |
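The reviewer's question ("where is the `headers` dict defined?") is about state the hunk does not show. Below is a hedged sketch of the intended flow — carrying the challenge response's Set-Cookie into the next request — written with plain `requests`; the URL and the `headers` dict are placeholders, not the original code.

```python
import requests

url = "http://example.com/protected"  # placeholder endpoint
headers = {}                           # the dict the review says is missing

# First round-trip: the server may set a session cookie along with the
# authentication challenge.
response2 = requests.get(url, headers=headers)

if response2.headers.get("set-cookie"):
    headers["Cookie"] = response2.headers.get("set-cookie")

# Follow-up request keeps the cookie so auth-related state is not lost.
response3 = requests.get(url, headers=headers)
```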
codereview_python_data_6427 | if self.master.options.mode != "regular":
r.append("[%s]" % self.master.options.mode)
if self.master.options.scripts:
- scripts = list(chain.from_iterable([glob(re) for re in self.master.options.scripts]))
r.append("[scripts:%s]" % len(scripts))
if self.master.options.save_stream_file:
This snippet seems to appear the second time now - maybe split it off into a function and reuse it?
if self.master.options.mode != "regular":
r.append("[%s]" % self.master.options.mode)
if self.master.options.scripts:
+ scripts = script.get_scripts()
r.append("[scripts:%s]" % len(scripts))
if self.master.options.save_stream_file: |
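The accepted change calls `script.get_scripts()` instead of repeating the glob expansion. A minimal sketch of what such a shared helper could look like — the function name matches the diff, but its body and the option plumbing are assumptions, not mitmproxy's actual module:

```python
from glob import glob
from itertools import chain


def get_scripts(patterns):
    """Expand glob patterns into the matching script paths (shared helper)."""
    return list(chain.from_iterable(glob(p) for p in patterns))


# Every call site that previously inlined the chain/glob expression can now
# reuse the helper:
patterns = ["addons/*.py", "extra/stats.py"]  # illustrative patterns
scripts = get_scripts(patterns)
print("[scripts:%s]" % len(scripts))
```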
codereview_python_data_6431 | ----------
graph : DGLGraph
The graph.
- feat : mxnet.NDArray
If a mxnet.NDArray is given, the input feature of shape :math:`(N, D_{in})`
where :math:`D_{in}` is size of input feature, :math:`N` is the number of
nodes.
mxnet.NDArray or pair of mxnet.NDArray
----------
graph : DGLGraph
The graph.
+ feat : mxnet.NDArray or a pair of mxnet.NDArray
If a mxnet.NDArray is given, the input feature of shape :math:`(N, D_{in})`
where :math:`D_{in}` is size of input feature, :math:`N` is the number of
nodes. |
codereview_python_data_6432 | "Bio.Align.Applications",
"Bio.Align.substitution_matrices",
"Bio.AlignIO",
"Bio.Application",
"Bio.Blast",
"Bio.CAPS",
If you want to keep a ``Bio.Alphabet`` stub, we need to keep it on the list of installed packages.
"Bio.Align.Applications",
"Bio.Align.substitution_matrices",
"Bio.AlignIO",
+ "Bio.Alphabet",
"Bio.Application",
"Bio.Blast",
"Bio.CAPS", |