id | content
---|---
codereview_python_data_12232
|
'charset', 'charpartition', 'taxpartition', 'matrix',
'tree', 'utree', 'translate', 'codonposset', 'title']
KNOWN_NEXUS_BLOCKS = ['trees', 'data', 'characters', 'taxa', 'sets', 'codons']
-PUNCTUATION = r'()[]{}\,;:=*\'"`+-<>'
MRBAYESSAFE = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_'
WHITESPACE = ' \t\n'
# SPECIALCOMMENTS = ['!','&','%','/','\\','@'] # original list of special comments
This changes the value, ```python >>> '()[]{}\,;:=*\'"`+-<>' '()[]{}\\,;:=*\'"`+-<>' >>> r'()[]{}\,;:=*\'"`+-<>' '()[]{}\\,;:=*\\\'"`+-<>' ``` Consider this example: ``` $ cat /tmp/x.py old = '()[]{}\,;:=*\'"`+-<>' new = '()[]{}\\,;:=*\'"`+-<>' assert old == new $ flake8 --select W605 /tmp/x.py /tmp/x.py:1:8: W605 invalid escape sequence '\,' ``` So, use this instead: ```python PUNCTUATION = '()[]{}\\,;:=*\\\'"`+-<>' ```
'charset', 'charpartition', 'taxpartition', 'matrix',
'tree', 'utree', 'translate', 'codonposset', 'title']
KNOWN_NEXUS_BLOCKS = ['trees', 'data', 'characters', 'taxa', 'sets', 'codons']
+PUNCTUATION = '()[]{}\\,;:=*\\\'"`+-<>'
MRBAYESSAFE = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890_'
WHITESPACE = ' \t\n'
# SPECIALCOMMENTS = ['!','&','%','/','\\','@'] # original list of special comments
|
codereview_python_data_12235
|
hour,min = 16,0
ref = ref[7:]
- refDate = datetime.utcnow().replace(hour=hour,minute=min,second=0,tzinfo=pytz.utc)
#Day reference
if ref in ('yesterday','today','tomorrow'): #yesterday, today, tomorrow
Same, with `django.utils.timezone` you can do `timezone.now().replace(hour=hour,minute=min,second=0)`.
hour,min = 16,0
ref = ref[7:]
+ refDate = timezone.now().replace(hour=hour,minute=min,second=0)
#Day reference
if ref in ('yesterday','today','tomorrow'): #yesterday, today, tomorrow
|
codereview_python_data_12252
|
@pyqtSlot(str)
def _on_config_changed(self, option: str) -> None:
if option.startswith('fonts.tabs.'):
- self.update()
self.ensurePolished()
self._set_icon_size()
elif option == 'tabs.favicons.scale':
Why are those two needed?
@pyqtSlot(str)
def _on_config_changed(self, option: str) -> None:
if option.startswith('fonts.tabs.'):
self.ensurePolished()
self._set_icon_size()
elif option == 'tabs.favicons.scale':
|
codereview_python_data_12254
|
A new dataframe with the updated labels.
"""
- def new_labels_mapper(x):
- return str(prefix) + str(x)
if axis == 0:
return self.rename(new_row_labels=new_labels_mapper)
```suggestion def new_labels_mapper(x, prefix=str(prefix)): return prefix + str(x) ``` some very small speedup here
A new dataframe with the updated labels.
"""
+ def new_labels_mapper(x, prefix=str(prefix)):
+ return prefix + str(x)
if axis == 0:
return self.rename(new_row_labels=new_labels_mapper)
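To illustrate the speedup the reviewer mentions, here is a minimal standalone sketch (toy values, not the Modin internals): a default argument is evaluated once at definition time and becomes a fast local lookup, whereas the closure version recomputes `str(prefix)` on every call.
```python
import timeit

prefix = 12345

def mapper_closure(x):
    return str(prefix) + str(x)             # str(prefix) recomputed on every call

def mapper_default(x, prefix=str(prefix)):
    return prefix + str(x)                   # converted once, then a local lookup

print(timeit.timeit(lambda: mapper_closure(7), number=100_000))
print(timeit.timeit(lambda: mapper_default(7), number=100_000))
```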
|
codereview_python_data_12255
|
def generate_scoped_enum_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
- name = entry.cname or entry.name or ""
- header, footer = self.sue_header_footer(type, "enum class", name)
- code.putln(header)
enum_values = entry.enum_values
if not enum_values:
error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block")
This seems fairly late for such an error. Isn't this something that the parser could detect? Or at least the declaration analysis?
def generate_scoped_enum_definition(self, entry, code):
code.mark_pos(entry.pos)
type = entry.type
+ code.putln(
+ "enum class %s : %s {" %
+ (type.cname, type.underlying_type.empty_declaration_code())
+ )
enum_values = entry.enum_values
if not enum_values:
error(entry.pos, "Empty enum definition not allowed outside a 'cdef extern from' block")
|
codereview_python_data_12267
|
try:
return safe_run(cmd)
except subprocess.CalledProcessError as e:
- if not safe:
return ""
if "No such container" in e.stdout.decode(config.DEFAULT_ENCODING):
raise NoSuchContainer(container_name_or_id, stdout=e.stdout, stderr=e.stderr)
I see that you've copied the code from above, but wondering if we had this wrong to begin with. I guess we should change this to: ``` if safe: return "" ``` ?
try:
return safe_run(cmd)
except subprocess.CalledProcessError as e:
+ if safe:
return ""
if "No such container" in e.stdout.decode(config.DEFAULT_ENCODING):
raise NoSuchContainer(container_name_or_id, stdout=e.stdout, stderr=e.stderr)
|
codereview_python_data_12271
|
r = s.get(url, stream=True, verify=verify_ssl)
# check status code before attempting to read body
if r.status_code != 200:
- raise 'Failed to download %s, response code %s' % (url, r.status_code)
total = 0
try:
The raised object should be an instance of `Exception` (otherwise we get a `TypeError`). Can we change this to: ``` raise Exception('Failed to download %s, response code %s' % (url, r.status_code)) ```
r = s.get(url, stream=True, verify=verify_ssl)
# check status code before attempting to read body
if r.status_code != 200:
+ raise Exception('Failed to download %s, response code %s' % (url, r.status_code))
total = 0
try:
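As a standalone illustration (not the localstack code itself), raising a non-exception object fails with a `TypeError` in Python 3, which is why the message must be wrapped in an `Exception`:
```python
def fail_with_string():
    raise 'Failed to download'               # Python 3 rejects non-exception objects

try:
    fail_with_string()
except TypeError as err:
    print(err)                               # "exceptions must derive from BaseException"

try:
    raise Exception('Failed to download %s, response code %s' % ('http://example', 503))
except Exception as err:
    print(err)
```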
|
codereview_python_data_12287
|
tag_string = container['image_tag'].format(container['image'],
container['image_version'])
- if 'disable_cache' in container and container['disable_cache'] is True:
- nocache = True
- else:
- nocache = False
errors = False
This would prob be simpler syntax. ``` nocache = container.get('disable_cache', False) ```
tag_string = container['image_tag'].format(container['image'],
container['image_version'])
+ nocache = container.get('disable_cache', False)
errors = False
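A small sketch (with a hypothetical `container` dict) confirming that `dict.get` with a default is equivalent to the verbose membership check:
```python
container = {'image': 'app', 'image_version': '1.0'}   # hypothetical input

# verbose form from the original code
if 'disable_cache' in container and container['disable_cache'] is True:
    nocache = True
else:
    nocache = False

# one-liner suggested in the review
assert nocache == container.get('disable_cache', False)
```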
|
codereview_python_data_12295
|
# Note: MrBayes may choke on large alignments if not interleaved
if interleave is None:
- interleave = False if columns <= 1000 else True
n.write_nexus_data(self.handle, interleave=interleave)
def _classify_alphabet_for_nexus(self, alphabet):
How about simplifying that to just: ``` if interleave is None: interleave = (columns > 1000) ```
# Note: MrBayes may choke on large alignments if not interleaved
if interleave is None:
+ interleave = (columns > 1000)
n.write_nexus_data(self.handle, interleave=interleave)
def _classify_alphabet_for_nexus(self, alphabet):
|
codereview_python_data_12296
|
@pytest.fixture()
def normalized_timeseries(self, h):
- return self._normalize_timeseries(h)
# keys are the names in the h.table
reference = {
Should just put this method into the fixture rather than refer to it
@pytest.fixture()
def normalized_timeseries(self, h):
+ # timeseries in normalized form: (t, d_indx1, a_indx1, d_index0, a_index0, donor, acceptor, dist, angle)
+ # array index: 0 1 2 3 4 5 6 7 8
+ timeseries = [[t] + item
+ for t, hframe in zip(h.timesteps, h.timeseries)
+ for item in hframe]
+ return timeseries
# keys are the names in the h.table
reference = {
|
codereview_python_data_12304
|
return ret
finally:
pcap_freealldevs(devs)
- if(conf.use_winpcapy):
get_if_list = winpcapy_get_if_list
def in6_getifaddr():
err = create_string_buffer(PCAP_ERRBUF_SIZE)
Can you rewrite without parenthesis? `if conf.use_winpcapy:`
return ret
finally:
pcap_freealldevs(devs)
+ if conf.use_winpcapy:
get_if_list = winpcapy_get_if_list
def in6_getifaddr():
err = create_string_buffer(PCAP_ERRBUF_SIZE)
|
codereview_python_data_12305
|
import warnings
-from Bio import Alphabet, BiopythonDeprecationWarning
from Bio.Align import _aligners
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord, _RestrictedDict
Personally I would keep this as two lines, but like with the resorting of the imports.
import warnings
+from Bio import Alphabet
+from Bio import BiopythonDeprecationWarning
from Bio.Align import _aligners
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord, _RestrictedDict
|
codereview_python_data_12308
|
cmdline.type = "protein"
self.assertEqual(
str(cmdline),
- t_coffee_exe + " -output clustalw_aln "
- "-infile Fasta/fa01 -outfile Fasta/tc_out.aln "
"-type protein -outorder input -gapopen -2 -gapext -5",
)
stdout, stderr = cmdline()
Can probably save a line by changing where the long string is split?
cmdline.type = "protein"
self.assertEqual(
str(cmdline),
+ t_coffee_exe
+ + " -output clustalw_aln -infile Fasta/fa01 -outfile Fasta/tc_out.aln "
"-type protein -outorder input -gapopen -2 -gapext -5",
)
stdout, stderr = cmdline()
|
codereview_python_data_12317
|
Encoding for text data.
prettyprint : bool (optional, default: True)
If True use line breaks and indenting in output XML.
- version: string (optional default: '1.2draft')
- The version of GEXF to be used for nodes attrbutes checking
Examples
--------
```suggestion version: string (optional, default: '1.2draft') The version of GEXF to be used for nodes attributes checking ```
Encoding for text data.
prettyprint : bool (optional, default: True)
If True use line breaks and indenting in output XML.
+ version: string (optional, default: '1.2draft')
+ The version of GEXF to be used for nodes attributes checking
Examples
--------
|
codereview_python_data_12319
|
else:
raise DGLError('Unsupported graph data type:', type(data))
-def hetero_from_relations(rel_graphs, num_nodes_per_type=None, index_dtype='int64'):
"""Create a heterograph from graphs representing connections of each relation.
The input is a list of heterographs where the ``i``th graph contains edges of type
I feel the index type of the result graph should be inferred from the input relation graphs. What do you think?
else:
raise DGLError('Unsupported graph data type:', type(data))
+def hetero_from_relations(rel_graphs, num_nodes_per_type=None):
"""Create a heterograph from graphs representing connections of each relation.
The input is a list of heterographs where the ``i``th graph contains edges of type
|
codereview_python_data_12322
|
install_requires = [
'tornado>=4.0,<5',
'python-daemon<3.0',
- 'toml',
]
if os.environ.get('READTHEDOCS', None) == 'True':
No version specification?
install_requires = [
'tornado>=4.0,<5',
'python-daemon<3.0',
]
if os.environ.get('READTHEDOCS', None) == 'True':
|
codereview_python_data_12323
|
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
def load_annotations(self, ann_file):
- """Load annotation from COCO style annotation file
Args:
ann_file (str): Path of annotation file.
Missing the last `.`
'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush')
def load_annotations(self, ann_file):
+ """Load annotation from COCO style annotation file.
Args:
ann_file (str): Path of annotation file.
|
codereview_python_data_12324
|
def test_word_with_duplicated_character_in_mixed_case(self):
self.assertIs(is_isogram("Alphabet"), False)
- def test_word_with_duplicated_character_in_mixed_case_lowercase_first(self):
self.assertIs(is_isogram("alphAbet"), False)
def test_hypothetical_isogrammic_word_with_hyphen(self):
This line is too long. In cases where the canonical test name is too long to use in its exact form, it is acceptable to reword the test name. Might I suggest `test_word_with_duplicated_letter_in_mixed_case_lowercase_first`?
def test_word_with_duplicated_character_in_mixed_case(self):
self.assertIs(is_isogram("Alphabet"), False)
+ def test_word_with_duplicated_letter_in_mixed_case_lowercase_first(self):
self.assertIs(is_isogram("alphAbet"), False)
def test_hypothetical_isogrammic_word_with_hyphen(self):
|
codereview_python_data_12329
|
def max_id(self):
return int(self.MAX_FUTURE_SECONDS + calendar.timegm(time.gmtime()))
- def fetch_listens_from_storage(*args):
""" Override this method in PostgresListenStore class """
raise NotImplementedError()
Can you explain this?
def max_id(self):
return int(self.MAX_FUTURE_SECONDS + calendar.timegm(time.gmtime()))
+ def fetch_listens_from_storage():
""" Override this method in PostgresListenStore class """
raise NotImplementedError()
|
codereview_python_data_12331
|
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
- if not title.startswith('\\') and not title.strartswith('/'):
setattr(codes, title.upper(), code)
I don't think `strartswith` is the name of this method.
for code, titles in _codes.items():
for title in titles:
setattr(codes, title, code)
+ if not title.startswith(('\\', '/')):
setattr(codes, title.upper(), code)
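As a side note, a tiny standalone sketch showing that `str.startswith` accepts a tuple of prefixes, which is what lets the two chained checks collapse into one call:
```python
for title in ['\\continue', '/continue', 'ok']:
    print(title, not title.startswith(('\\', '/')))
# \continue False
# /continue False
# ok True
```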
|
codereview_python_data_12340
|
ntypes = list(sorted(ntype_set))
else:
ntypes = list(sorted(num_nodes_per_type.keys()))
- num_nodes_per_type = utils.toindex([num_nodes_per_type[ntype] for ntype in ntypes])
ntype_dict = {ntype: i for i, ntype in enumerate(ntypes)}
for rgrh in rel_graphs:
if rgrh._graph.dtype != index_dtype:
raise Exception("Expect relation graphs to be {}, but got {}".format(
Does that mean we should keep all the `dgl_type_t` static casting in the code?
ntypes = list(sorted(ntype_set))
else:
ntypes = list(sorted(num_nodes_per_type.keys()))
+ num_nodes_per_type = utils.toindex([num_nodes_per_type[ntype] for ntype in ntypes], "int64")
ntype_dict = {ntype: i for i, ntype in enumerate(ntypes)}
+ index_dtype = rel_graphs[0]._graph.dtype
for rgrh in rel_graphs:
if rgrh._graph.dtype != index_dtype:
raise Exception("Expect relation graphs to be {}, but got {}".format(
|
codereview_python_data_12345
|
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
- out_file = osp.join(out_dir,
- osp.basename(img_meta['filename']))
else:
out_file = None
For different datasets, we may want to save the images differently. Sometimes the datasets are categorized into several subfolders, and we want to keep it when saving visualization results, and sometimes we just want to save all images in `out_dir`. An argument may be added to control the behavior.
img_show = mmcv.imresize(img_show, (ori_w, ori_h))
if out_dir:
+ out_file = osp.join(out_dir, img_meta['ori_filename'])
else:
out_file = None
|
codereview_python_data_12347
|
GTTGCTTCTGGCGTGGGTGGGGGGG
<BLANKLINE>
- Note some formats require a binary handle (bytes) rather than a text mode
- handle (strings), and may require you to specify the molecule type when it
- cannot be determined by the parser:
>>> from Bio import SeqIO
>>> from io import BytesIO
This sounds a bit strange. Text/binary handles and molecule types are two independent things.
GTTGCTTCTGGCGTGGGTGGGGGGG
<BLANKLINE>
+ Note some formats like SeqXML require you to specify the molecule type
+ when it cannot be determined by the parser:
>>> from Bio import SeqIO
>>> from io import BytesIO
|
codereview_python_data_12350
|
algo.register_trading_control(AssetDateBounds(on_error='fail'))
def handle_data(algo, data):
- # This should work because sid 3's is valid during the algo
- # lifetime.
algo.order(algo.sid(3), 1)
# Sid already expired.
typo `3's is` --> `3 is`
algo.register_trading_control(AssetDateBounds(on_error='fail'))
def handle_data(algo, data):
+ # This should work because sid 3 is valid during the algo lifetime.
algo.order(algo.sid(3), 1)
# Sid already expired.
|
codereview_python_data_12352
|
return len(self.masks)
def rescale(self, scale, interpolation='nearest'):
- """See `BaseInstanceMasks.rescale()`."""
if len(self.masks) == 0:
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
``` :func:`BaseInstanceMasks.rescale()` ```
return len(self.masks)
def rescale(self, scale, interpolation='nearest'):
+ """See :func:`BaseInstanceMasks.rescale()`."""
if len(self.masks) == 0:
new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)
rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)
|
codereview_python_data_12357
|
with self.assertRaisesRegex(ValueError, err):
FeatureLocation(42, 23, 1)
with self.assertRaisesRegex(ValueError, err):
FeatureLocation(BeforePosition(42), AfterPosition(23), -1)
- # Features with UnknownPositions should pass test
FeatureLocation(42, UnknownPosition())
FeatureLocation(UnknownPosition(), 42)
Include some zero positions, and also special case where start and end are equal (e.g. location of digest enzyme cutting site between two bases)?
with self.assertRaisesRegex(ValueError, err):
FeatureLocation(42, 23, 1)
+ with self.assertRaisesRegex(ValueError, err):
+ FeatureLocation(42, 0, 1)
+
with self.assertRaisesRegex(ValueError, err):
FeatureLocation(BeforePosition(42), AfterPosition(23), -1)
+ # Features with UnknownPositions should pass check
FeatureLocation(42, UnknownPosition())
FeatureLocation(UnknownPosition(), 42)
|
codereview_python_data_12369
|
Returns:
str|PathFilterScanTreeNode: a scan object, which is either
- a scan tree sub node, a path of the default value.
"""
return self._path_segments.get(path_segment, self.default_value)
I don't really understand. "a path to the default value" maybe?
Returns:
str|PathFilterScanTreeNode: a scan object, which is either
+ a scan tree sub node, a path or the default value.
"""
return self._path_segments.get(path_segment, self.default_value)
|
codereview_python_data_12373
|
)
# multiple column selection by label
- assert frame[:, f['value', 'id']].to_list()[0] == frame[:, ['value', 'id']].to_list()[0]
# multiple column selection by position
- assert frame[:, f[-1, 0]].to_list()[0] == frame[:, ['value', 'id']].to_list()[0]
#-------------------------------------------------------------------------------
# Integer-valued `j`
There is a function `assert_equals()` for comparing frames: ``` assert_equals(frame[:, f['value', 'id']], frame[:, ['value', 'id']]) ```
)
# multiple column selection by label
+ assert_equals(frame[:, f['value', 'id']], frame[:, ['value', 'id']])
# multiple column selection by position
+ assert_equals(frame[:, f[-1, 0]], frame[:, ['value', 'id']])
#-------------------------------------------------------------------------------
# Integer-valued `j`
|
codereview_python_data_12378
|
class NAMDBINReader(base.SingleFrameReaderBase):
"""Reader for NAMD binary files.
- .. versionchanged:: 1.0.0
"""
Should be `.. versionadded:: 1.0.0.`
class NAMDBINReader(base.SingleFrameReaderBase):
"""Reader for NAMD binary files.
+ .. versionadded:: 1.0.0
"""
|
codereview_python_data_12382
|
plugin.UpdateChainAndProcess(parser_mediator, registry_key)
except (IOError, dfwinreg_errors.WinRegistryValueError) as exception:
parser_mediator.ProduceExtractionError(
- 'in key: {0:s} {1!s}'.format(registry_key.path, exception))
def _NormalizeKeyPath(self, key_path):
"""Normalizes a Windows Registry key path.
+ with error ?
plugin.UpdateChainAndProcess(parser_mediator, registry_key)
except (IOError, dfwinreg_errors.WinRegistryValueError) as exception:
parser_mediator.ProduceExtractionError(
+ 'in key: {0:s} error: {1!s}'.format(registry_key.path, exception))
def _NormalizeKeyPath(self, key_path):
"""Normalizes a Windows Registry key path.
|
codereview_python_data_12383
|
In case of excessive usage of the E-utilities, NCBI will attempt to contact
a user at the email address provided before blocking access to the
E-utilities.""", UserWarning)
- # Tell Entrez if we want to set a custom cache location.
- if "cache" not in params:
- params["cache"] = cache
return params
I don't think these lines are needed. IIRC, this is building a record of all the parameters to be encoding into the URL or post sent to the NCBI. They don't need to know about where we might be caching DTD files on the user's machine (and revealing this could leak information like their computer account username).
In case of excessive usage of the E-utilities, NCBI will attempt to contact
a user at the email address provided before blocking access to the
E-utilities.""", UserWarning)
return params
|
codereview_python_data_12385
|
txn_type,
field=None,
value=None,
- is_owner: bool = True,
- non_ledger_did: bool = False):
self.txn_type = txn_type
self.field = str(field) if field is not None else ''
self.value = str(value) if value is not None else ''
self.is_owner = is_owner
- self.non_ledger_did = non_ledger_did
def get_action_id(self) -> str:
return compile_action_id(txn_type=self.txn_type,
Maybe call it `need_to_be_on_ledger` to be consistent with a field name in AuthConstraint?
txn_type,
field=None,
value=None,
+ is_owner: bool = True):
self.txn_type = txn_type
self.field = str(field) if field is not None else ''
self.value = str(value) if value is not None else ''
self.is_owner = is_owner
def get_action_id(self) -> str:
return compile_action_id(txn_type=self.txn_type,
|
codereview_python_data_12396
|
@pytest.mark.parametrize(
- "fn", ["max", "min", "median", "skew", "kurt", "sem", "std", "var"]
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
This list is not the same as the list of affected operations.
@pytest.mark.parametrize(
+ "fn", ["max", "min", "median", "mean", "skew", "kurt", "sem", "std", "var"]
)
@pytest.mark.parametrize(
"numeric_only", bool_arg_values, ids=arg_keys("numeric_only", bool_arg_keys)
|
codereview_python_data_12401
|
try:
proc = next(w for w in self._pool if w.pid == pid)
except StopIteration:
- logger.critical("process with pid=%s already exited :( this will be handled elsewhere.", pid)
return
assert proc.inqW_fd not in self._fileno_to_inq
assert proc.inqW_fd not in self._all_inqueues
Should this be logged?
try:
proc = next(w for w in self._pool if w.pid == pid)
except StopIteration:
+ logger.warning("process with pid=%s already exited :( - handling this elsewhere ...", pid)
return
assert proc.inqW_fd not in self._fileno_to_inq
assert proc.inqW_fd not in self._all_inqueues
|
codereview_python_data_12407
|
(overwrite) to write to the file!
"""
- def __init__(self, win_id, parent=None):
- super().__init__(win_id, parent)
-
def _cleanup(self):
"""Clean up temporary files after the userscript finished."""
if self._cleaned_up:
You can remove the overridden `__init__`now.
(overwrite) to write to the file!
"""
def _cleanup(self):
"""Clean up temporary files after the userscript finished."""
if self._cleaned_up:
|
codereview_python_data_12409
|
shellutil.run_command(["eject", dvd])
except shellutil.CommandError as cmd_err:
if chk_err:
- raise OSUtilError("Failed to eject dvd: ret={0}".format(cmd_err.returncode))
def try_load_atapiix_mod(self):
try:
we should probably add the stdout/stderr of the command (the original code was logging errors)
shellutil.run_command(["eject", dvd])
except shellutil.CommandError as cmd_err:
if chk_err:
+
+ msg = """Failed to eject dvd: ret={0}
+ [stdout]
+ {1}
+
+ [stderr]
+ {2}
+ """.format(cmd_err.returncode, cmd_err.stdout, cmd_err.stderr)
+
+ raise OSUtilError(msg)
def try_load_atapiix_mod(self):
try:
|
codereview_python_data_12413
|
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
- @tracing.traced()
def get_tables(self, project_id, dataset_id):
"""Return BigQuery tables stored in the requested project_id.
This decorator is used for classes, use the `@tracing.trace()` instead for class methods or functions
except (errors.HttpError, HttpLib2Error) as e:
raise api_errors.ApiExecutionError(project_id, e)
+ @tracing.trace()
def get_tables(self, project_id, dataset_id):
"""Return BigQuery tables stored in the requested project_id.
|
codereview_python_data_12415
|
wikimedia tile source without warning.
This config parameter was introduced as wikimedia tile sources
- can no longer be used outside the wikimedia domain.""")
def __call__(self, **params):
self.param.set_param(**params)
```suggestion can no longer be used outside the wikimedia domain, as of April 2021.""") ```
wikimedia tile source without warning.
This config parameter was introduced as wikimedia tile sources
+ can no longer be used outside the wikimedia domain, as of
+ April 2021.""")
def __call__(self, **params):
self.param.set_param(**params)
|
codereview_python_data_12419
|
request = self.client.objects().get_media(bucket=bucket, object=obj)
downloader = http.MediaIoBaseDownload(fp, request, chunksize=chunksize)
-
- try:
- _, done = downloader.next_chunk()
- except errors.HttpError as err:
- if err.resp.status < 500:
- raise
return return_fp
I don't think this check is in the right place. You will silently swallow 5xx errors. IIUC, you'll want to retry on those. If you remove the `try .. except` wrapper here, and replace `retry_if_exception_type` with a custom function that checks the exception type and status code, I think that we should be fine.
request = self.client.objects().get_media(bucket=bucket, object=obj)
downloader = http.MediaIoBaseDownload(fp, request, chunksize=chunksize)
+ _, done = downloader.next_chunk()
return return_fp
|
codereview_python_data_12420
|
# divide in degrees for mean reducer.
if reduce_op == 'mean':
- if g._graph.number_of_etypes() > 1:
- raise NotImplementedError("Reduce op 'mean' is not supported in "
- "the new heterograph API. Use multi_update_all().")
ret_shape = F.shape(ret)
deg = g.in_degrees()
deg = F.astype(F.clamp(deg, 1, max(g.number_of_edges(), 1)), F.dtype(ret))
Users will not understand what this *new* API is referring to in the future. `"Cannot set both intra-type and inter-type reduce operators as 'mean' using update_all. Please use multi_update_all instead."`. That being said, such error should be raised inside the `update_all` function instead of here. The reason is that `gspmm` might be invoked by other high-level APIs than `update_all` and this error message will be out-of-context.
# divide in degrees for mean reducer.
if reduce_op == 'mean':
ret_shape = F.shape(ret)
deg = g.in_degrees()
deg = F.astype(F.clamp(deg, 1, max(g.number_of_edges(), 1)), F.dtype(ret))
|
codereview_python_data_12426
|
return gsddmm_internal(
g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
else:
- # TODO (Israt): Call reshape func
lhs_and_rhs_tuple = tuple(list(lhs_data) + list(rhs_data))
return gsddmm_internal_hetero(g, op, len(lhs_data), lhs_target,
rhs_target, *lhs_and_rhs_tuple)
What does this TODO mean?
return gsddmm_internal(
g._graph, op, lhs_data, rhs_data, lhs_target, rhs_target)
else:
+ # TODO (Israt): Call reshape_lhs_rhs() on lhs and rhs data to match their dimension
+ # and avoid broadcasting issue. Handle the case where different nodes have
+ # different dimensions, and different etypes may need different broadcasting
+ # dims for the same node.
lhs_and_rhs_tuple = tuple(list(lhs_data) + list(rhs_data))
return gsddmm_internal_hetero(g, op, len(lhs_data), lhs_target,
rhs_target, *lhs_and_rhs_tuple)
|
codereview_python_data_12427
|
else:
get_weight = lambda u, v, data: data.get(weight, 1)
- length = dict(_dijkstra(G, source, get_weight, target=target))
try:
return length[target]
It seems we don't need this dict either. We just want the one length. So we can iterate through the values from `_dijkstra` and look for the target. If it doesn't come we know to throw the exception. That saves the space for the dict and probably time too. Something like: ``` for n,length in _dijkstra(G, source, get_weight, target=target) if n == target: return length raise nx.NetworkXNoPath("node %s not reachable from %s"%(source, target)) ``` what do you think?
else:
get_weight = lambda u, v, data: data.get(weight, 1)
+ length = _dijkstra(G, source, get_weight, target=target)
try:
return length[target]
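A generic standalone sketch (plain Python, not the NetworkX internals) of the early-return pattern the reviewer proposes: scan the lazily produced pairs and stop at the target instead of materializing a dict first.
```python
def pair_stream():
    # stand-in for _dijkstra(...): yields (node, distance) pairs lazily
    yield from [('a', 0), ('b', 3), ('c', 7)]

target = 'c'
for node, dist in pair_stream():
    if node == target:
        print(dist)                          # 7
        break
else:
    raise RuntimeError('node %s not reachable' % target)
```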
|
codereview_python_data_12430
|
"""Add a strategy into our existing strategy pool."""
self.strategy_names.add(strategy_tuple.name)
- def remove_strategy(self, strategy_tuple):
- """Remove a strategy from our existing strategy pool."""
- if strategy_tuple.name in self.strategy_names:
- self.strategy_names.remove(strategy_tuple.name)
-
def do_strategy(self, strategy_tuple):
"""Boolean value representing whether or not a strategy is in our strategy
pool."""
Discussed offline. This could end up getting a bit brittle depending on the order of how things are modified, and we don't necessarily expect everything enabled in the pool to be added to fuzzing_strategies if it causes issues.
"""Add a strategy into our existing strategy pool."""
self.strategy_names.add(strategy_tuple.name)
def do_strategy(self, strategy_tuple):
"""Boolean value representing whether or not a strategy is in our strategy
pool."""
|
codereview_python_data_12431
|
self._built = True
def feed_input(self, data_node, data, layout="", cuda_stream = None):
- """Bind a NumPy array (or a list thereof) to an output of ExternalSource.
- In the case of the GPU input, it is the user responsibility to modify the
- provided GPU memory content only using provided stream (DALI schedules
- a copy on it and all work is properly queued). If no stream is provided
- feed_input blocks until the provided memory is copied to the internal buffer
Parameters
----------
Can't we handle anything with [cuda] array interface?
self._built = True
def feed_input(self, data_node, data, layout="", cuda_stream = None):
+ """Pass a mutlidimensional array (or a list thereof) to an output of ExternalSource.
+ In the case of the GPU input, the data must be modified on the same stream as the one
+ used by feed_input. See ``cuda_stream`` parameter for details.
Parameters
----------
|
codereview_python_data_12438
|
node_subgraph
"""
if len(deprecated_kwargs) != 0:
- raise DGLError("Key word argument preserve_nodes is deprecated. "
- "Use relabel_nodes instead.")
if graph.is_block and relabel_nodes:
raise DGLError('Extracting subgraph from a block graph is not allowed.')
if not isinstance(edges, Mapping):
Suggest raise deprecation warning for this release to have a smoother transition.
node_subgraph
"""
if len(deprecated_kwargs) != 0:
+ dgl_warning(
+ "Key word argument preserve_nodes is deprecated. Use relabel_nodes instead.")
+ relabel_nodes = not deprecated_kwargs.get('preserve_nodes')
if graph.is_block and relabel_nodes:
raise DGLError('Extracting subgraph from a block graph is not allowed.')
if not isinstance(edges, Mapping):
|
codereview_python_data_12441
|
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
- <p>Details: cache-sea4424-SEA 1645542746 1393056521</p>
<hr>
<p>Varnish cache server</p>
</body>
this isn't needed either
<h1>Error 503 Backend is unhealthy</h1>
<p>Backend is unhealthy</p>
<h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4435-SEA 1645542746 631690005</p>
<hr>
<p>Varnish cache server</p>
</body>
|
codereview_python_data_12442
|
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)])
"""
- # TODO For some reason this fails when `ebunch` is an iterator.
- ebunch = list(ebunch)
self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch),
**attr)
What is the failure? Can we fix this before merging?
>>> G = nx.Graph() # or DiGraph, MultiGraph, MultiDiGraph, etc
>>> G.add_weighted_edges_from([(0,1,3.0),(1,2,7.5)])
"""
self.add_edges_from(((u, v, {weight: d}) for u, v, d in ebunch),
**attr)
|
codereview_python_data_12447
|
return selection
# Second Legendre polynomial
- lg2 = lambda self,x : (3*x*x - 1)/2
def run(self, **kwargs):
"""
Do not use `sys.stdout.write`: if you want to show progress, use our own `lib.log.ProgressMeter` class.
return selection
# Second Legendre polynomial
+ lg2 = lambda self, x: (3*x*x - 1)/2
def run(self, **kwargs):
"""
|
codereview_python_data_12449
|
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
- return [
- (bbox_result, segm_result)
- for bbox_result, segm_result in zip(bbox_results, segm_results)
- ]
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
`return list(zip(bbox_results, segm_results))` is enough.
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
+ return list(zip(bbox_results, segm_results))
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
|
codereview_python_data_12454
|
self.assertEqual(basename(test_data[trace]["path"][-1]).replace('.ab1', ''), record.name)
self.assertEqual(test_data[trace]['seq'], str(record.seq))
self.assertEqual(test_data[trace]['qual'], record.letter_annotations['phred_quality'])
- print(record)
self.assertEqual(test_data[trace]['sample'], record.id)
self.assertEqual(test_data[trace]['sample_well'], record.annotations['sample_well'])
self.assertEqual(test_data[trace]['machine_model'], record.annotations['machine_model'])
Avoid prints in the unit tests.
self.assertEqual(basename(test_data[trace]["path"][-1]).replace('.ab1', ''), record.name)
self.assertEqual(test_data[trace]['seq'], str(record.seq))
self.assertEqual(test_data[trace]['qual'], record.letter_annotations['phred_quality'])
self.assertEqual(test_data[trace]['sample'], record.id)
self.assertEqual(test_data[trace]['sample_well'], record.annotations['sample_well'])
self.assertEqual(test_data[trace]['machine_model'], record.annotations['machine_model'])
|
codereview_python_data_12455
|
else:
self.sources = source
if not self.sim_params:
self.sim_params = create_simulation_parameters(
start=start,
We may want to take a step back and consider whether or not `run` should still supporting having the `start` and `end` paramaters. One thing to note is that if `run` is supporting multiple runs, e.g. on a DataFrame, all runs would share the same sim_params if this logic is used. Should this logic be changed so that this `self.sim_params` is in the `__init__` and then if `start` and `end` are specified a new `sim_params` is created? The specifying parameters and of `start` and `end` seems at odds with the concept of simulation parameters.
else:
self.sources = source
+ if sim_params:
+ self.sim_params = sim_params
+
if not self.sim_params:
self.sim_params = create_simulation_parameters(
start=start,
|
codereview_python_data_12460
|
)
include_ancestors = (
expr_type is s_types.ExprType.Insert
- or include_descendants
)
return irtyputils.type_to_typeref(
schema,
It's probably cleaner to restate DML types here in case `include_descendants` deviates from the current set of conditions.
)
include_ancestors = (
expr_type is s_types.ExprType.Insert
+ or expr_type is s_types.ExprType.Update
+ or expr_type is s_types.ExprType.Delete
)
return irtyputils.type_to_typeref(
schema,
|
codereview_python_data_12466
|
def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
- Accepts Seq/UnknownSeq objects and Strings as objects to be concatinated with
the spacer
>>> spacer = UnknownSeq(5)
- >>> seqlist = list([Seq("AAA"),Seq("TTT"),Seq("PPP")])
>>> concatenated = spacer.join(seqlist)
>>> concatenated
Seq('AAA?????TTT?????PPP')
This can be one line, can't it?
def join(self, other):
"""Return a merge of the sequences in other, spaced by the sequence from self.
+ Accepts Seq/UnknownSeq objects and Strings as objects to be concatenated with
the spacer
>>> spacer = UnknownSeq(5)
+ >>> seqlist = [Seq("AAA"), Seq("TTT"), Seq("PPP")]
>>> concatenated = spacer.join(seqlist)
>>> concatenated
Seq('AAA?????TTT?????PPP')
|
codereview_python_data_12471
|
binding.role: bigquery_acl.role,
}
- # only compare fields that were set
- rule_regex_to_val.pop(None, None)
-
return regular_exp.all_match(rule_regex_to_val)
Remove the 'pop' here? A rule will always have a dataset and role
binding.role: bigquery_acl.role,
}
return regular_exp.all_match(rule_regex_to_val)
|
codereview_python_data_12472
|
self._read_next_timestep()
- try:
- self._get_dt()
- except OSError:
- raise ValueError("Supplied n_atoms {} is incompatible "
- "with provided trajectory file. "
- "Maybe `topology` is wrong?".format(self.n_atoms))
-
-
def _read_trz_header(self):
"""Reads the header of the trz trajectory"""
self._headerdtype = np.dtype([
It is not clear why `_get_dt()` will fail with the wrong `n_atom`. What's the stack trace and where does the ValueError come from? (I assume this relies on being able to do `self.next()`, which actually fills the `ts`.... but then why does `self._read_next_timestep()` on the preceding line work? ) This might work and fix an error but I think we should rather check sooner and in a more transparent manner if at all possible.
self._read_next_timestep()
def _read_trz_header(self):
"""Reads the header of the trz trajectory"""
self._headerdtype = np.dtype([
|
codereview_python_data_12482
|
>>> G = nx.DiGraph(nx.path_graph(4))
>>> sr = nx.simrank_matrix(G)
>>> sr
- [[ 1. 0. 0.8181254 0. ]
- [ 0. 1. 0. 0.8181254]
- [ 0.8181254 0. 1. 0. ]
- [ 0. 0.8181254 0. 1. ]]
Notes
-----
from travis tests, the reason of failure ``` Expected: [[ 1. 0. 0.8181254 0. ] [ 0. 1. 0. 0.8181254] [ 0.8181254 0. 1. 0. ] [ 0. 0.8181254 0. 1. ]] Got: array([[ 1. , 0. , 0.8181254, 0. ], [ 0. , 1. , 0. , 0.8181254], [ 0.8181254, 0. , 1. , 0. ], [ 0. , 0.8181254, 0. , 1. ]]) ```
>>> G = nx.DiGraph(nx.path_graph(4))
>>> sr = nx.simrank_matrix(G)
>>> sr
+ array([[ 1., 0., 0.8181254, 0.],
+ [ 0., 1., 0., 0.8181254],
+ [ 0.8181254, 0., 1., 0.],
+ [ 0., 0.8181254, 0., 1.]])
Notes
-----
|
codereview_python_data_12487
|
np_t[0:2, 1:3, 0:4][0:1, 0:2, 2:3])
# Slice a zero-dim tensor
- with pytest.raises(Exception):
o3c.Tensor.ones((), device=device)[:]
- with pytest.raises(Exception):
o3c.Tensor.ones((), device=device)[0:1]
`with pytest.raises(RuntimeError, match="Cannot slice a scalar (0-dim) tensor.")` (more specific exception class with error message) also below...
np_t[0:2, 1:3, 0:4][0:1, 0:2, 2:3])
# Slice a zero-dim tensor
+ with pytest.raises(RuntimeError,
+ match=r"Cannot slice a scalar \(0-dim\) tensor."):
o3c.Tensor.ones((), device=device)[:]
+ with pytest.raises(RuntimeError,
+ match=r"Cannot slice a scalar \(0-dim\) tensor."):
o3c.Tensor.ones((), device=device)[0:1]
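A self-contained sketch (plain Python exceptions, not Open3D) of why the parentheses in `match` must be escaped: pytest interprets the argument as a regular expression.
```python
import pytest

def boom():
    raise RuntimeError("Cannot slice a scalar (0-dim) tensor.")

def test_boom():
    # match is a regex search, so literal parentheses need escaping
    with pytest.raises(RuntimeError, match=r"scalar \(0-dim\) tensor"):
        boom()
```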
|
codereview_python_data_12498
|
[1, 0, 0, 1, 1],
[1, 1, 0, 0, 1],
[1, 1, 1, 0, 0]], dtype=bool)
- # There's are also two nans in the independent data.
independent_nan_grid = np.array([[0],
[0],
[1],
should "both" be "either"? Dates for which the dependent or independent (or both) data is nan cannot be included in the beta calculation.
[1, 0, 0, 1, 1],
[1, 1, 0, 0, 1],
[1, 1, 1, 0, 0]], dtype=bool)
+ # There are also two nans in the independent data.
independent_nan_grid = np.array([[0],
[0],
[1],
|
codereview_python_data_12504
|
"""
with debug.log_time(log.completion, 'Setting filter pattern'):
self.pattern = val
- val = val.casefold()
val = re.escape(val)
val = val.replace(r'\ ', r'.*')
- self.patternre = re.compile(val)
self.invalidateFilter()
sortcol = 0
try:
I guess since we're using regexes now, we could simply use `re.IGNORECASE` when compiling instead of using `casefold()`.
"""
with debug.log_time(log.completion, 'Setting filter pattern'):
self.pattern = val
val = re.escape(val)
val = val.replace(r'\ ', r'.*')
+ self.pattern_re = re.compile(val, re.IGNORECASE)
self.invalidateFilter()
sortcol = 0
try:
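A minimal sketch (generic strings, not the qutebrowser completion model) of the reviewer's point: compiling with `re.IGNORECASE` makes a separate `casefold()` pass unnecessary.
```python
import re

needle = 'open tab'
haystack = 'OPEN TAB in a new window'

case_sensitive = re.compile(re.escape(needle))
case_insensitive = re.compile(re.escape(needle), re.IGNORECASE)

print(bool(case_sensitive.search(haystack)))     # False
print(bool(case_insensitive.search(haystack)))   # True
```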
|
codereview_python_data_12511
|
self.handle_private_mode = False
self._tmphist = None
- self.history = []
- if history:
self.history = history
def __getitem__(self, idx):
What's the reason for this change? The idiom ``` python def foo(self, bar=None): if bar is None: self.bar = [] else: self.bar = bar ``` Is commonly used in Python, and strictly speaking, your code doesn't do the same (though it doesn't matter in this case, I think) - if someone passes an empty custom container in it, `self.history` will be `[]` instead of that custom container.
self.handle_private_mode = False
self._tmphist = None
+ if history is None:
+ self.history = []
+ else:
self.history = history
def __getitem__(self, idx):
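A standalone sketch of the subtle difference the reviewer describes: checking `is None` preserves a caller's empty custom container, whereas a truthiness check would silently swap it for a fresh list.
```python
class TrackingList(list):
    """Hypothetical custom container a caller might pass in."""

custom = TrackingList()

def init_truthy(history=None):
    return history if history else []        # empty custom container is dropped

def init_is_none(history=None):
    return [] if history is None else history

print(init_truthy(custom) is custom)          # False
print(init_is_none(custom) is custom)         # True
```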
|
codereview_python_data_12512
|
logger.info("Starting to compile the library.")
cmake_cmd = ["cmake", "../compile/"]
- if opencl_python_package:
use_gpu = False
- use_mpi = False
- nomp = False
- use_hdfs = False
- cmake_cmd.append("-D__OPENCL_PYTHON_PACKAGE=ON")
if use_gpu:
cmake_cmd.append("-DUSE_GPU=ON")
if boost_root:
Again, I don't think we need this as this build option is for maintainers.
logger.info("Starting to compile the library.")
cmake_cmd = ["cmake", "../compile/"]
+ if integrated_opencl:
use_gpu = False
+ cmake_cmd.append("-D__INTEGRATE_OPENCL=ON")
if use_gpu:
cmake_cmd.append("-DUSE_GPU=ON")
if boost_root:
|
codereview_python_data_12513
|
violation_map[resource['resource']],
global_configs,
notifier_configs, None))
if notifier['name'] != 'email_violations':
chosen_pipeline = find_notifiers(notifier['name'])
notifiers.append(chosen_pipeline(
Instead of `None`, can you use `notifier.get('configuration')`, which will return `None` if `configuration` is not present? This way, you can keep most of the original code, and not create a separate block of code here. ``` if notifier['name'] != 'email_violations': chosen_pipeline = find_notifiers(notifier['name']) notifiers.append(chosen_pipeline( resource['resource'], inventory_index_id, violation_map[resource['resource']], global_configs, notifier_configs, notifier.get('configuration'))) ```
violation_map[resource['resource']],
global_configs,
notifier_configs, None))
+ # else block below is added for backward compatibility
+ else:
+ notifiers.append(
+ email_violations.EmailViolations(
+ resource['resource'],
+ inventory_index_id,
+ violation_map[resource['resource']],
+ global_configs,
+ notifier_configs,
+ notifier['configuration']))
+
if notifier['name'] != 'email_violations':
chosen_pipeline = find_notifiers(notifier['name'])
notifiers.append(chosen_pipeline(
|
codereview_python_data_12514
|
assert type(disp_uv) is float
def test_impossible_things(self):
- G=nx.davis_southern_women_graph()
disp = nx.dispersion(G)
- for d in disp:
- for dd in d:
- assert dd >= 0
Failed on python 3, and a good thing. This isn't checking what you think. ``` python for u in disp: for v in disp[u]: assert disp[u][v] >= 0 ```
assert type(disp_uv) is float
def test_impossible_things(self):
+ G=nx.karate_club_graph()
disp = nx.dispersion(G)
+ for u in disp:
+ for v in disp[u]:
+ assert disp[u][v] >= 0
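A tiny standalone sketch of the bug the reviewer caught: iterating a dict yields its keys, so the original nested loop walked over the characters of the key strings instead of the dispersion values.
```python
disp = {'alice': {'bob': 0.5, 'carol': 1.0}}

print([d for d in disp])                 # ['alice'] - keys, not inner dicts
print([dd for d in disp for dd in d])    # ['a', 'l', 'i', 'c', 'e'] - characters!

for u in disp:                           # correct: index into the nested dicts
    for v in disp[u]:
        assert disp[u][v] >= 0
```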
|
codereview_python_data_12516
|
from multiprocessing import Process, Queue
from typing import Dict, Tuple, Union
-import localstack.utils.docker
from localstack import config
from localstack.constants import (
THUNDRA_APIKEY,
nitpick: So far, we've usually used relative (not absolute) import names. We could continue using this pattern, e.g. something like: ``` from localstack.utils import docker as docker_utils ``` (not critical at all, though - shouldn't hold back the merge)
from multiprocessing import Process, Queue
from typing import Dict, Tuple, Union
from localstack import config
from localstack.constants import (
THUNDRA_APIKEY,
|
codereview_python_data_12525
|
-def detect_anagrams(string, candidates):
pass
Your suggestion looks solid, but could you also change `string` to something more meaningful like `word`?
+def detect_anagrams(word, candidates):
pass
|
codereview_python_data_12527
|
label : string, optional
If not None, the parsed nodes will be renamed according to node
- attributes indicated by `label`. Default value: `'label'`.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
Exceptions can (and should) be rendered as class names and linked as `:exc:ValueError`.
label : string, optional
If not None, the parsed nodes will be renamed according to node
+ attributes indicated by `label`. Default value: 'label'.
destringizer : callable, optional
A destringizer that recovers values stored as strings in GML. If it
|
codereview_python_data_12531
|
]
}
"""
task_def_arn = luigi.Parameter(default=None)
Could you update the above docstring to include this parameter and a description? (I don't use ECS, so perhaps this is obvious and unnecessary)
]
}
+ :param cluster: str defining the ECS cluster to use.
+ When this is not defined it will use the default one.
+
"""
task_def_arn = luigi.Parameter(default=None)
|
codereview_python_data_12534
|
self[key] = value
def get_default_endpoint(self):
- """Get server address from the forseti_client_conf.yaml file
Returns:
str: Forseti server endpoint
To be accurate, this description should be shorter: ```Get server address``` as it could either be the conf file OR the default value.
self[key] = value
def get_default_endpoint(self):
+ """Get server address.
Returns:
str: Forseti server endpoint
|
codereview_python_data_12547
|
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
"""
- config_utils.autoconfigure()
-
- self.tendermint_host = tendermint_host or bigchaindb.config['tendermint']['host']
- self.tendermint_port = tendermint_port or bigchaindb.config['tendermint']['port']
- self.endpoint = 'http://{}:{}/'.format(self.tendermint_host, self.tendermint_port)
- self.mode_list = ('broadcast_tx_async',
- 'broadcast_tx_sync',
- 'broadcast_tx_commit')
consensusPlugin = bigchaindb.config.get('consensus_plugin')
if consensusPlugin:
This code has been added to the old `core.py`. As a part of the cleanup, we decided not to add any code to the older files. So, it would be better to move this to `tendermint/lib.py`
connection (:class:`~bigchaindb.backend.connection.Connection`):
A connection to the database.
"""
consensusPlugin = bigchaindb.config.get('consensus_plugin')
if consensusPlugin:
|
codereview_python_data_12555
|
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
In this measure the weight is interpreted as the connection strength.
Returns
-------
nodes : dictionary
```suggestion In this measure the weight is interpreted as the connection strength. ``` There needs to be a blank line in between one section and the next section's header, otherwise the docstring won't be parsed properly.
If None, all edge weights are considered equal.
Otherwise holds the name of the edge attribute used as weight.
In this measure the weight is interpreted as the connection strength.
+
Returns
-------
nodes : dictionary
|
codereview_python_data_12559
|
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
-import sys
import os
import stat
from azurelinuxagent.common.utils import shellutil
from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler
from tests.tools import *
Same nit as elsewhere, recommend alphabetical order
# http://msdn.microsoft.com/en-us/library/cc227282%28PROT.10%29.aspx
# http://msdn.microsoft.com/en-us/library/cc227259%28PROT.13%29.aspx
import os
import stat
+import sys
from azurelinuxagent.common.utils import shellutil
from azurelinuxagent.daemon.resourcedisk import get_resourcedisk_handler
from tests.tools import *
|
codereview_python_data_12560
|
'sendgrid==5.6.0',
'simple-crypt==4.1.7',
'unicodecsv==0.14.1',
- 'xxhash==1.4.1',
# Setup related.
'grpcio==1.18.0',
'grpcio-tools==1.18.0',
Have you looked into the hash functions in hashlib for python? They hash to the same value by default. Installing a new library just for hashing might not be ideal. ``` import hashlib hashlib.algorithms_available {'sha3_256', 'shake256', 'sha3_512', 'ripemd160', 'sha512', 'sha384', 'md5', 'shake_256', 'md5-sha1', 'md4', 'whirlpool', 'sha3-512', 'blake2b512', 'sha224', 'shake_128', 'sha1', 'sha3-384', 'blake2b', 'shake128', 'sha3-256', 'sha512-256', 'sha3-224', 'sha256', 'sm3', 'blake2s256', 'sha3_384', 'blake2s', 'sha512-224', 'sha3_224'} ```
'sendgrid==5.6.0',
'simple-crypt==4.1.7',
'unicodecsv==0.14.1',
# Setup related.
'grpcio==1.18.0',
'grpcio-tools==1.18.0',
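A minimal sketch of the standard-library alternative the reviewer points to; `hashlib` needs no extra dependency and yields stable digests (the resource string below is hypothetical):
```python
import hashlib

resource_id = 'organizations/1234567890/projects/demo'
digest = hashlib.sha256(resource_id.encode('utf-8')).hexdigest()
print(digest[:16])       # short, stable fingerprint
```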
|
codereview_python_data_12565
|
strethaddress_to_identifier('0xfAE4Ee59CDd86e3Be9e8b90b53AA866327D7c090'): 'CPC',
'ONE-2': 'ONE',
strethaddress_to_identifier('0xf4CD3d3Fda8d7Fd6C5a500203e38640A70Bf9577'): 'YFDAI',
- 'BTMX': 'ASD',
strethaddress_to_identifier('0xEA1ea0972fa092dd463f2968F9bB51Cc4c981D71'): 'MODEFI',
strethaddress_to_identifier('0x824a50dF33AC1B41Afc52f4194E2e8356C17C3aC'): 'KICK',
}
What's this? I don't see any BTMX asset in the DB
strethaddress_to_identifier('0xfAE4Ee59CDd86e3Be9e8b90b53AA866327D7c090'): 'CPC',
'ONE-2': 'ONE',
strethaddress_to_identifier('0xf4CD3d3Fda8d7Fd6C5a500203e38640A70Bf9577'): 'YFDAI',
+ strethaddress_to_identifier('0xcca0c9c383076649604eE31b20248BC04FdF61cA'): 'ASD',
strethaddress_to_identifier('0xEA1ea0972fa092dd463f2968F9bB51Cc4c981D71'): 'MODEFI',
strethaddress_to_identifier('0x824a50dF33AC1B41Afc52f4194E2e8356C17C3aC'): 'KICK',
}
|
codereview_python_data_12568
|
cycle_timestamp, configs, admin_api_client, dao)
])
else:
- raise inventory_errors.LoadDataPipelineError(
'Unable to inventory groups with specified arguments:\n%s',
- self.configs)
def _run_pipelines(pipelines):
"""Run the pipelines to load data.
nit: if you are raising this here, then we need to add an error handler upstream for this; otherwise this will show up as a fatal exception to user
cycle_timestamp, configs, admin_api_client, dao)
])
else:
+ raise api_errors.ApiExecutionError(
'Unable to inventory groups with specified arguments:\n%s',
+ configs)
+
+ return pipelines
def _run_pipelines(pipelines):
"""Run the pipelines to load data.
|
codereview_python_data_12569
|
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
- def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
It might not be a good idea to hardcode the shape of images in this place. Can we use img.shape?
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
+ def forward_dummy(self, imgs):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
|
codereview_python_data_12571
|
if params["local_listen_port"] == 12400:
# If local_listen_port is set to LightGBM's default value
# then we find a random open port for each worker
- worker_address_to_port = client.run(_find_random_open_port)
else:
# If another port was specified then we search for an open port
# in [local_listen_port, local_listen_port+999] for each worker
Can you please add `workers=worker_map.keys()` to this call? That would avoid running this on workers that don't hold any of the training data.
if params["local_listen_port"] == 12400:
# If local_listen_port is set to LightGBM's default value
# then we find a random open port for each worker
+ worker_address_to_port = client.run(_find_random_open_port,
+ workers=worker_map.keys())
else:
# If another port was specified then we search for an open port
# in [local_listen_port, local_listen_port+999] for each worker
|
codereview_python_data_12572
|
return PolygonMasks(resized_masks, *out_shape)
def shear(self,
- shear_matrix,
out_shape,
- fill_val=0,
- flags=cv2.INTER_NEAREST,
- borderMode=cv2.BORDER_CONSTANT):
raise NotImplementedError
def to_bitmap(self):
We also need to implement shear operation for polygon masks.
return PolygonMasks(resized_masks, *out_shape)
def shear(self,
out_shape,
+ magnitude,
+ direction='horizontal',
+ border_value=0,
+ interpolation='bilinear'):
raise NotImplementedError
def to_bitmap(self):
|
codereview_python_data_12573
|
('graph vc', 'brown', 'brown'),
('graph rps', 'dark green', 'dark green'),
('graph fail', 'dark red', 'dark red'),
- ('graph r_time', 'dark blue', 'dark blue'),
('graph lt', 'dark cyan', 'dark cyan'),
('graph cn', 'dark magenta', 'dark magenta'),
('stat-hdr', 'light gray', 'dark blue'),
Why changing string constants?
('graph vc', 'brown', 'brown'),
('graph rps', 'dark green', 'dark green'),
('graph fail', 'dark red', 'dark red'),
+ ('graph rt', 'dark blue', 'dark blue'),
('graph lt', 'dark cyan', 'dark cyan'),
('graph cn', 'dark magenta', 'dark magenta'),
('stat-hdr', 'light gray', 'dark blue'),
|
codereview_python_data_12574
|
'SELECT location FROM user_credentials UNION '
'SELECT location FROM amm_swaps',
)
- locations_raw = cursor.fetchall()
- locations = {Location.deserialize_from_db(loc[0]) for loc in locations_raw}
cursor.execute('SELECT DISTINCT type FROM amm_events')
- for event_type in cursor.fetchall():
if EventType.deserialize_from_db(event_type[0]) in (EventType.MINT_SUSHISWAP, EventType.BURN_SUSHISWAP): # noqa: E501
locations.add(Location.SUSHISWAP)
else:
you don't need fetchall here. You would end up iterating twice. ```suggestion for event_type in cursor: ```
'SELECT location FROM user_credentials UNION '
'SELECT location FROM amm_swaps',
)
+ locations = {Location.deserialize_from_db(loc[0]) for loc in cursor}
cursor.execute('SELECT DISTINCT type FROM amm_events')
+ for event_type in cursor:
if EventType.deserialize_from_db(event_type[0]) in (EventType.MINT_SUSHISWAP, EventType.BURN_SUSHISWAP): # noqa: E501
locations.add(Location.SUSHISWAP)
else:
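A self-contained sketch (an in-memory sqlite3 database, not the rotki schema) of the reviewer's point that a DB cursor is already an iterator, so `fetchall()` can be dropped when the rows are consumed only once:
```python
import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE amm_events (type TEXT)')
conn.executemany('INSERT INTO amm_events VALUES (?)', [('mint',), ('burn',)])

cursor = conn.execute('SELECT DISTINCT type FROM amm_events')
for (event_type,) in cursor:             # rows streamed lazily, no fetchall()
    print(event_type)
```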
|
codereview_python_data_12575
|
:param ppTime: the time at which PRE-PREPARE was sent
:param req: the client REQUEST
"""
- return self.default_executor(DOMAIN_LEDGER_ID, ppTime, reqs,
stateRoot, txnRoot)
def update_txn_with_extra_data(self, txn):
This is an API method defined in Plenum... Please be careful when renaming API methods
:param ppTime: the time at which PRE-PREPARE was sent
:param req: the client REQUEST
"""
+ return self.default_executer(DOMAIN_LEDGER_ID, ppTime, reqs,
stateRoot, txnRoot)
def update_txn_with_extra_data(self, txn):
|
codereview_python_data_12577
|
else:
line_width = width
- if isinstance(style, str):
linestyle = style
elif np.iterable(style):
if len(style) == len(edge_pos):
I think this logic can be simplified a bit: ```suggestion if np.iterable(style) and not isinstance(style, str): if len(style) == len(edge_pos): linestyle = style[i] else: # Cycle through styles linestyle = style[i % len(style)] else: linestyle = style ```
else:
line_width = width
+ if isinstance(style, str) or isinstance(style, tuple):
linestyle = style
elif np.iterable(style):
if len(style) == len(edge_pos):
|
codereview_python_data_12579
|
"""Return the name of this builtin function."""
raise NotImplementedError
-class BundledFunction(BuiltinFunction):
- """A builtin function that bundles multiple functions.
Parameters
----------
Why subclass `BuiltinFunction`? `BundledFunction` could be comprised of both UDF and builtin, especially when used to bundle message functions.
"""Return the name of this builtin function."""
raise NotImplementedError
+class BundledFunction(object):
+ """A utility class that bundles multiple functions.
Parameters
----------
|
codereview_python_data_12581
|
dest_lng
)
- if self.bot.config.walk == 0:
- self.speed = self.bot.config.walk_min - random() * (self.bot.config.walk_max - self.bot.config.walk_min)
- else:
- self.speed = self.bot.config.walk
self.destLat = dest_lat
self.destLng = dest_lng
I think this should be `self.speed = self.bot.config.walk_max - random() * (self.bot.config.walk_max - self.bot.config.walk_min)` ?
dest_lng
)
+ self.speed = self.bot.config.walk_max - random() * (self.bot.config.walk_max - self.bot.config.walk_min)
self.destLat = dest_lat
self.destLng = dest_lng
|
codereview_python_data_12585
|
console.logger.info("streamlink is running as root! Be careful!")
-def check_current_versions():
"""Show current installed versions"""
if args.loglevel == "debug":
# MAC OS X
maybe this method should have a different name, like log current versions? might be confusing otherwise...
console.logger.info("streamlink is running as root! Be careful!")
+def log_current_versions():
"""Show current installed versions"""
if args.loglevel == "debug":
# MAC OS X
|
codereview_python_data_12586
|
try:
validate_listen(listen, listen_type)
listens.append(listen)
- except APIBadRequest as e:
- current_app.logger.info(str(e))
return listens
this still logs the error and the raise is discarded. Should it be the other way round?
try:
validate_listen(listen, listen_type)
listens.append(listen)
+ except APIBadRequest:
+ pass
return listens
|
codereview_python_data_12590
|
def test_changed(self):
config = [
- ([1,2,3,4,4,5,5,5,6,7], [0,0,0,0,1,0,1,1,0,0]),
- ([None,None,None,None,0,0,0,None,None,1], [0,1,1,1,0,1,1,0,1,0])
]
for i, c in enumerate(config):
name = "collectd.test-db{0}.load.value".format(i + 1)
For why the test is failing: I think that that the first list of expected values should be `[0,1,1,1,0,1,0,0,1,1]`, and the 2nd should be either `[0,0,0,0,0,0,0,0,0,1]` or `[0,1,0,0,1,0,0,1,0,1]`, depending on transformNull/drawNullAsZero.
def test_changed(self):
config = [
+ [[1,2,3,4,4,5,5,5,6,7], [0,1,1,1,0,1,0,0,1,1]],
+ [[None,None,None,None,0,0,0,None,None,1], [0,0,0,0,0,0,0,0,0,1]]
]
for i, c in enumerate(config):
name = "collectd.test-db{0}.load.value".format(i + 1)
|
codereview_python_data_12596
|
# TODO: deprecate `extra_convs_on_inputs`
warnings.simplefilter('once')
warnings.warn(
- '"extra_convs_on_inputs" will be deprecated in the future,'
'Please use "add_extra_convs"', DeprecationWarning)
self.add_extra_convs = 'on_input'
else:
in the future -> in v2.9.0
# TODO: deprecate `extra_convs_on_inputs`
warnings.simplefilter('once')
warnings.warn(
+ '"extra_convs_on_inputs" will be deprecated in v2.9.0,'
'Please use "add_extra_convs"', DeprecationWarning)
self.add_extra_convs = 'on_input'
else:
|
codereview_python_data_12607
|
min_count = kwargs.pop("min_count")
if min_count != 0:
raise NotImplementedError(
- f"OmniSci' sum does not support such set of parameters: min_count={min_count}"
)
return self._agg("sum", **kwargs)
```suggestion f"OmniSci's sum does not support such set of parameters: min_count={min_count}" ``` here and in other places
min_count = kwargs.pop("min_count")
if min_count != 0:
raise NotImplementedError(
+ f"OmniSci's sum does not support such set of parameters: min_count={min_count}."
)
return self._agg("sum", **kwargs)
|
codereview_python_data_12610
|
"""
if len(args) <= 0: # pylint: disable=len-as-condition
args = sys.argv[1:]
- command, force, verbose, debug, conf_file_path, log_collector_mode = parse_args(args)
if command == "version":
version()
elif command == "help":
This seems like we're returning a lot of arguments, and might need to return more later on (with the addition of more options). Would the return type be better as a specific class, or maybe a dict?
"""
if len(args) <= 0: # pylint: disable=len-as-condition
args = sys.argv[1:]
+ command, force, verbose, debug, conf_file_path, log_collector_full_mode = parse_args(args)
if command == "version":
version()
elif command == "help":
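One way to act on the review's suggestion is a named tuple (or a dataclass), so call sites read fields by name and new options don't keep widening a tuple unpacking. The field names below simply mirror the values currently being unpacked; the stubbed-out body is illustrative:

```python
from collections import namedtuple

ParsedArgs = namedtuple(
    "ParsedArgs",
    "command force verbose debug conf_file_path log_collector_full_mode",
)

def parse_args(args):
    # ...existing option parsing would populate these; defaults shown for brevity...
    return ParsedArgs("help", False, False, False, None, False)

parsed = parse_args([])
if parsed.command == "version":      # call sites stay readable as options grow
    print("version requested")
```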
|
codereview_python_data_12614
|
"""
Return Series as ndarray or ndarray-like depending on the dtype.
"""
-
- def values(df):
- return df.values
-
- return self._default_to_pandas(values)
def add(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
```suggestion return self.to_numpy().flatten() ```
"""
Return Series as ndarray or ndarray-like depending on the dtype.
"""
+ return self.to_numpy()
def add(self, other, level=None, fill_value=None, axis=0): # noqa: PR01, RT01, D200
"""
|
codereview_python_data_12615
|
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
- object_hook: function given to `json.loads`
Returns
-------
```suggestion object_hook : callable or None, optional (default=None) Detailed description of the argument ended with full stop. ```
What type of feature importance should be dumped.
If "split", result contains numbers of times the feature is used in a model.
If "gain", result contains total gains of splits which use the feature.
+ object_hook : callable or None, optional (default=None)
+ If not None, object_hook is a function called while parsing the json
+ string returned by the C API. It may be used to alter the json, to store
+ specific values while building the json structure. It avoids
+ walking through the structure again. It saves a significant amount
+ of time if the number of trees is huge.
+ Signature is `def object_hook(node: dict) -> dict`.
+ None is equivalent to `lambda node: node`.
+ See documentation of `json.loads` for further details.
Returns
-------
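A short, self-contained example of how `json.loads` applies an `object_hook`: the hook is called once for every decoded JSON object, bottom-up, which is why per-node work can happen in the same parsing pass. The tiny JSON below merely stands in for a model dump and is not necessarily LightGBM's real format:

```python
import json

def object_hook(node):
    # tag leaf nodes while the tree is still being parsed
    if "leaf_value" in node:
        node["is_leaf"] = True
    return node

model_str = '{"tree_structure": {"leaf_value": 0.5}}'
parsed = json.loads(model_str, object_hook=object_hook)
assert parsed["tree_structure"]["is_leaf"] is True
```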
|
codereview_python_data_12617
|
import collections
import random
from tests import assert_equals
#-------------------------------------------------------------------------------
# Define namedtuple of test parameters
```python from tests import noop ... def test_ftrl_construct_wrong_inter(): with pytest.raises(TypeError) as e: noop(core.Ftrl(inter = 2)) ```
import collections
import random
from tests import assert_equals
+from tests import noop
#-------------------------------------------------------------------------------
# Define namedtuple of test parameters
|
codereview_python_data_12624
|
def test_wrong_source():
common_msg = "External Source in parallel mode (when `parallel=True`) accepts as `source` only *. Got {} instead"
- expected_error_msgs = (
common_msg.format("a callable that does not accept arguments"),
"External source callback must be a callable with 0 or 1 argument",
common_msg.format("an iterable"),
- common_msg.format("a generator function"))
assert len(disallowed_sources) == len(expected_error_msgs)
for source, error_msg in zip(disallowed_sources, expected_error_msgs):
yield raises(TypeError, error_msg)(check_source_build), source
Any reason for using a tuple instead of a list for the expected error msgs?
def test_wrong_source():
common_msg = "External Source in parallel mode (when `parallel=True`) accepts as `source` only *. Got {} instead"
+ expected_error_msgs = [
common_msg.format("a callable that does not accept arguments"),
"External source callback must be a callable with 0 or 1 argument",
common_msg.format("an iterable"),
+ common_msg.format("a generator function")]
assert len(disallowed_sources) == len(expected_error_msgs)
for source, error_msg in zip(disallowed_sources, expected_error_msgs):
yield raises(TypeError, error_msg)(check_source_build), source
|
codereview_python_data_12629
|
entries, where key is padded to a uniform width.
"""
- return format_pair(list(d.items()))
def format_text(text: TTextType) -> typing.Iterator[TViewLine]:
There's no need to wrap `d.items()` in `list()`.
entries, where key is padded to a uniform width.
"""
+ return format_pairs(d.items())
def format_text(text: TTextType) -> typing.Iterator[TViewLine]:
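Why the `list()` wrapper is redundant: `dict.items()` already yields `(key, value)` pairs, so the extra call only builds a throwaway copy before `format_pairs` iterates over it:

```python
d = {"a": 1, "b": 2}
assert list(d.items()) == [("a", 1), ("b", 2)]   # items() is already iterable pairs
```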
|
codereview_python_data_12636
|
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
- # TODO: support multiple images per gpu (only minor changes are needed)
samples_per_gpu = 1
if 'samples_per_gpu' in cfg.data.test:
samples_per_gpu = cfg.data.test.pop('samples_per_gpu')
Please merge with master so that `samples_per_gpu` can be specified through command line.
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
samples_per_gpu = 1
if 'samples_per_gpu' in cfg.data.test:
samples_per_gpu = cfg.data.test.pop('samples_per_gpu')
|
codereview_python_data_12641
|
"""
if not self.bundles:
@self.register('bundle',
- calendar_name=('NYSE'),
start_session=pd.Timestamp('2014', tz='UTC'),
end_session=pd.Timestamp('2014', tz='UTC'))
def _(environ,
we can drop the parens
"""
if not self.bundles:
@self.register('bundle',
+ calendar_name='NYSE',
start_session=pd.Timestamp('2014', tz='UTC'),
end_session=pd.Timestamp('2014', tz='UTC'))
def _(environ,
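The dropped parentheses never created a tuple in the first place — only a trailing comma would — so `('NYSE')` and `'NYSE'` are the same value:

```python
assert ('NYSE') == 'NYSE'               # parentheses alone change nothing
assert isinstance(('NYSE',), tuple)     # the comma is what makes a tuple
```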
|
codereview_python_data_12647
|
def get_dependencies(self, cuda_version=None, idx=None):
- """Obtains dependant packages list if exists. Otherwise return None
Parameters
----------
This doc made me check what happens if you pass `None` to `join`. Unsurprisingly, it fails :)
def get_dependencies(self, cuda_version=None, idx=None):
+ """Obtains dependant packages list if exists. Otherwise return empty string
Parameters
----------
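The reviewer's aside is about downstream `join()` calls: joining works fine with an empty string in the sequence but raises if `None` sneaks in, which is why the fallback changed from `None` to `''`. The command below is illustrative, not the real call site:

```python
parts = ["pip", "install", ""]          # empty dependency string: harmless
print(" ".join(parts))                  # -> "pip install " (just a trailing space)

try:
    " ".join(["pip", "install", None])
except TypeError as e:
    print(e)                            # join() refuses None entries
```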
|
codereview_python_data_12649
|
modin_df1.combine_first(modin_df2), pandas_df1.combine_first(pandas_df2)
)
- def test_corr(self):
- data = test_data_values[0]
modin_result = pd.DataFrame(data).corr()
pandas_result = pandas.DataFrame(data).corr()
df_equals(modin_result, pandas_result)
We can parametrize this test since it is testing correctness now.
modin_df1.combine_first(modin_df2), pandas_df1.combine_first(pandas_df2)
)
+ @pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
+ def test_corr(self, data):
modin_result = pd.DataFrame(data).corr()
pandas_result = pandas.DataFrame(data).corr()
df_equals(modin_result, pandas_result)
|
codereview_python_data_12651
|
FuncThread(do_start).start()
LOG.info('Elasticsearch is starting for the first time, please wait..')
data['Created'] = True
- pass
result = get_domain_status(domain_name)
# record event
nit: `pass` not required here
FuncThread(do_start).start()
LOG.info('Elasticsearch is starting for the first time, please wait..')
data['Created'] = True
result = get_domain_status(domain_name)
# record event
|
codereview_python_data_12667
|
def test_check_not_empty_string_throws_on_empty_string():
with pytest.raises(errors.DataValidationError):
- validation.check_not_empty_string("")
def test_check_not_empty_string_passes_on_non_empty_string():
thrown = None
try:
- validation.check_not_empty_string("foobar")
except Exception as ex:
thrown = ex
assert thrown is None
There are a couple of double quotes here, too.
def test_check_not_empty_string_throws_on_empty_string():
with pytest.raises(errors.DataValidationError):
+ validation.check_not_empty_string('')
def test_check_not_empty_string_passes_on_non_empty_string():
thrown = None
try:
+ validation.check_not_empty_string('foobar')
except Exception as ex:
thrown = ex
assert thrown is None
|