id | content
---|---
codereview_python_data_8289 | build_directory=build_directory))
-def _update_environment_for_testcase(testcase, build_directory, application):
"""Update environment variables that depend on the test case."""
commands.update_environment_for_job(testcase.job_definition)
environment.set_value('JOB_NAME', testcase.job_type)
# Override app name if explicitly specified.
if application:
- environment.set_value('APP_NAME', application)
fuzzer_directory = setup.get_fuzzer_directory(testcase.fuzzer_name)
environment.set_value('FUZZER_DIR', fuzzer_directory)
application_override sounds better here. in args.application, that seems ok.
build_directory=build_directory))
+def _update_environment_for_testcase(testcase, build_directory, application_override):
"""Update environment variables that depend on the test case."""
commands.update_environment_for_job(testcase.job_definition)
environment.set_value('JOB_NAME', testcase.job_type)
# Override app name if explicitly specified.
if application:
+ environment.set_value('APP_NAME', application_override)
fuzzer_directory = setup.get_fuzzer_directory(testcase.fuzzer_name)
environment.set_value('FUZZER_DIR', fuzzer_directory) |
codereview_python_data_8299 | 'Graph has more than one node type; please specify a dict for src_nodes.')
src_nodes = {g.ntypes[0]: src_nodes}
src_node_ids = [
- utils.toindex(src_nodes.get(ntype, []), g._idtype_str).tousertensor(
- ctx=F.to_backend_ctx(g._graph.ctx))
for ntype in g.ntypes]
src_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in src_node_ids]
I feel you can just use `F.copy_to(F.tensor(...), F.to_backend_ctx(...))`. If the input tensor is already on the input device it does not actually perform a copy.
'Graph has more than one node type; please specify a dict for src_nodes.')
src_nodes = {g.ntypes[0]: src_nodes}
src_node_ids = [
+ F.copy_to(F.tensor(src_nodes.get(ntype, []), dtype=g._idtype_str), \
+ F.to_backend_ctx(g._graph.ctx)) \
for ntype in g.ntypes]
src_node_ids_nd = [F.to_dgl_nd(nodes) for nodes in src_node_ids] |
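The reviewer's point about `F.copy_to` being a no-op for tensors already on the target device can be seen in plain PyTorch (a minimal sketch, not DGL's backend wrapper `F` itself): `Tensor.to` hands back the same object when device and dtype already match.
```
import torch

t = torch.arange(3)                    # already lives on the CPU
moved = t.to(torch.device("cpu"))      # no copy is performed here
assert moved is t                      # the very same tensor object comes back
```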
codereview_python_data_8308 | # out/<build_dir>/ at time of build generation (chromium only).
path = utils.remove_prefix(path, '../../')
- # Remove /proc/self/cwd prefix if exists.
path = utils.remove_prefix(path, '/proc/self/cwd/')
# Cross-platform way to determine path absoluteness.
Can you mention that this is something bazel can add?
# out/<build_dir>/ at time of build generation (chromium only).
path = utils.remove_prefix(path, '../../')
+ # Remove /proc/self/cwd prefix added by Bazel.
path = utils.remove_prefix(path, '/proc/self/cwd/')
# Cross-platform way to determine path absoluteness. |
codereview_python_data_8316 | open_target = target_arg
else:
open_target = config.get('general', 'new-instance-open-target')
- win_id = mainwindow.get_window(via_ipc, open_target=open_target)
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
log.init.debug("Startup URL {}".format(cmd))
With the `and target_arg` you're checking if it's not the default argument, right? If so, please use `and target_arg is not None` instead.
open_target = target_arg
else:
open_target = config.get('general', 'new-instance-open-target')
+ win_id = mainwindow.get_window(via_ipc, force_target=open_target)
tabbed_browser = objreg.get('tabbed-browser', scope='window',
window=win_id)
log.init.debug("Startup URL {}".format(cmd)) |
codereview_python_data_8320 | res_handler = result_handler_plist_to_file.PlistToFile(
buildaction,
report_output,
- lock,
export_plist_path)
res_handler.print_steps = args.print_steps
Note to a future refactoring: it might scale better to have a seperate post processing step to plist and the next stage would consume the plist regardless what happened before, so no branching on the analysis type is required here.
res_handler = result_handler_plist_to_file.PlistToFile(
buildaction,
report_output,
export_plist_path)
res_handler.print_steps = args.print_steps |
codereview_python_data_8328 | super(OrderedDefaultDict, self).__init__(*args, **kwargs)
self.default_factory = default_factory
- def __getitem__(self, key):
- try:
- return super(OrderedDefaultDict, self).__getitem__(self, key)
- except KeyError:
- return self.__missing__(key)
-
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
You don't need the `__getitem__` here, because dict will already call `__missing__` if the key is not found!
super(OrderedDefaultDict, self).__init__(*args, **kwargs)
self.default_factory = default_factory
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key) |
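A minimal, self-contained sketch of why the `__getitem__` override was unnecessary: `dict.__getitem__` already falls back to `__missing__` in subclasses, which is exactly how `collections.defaultdict` is built.
```
from collections import OrderedDict

class OrderedDefaultDict(OrderedDict):
    def __init__(self, default_factory=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.default_factory = default_factory

    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        self[key] = value = self.default_factory()
        return value

d = OrderedDefaultDict(list)
d["a"].append(1)     # no __getitem__ override needed: dict calls __missing__ itself
print(d["a"])        # [1]
```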
codereview_python_data_8332 | async def _add_fact_relationships(self, criteria=None):
relationships = await self.dao.get('core_relationships', criteria)
- for r in relationships:
- r.pop('source')
- r.pop('link_id')
- if r.get('target'):
- r['target'] = (await self.dao.get('core_fact', dict(id=r.get('target'))))[0]['value']
- return relationships
\ No newline at end of file
Are you popping just to discard the data? If so, why?
async def _add_fact_relationships(self, criteria=None):
relationships = await self.dao.get('core_relationships', criteria)
+ return [dict(edge=r.get('edge'), target=(await self.dao.get('core_fact', dict(id=r.get('target'))))[0])
+ for r in relationships if r.get('target')]
\ No newline at end of file |
codereview_python_data_8340 | with self.assertRaisesMessage(CommandError, 'Course "645343" does not exist'):
management.call_command('exportcourse', 645343, stdout=StringIO(), verbosity=3)
- def test_argument_type(self):
- with self.assertRaisesMessage(CommandError, "Error: argument course_id: invalid int value: 'dgdnfgn'"):
- management.call_command('exportcourse', "dgdnfgn", stdout=StringIO(), verbosity=3)
this argument type check is a default Django feature, not a feature of the tested code, hence we don't need to test this, it's already tested at Django probably. I'll prepare a short MR know to show that type of tests we need more of here
with self.assertRaisesMessage(CommandError, 'Course "645343" does not exist'):
management.call_command('exportcourse', 645343, stdout=StringIO(), verbosity=3)
+ def test_language_preserved(self):
+ with translation.override('en'):
+ management.call_command('exportcourse', 2, stdout=StringIO())
+ self.assertEqual(translation.get_language(), 'en', "Same language") |
codereview_python_data_8345 | self.g = g
def forward(self, score):
- """Forward function."""
g = self.g.local_var()
g.edata['s'] = score
g.update_all(fn.copy_e('s', 'm'), fn.max('m', 'smax'))
I suggest preserving the pseudo code so that users could understand the process.
self.g = g
def forward(self, score):
+ """Forward function.
+
+ Pseudo-code:
+ score = dgl.EData(g, score)
+ score_max = score.dst_max() # of type dgl.NData
+ score = score - score_max # edge_sub_dst, ret dgl.EData
+ score_sum = score.dst_sum() # of type dgl.NData
+ out = score / score_sum # edge_div_dst, ret dgl.EData
+ return out.data
+ """
g = self.g.local_var()
g.edata['s'] = score
g.update_all(fn.copy_e('s', 'm'), fn.max('m', 'smax')) |
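For reference, a hedged sketch of the complete edge softmax that the pseudo-code describes, written against DGL's message-passing API (`local_scope`, `update_all` and `apply_edges` are assumed to be available in the DGL version in use):
```
import torch
import dgl
import dgl.function as fn

def edge_softmax(g, score):
    # Normalise edge scores over the in-edges of each destination node.
    with g.local_scope():
        g.edata['s'] = score
        g.update_all(fn.copy_e('s', 'm'), fn.max('m', 'smax'))               # score.dst_max()
        g.apply_edges(lambda e: {'s': (e.data['s'] - e.dst['smax']).exp()})  # edge_sub_dst, exp
        g.update_all(fn.copy_e('s', 'm'), fn.sum('m', 'ssum'))               # score.dst_sum()
        g.apply_edges(lambda e: {'out': e.data['s'] / e.dst['ssum']})        # edge_div_dst
        return g.edata['out']

g = dgl.graph(([0, 1, 2], [1, 1, 2]))
print(edge_softmax(g, torch.tensor([1.0, 2.0, 3.0])))
```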
codereview_python_data_8346 | -from datetime import datetime
import logging
-
-from xml.sax.saxutils import (
- escape as xml_escape,
- quoteattr,
-)
-
import os
from bulk_update.helper import bulk_update
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required
Nit: Let's keep import statements sorted alphabetically.
import logging
import os
from bulk_update.helper import bulk_update
+from datetime import datetime
+
from django.conf import settings
from django.contrib import messages
from django.contrib.auth.decorators import login_required |
codereview_python_data_8356 | """
if type(G) == nx.Graph:
- return nx.number_of_nodes(G) == 0 or (nx.is_connected(G) and ( nx.number_of_edges(G) == nx.number_of_nodes(G)-1 ) and G.number_of_selfloops()==0)
else:
return False
The last check, `G.number_of_selfloops()==0`, is useless: a connected graph with self-loops necessarily has more than n - 1 edges. On a different note, try to format the code so that line length is at most 79 characters.
"""
if type(G) == nx.Graph:
+ return nx.number_of_nodes(G) == 0 \
+ or (nx.is_connected(G) \
+ and (nx.number_of_edges(G) == nx.number_of_nodes(G)-1 ))
else:
return False |
codereview_python_data_8358 | it seems you need to keep both history node embeddings for each layer as well as aggregated history for each layer. Is it right? |
codereview_python_data_8361 | }
}).to_dict()
- # Precompue pair so lat/lon, easting/northing, mapbox coord values
self.lon_range1, self.lat_range1 = (10, 30), (20, 40)
self.easting_range1, self.northing_range1 = Tiles.lon_lat_to_easting_northing(
self.lon_range1, self.lat_range1
```suggestion # Precompute pair so lat/lon, easting/northing, mapbox coord values ``` (Can't quite parse it, though!)
}
}).to_dict()
+ # Precompute pair so lat/lon, easting/northing, mapbox coord values
self.lon_range1, self.lat_range1 = (10, 30), (20, 40)
self.easting_range1, self.northing_range1 = Tiles.lon_lat_to_easting_northing(
self.lon_range1, self.lat_range1 |
codereview_python_data_8364 | "creationTimestamp": "2017-09-25T12:33:24.312-07:00",
"name": "default1",
"description": "Default network for the project",
- "selfLink": "https://www.googleapis.com/compute/v1/projects/project1/global/networks/default",
"autoCreateSubnetworks": true,
"subnetworks": [
"https://www.googleapis.com/compute/v1/projects/project1/regions/europe-west1/subnetworks/default",
nit: name should match selflink, change from default2 to thisisatest and update the expected names below.
"creationTimestamp": "2017-09-25T12:33:24.312-07:00",
"name": "default1",
"description": "Default network for the project",
+ "selfLink": "https://www.googleapis.com/compute/v1/projects/project1/global/networks/default1",
"autoCreateSubnetworks": true,
"subnetworks": [
"https://www.googleapis.com/compute/v1/projects/project1/regions/europe-west1/subnetworks/default", |
codereview_python_data_8368 | import os
from tensorflow.python.framework import test_util
-from tensorflow_addons.custom_ops.text.python import skip_gram_ops
from tensorflow_addons.custom_ops import text
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
swap those two lines?
import os
from tensorflow.python.framework import test_util
from tensorflow_addons.custom_ops import text
+from tensorflow_addons.custom_ops.text.python import skip_gram_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes |
codereview_python_data_8371 | class _KerasModel(model_lib.Model):
"""Internal wrapper class for tf.keras.Model objects."""
- def __init__(self, inner_model, dummy_batch, loss_fns, loss_weights, metrics):
# NOTE: sub-classed `tf.keras.Model`s do not have fully initialized
# variables until they are called on input. We forced that here.
```suggestion def __init__(self, inner_model, dummy_batch, loss_fns, loss_weights=None, metrics=None): ``` Lets add defaults for everything after `loss_fns`
class _KerasModel(model_lib.Model):
"""Internal wrapper class for tf.keras.Model objects."""
+ def __init__(self, inner_model, dummy_batch, loss_fns, loss_weights=None, metrics=None):
# NOTE: sub-classed `tf.keras.Model`s do not have fully initialized
# variables until they are called on input. We forced that here. |
codereview_python_data_8373 | Examples
--------
>>> G = nx.path_graph(3)
>>> is_threshold_graph(G)
True
``` >>> from networkx.algorithms.threshold import is_threshold_graph ```
Examples
--------
+ >>> from networkx.algorithms.threshold import is_threshold_graph
>>> G = nx.path_graph(3)
>>> is_threshold_graph(G)
True |
codereview_python_data_8374 | 'violation_data': violation.get('violation_data')
},
'source_id': 'FORSETI',
- 'category': violation.get('rule_name'),
- 'url': 'table:{}/id:{}'.format(
- VIOLATIONS_TABLE, violation.get('id'))
}
findings.append(finding)
return findings
Is the 'url' property required? I think we can include the id inside 'properties:' above, having a url like this seems weird.
'violation_data': violation.get('violation_data')
},
'source_id': 'FORSETI',
+ 'category': violation.get('rule_name')
}
findings.append(finding)
return findings |
codereview_python_data_8375 | init_func=init_zero_func)
kvclient.register_push_handler('data_3', add_push)
kvclient.map_shared_data(partition_book=gpb)
- id_tensor = id_tensor = F.tensor([0,2,4], F.int64)
data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32)
time.sleep(kvclient.client_id + 1)
print("add...")
assign `id_tensor` twice?
init_func=init_zero_func)
kvclient.register_push_handler('data_3', add_push)
kvclient.map_shared_data(partition_book=gpb)
data_tensor = F.tensor([[6.,6.],[6.,6.],[6.,6.]], F.float32)
time.sleep(kvclient.client_id + 1)
print("add...") |
codereview_python_data_8379 | # build a where clause to match all of the words in any order
# given the search term "a b", the WHERE clause would be:
- # ((url || title) LIKE '%a%') AND ((url || title) LIKE '%b%')
where_clause = ' AND '.join(
"(url || ' ' || title) LIKE :{} escape '\\'".format(i)
for i in range(len(words)))
Like this ```suggestion # ((url || ' ' || title) LIKE '%a%') AND ((url || ' ' || title) LIKE '%b%') ```
# build a where clause to match all of the words in any order
# given the search term "a b", the WHERE clause would be:
+ # ((url || ' ' || title) LIKE '%a%') AND
+ # ((url || ' ' || title) LIKE '%b%')
where_clause = ' AND '.join(
"(url || ' ' || title) LIKE :{} escape '\\'".format(i)
for i in range(len(words))) |
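To see what the corrected comment describes, the clause-building expression can be run on its own (a standalone snippet; the real code binds the `:0`, `:1` placeholders elsewhere):
```
words = ["a", "b"]
where_clause = ' AND '.join(
    "(url || ' ' || title) LIKE :{} escape '\\'".format(i)
    for i in range(len(words)))
print(where_clause)
# (url || ' ' || title) LIKE :0 escape '\' AND (url || ' ' || title) LIKE :1 escape '\'
```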
codereview_python_data_8389 | self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0")
self._assert_ext_status(protocol.report_ext_status, "success", 1)
- #Test new version available without GUID change
test_data.goal_state = test_data.goal_state.replace("<Incarnation>3<",
"<Incarnation>4<")
test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0")
You should also test the behavior of hotfix updates, as well as comparative tests for `ExtensionRollingUpgrade=false`
self._assert_handler_status(protocol.report_vm_status, "Ready", 1, "1.0.0")
self._assert_ext_status(protocol.report_ext_status, "success", 1)
+ #Test hotfix available without GUID change
test_data.goal_state = test_data.goal_state.replace("<Incarnation>3<",
"<Incarnation>4<")
test_data.ext_conf = test_data.ext_conf.replace("1.0.0", "1.1.0") |
codereview_python_data_8391 | from plenum.server.request_handlers.handler_interfaces.read_request_handler import ReadRequestHandler
-from indy_common.authorize.auth_constraints import ConstraintCreator
-from indy_node.server.pool_config import PoolConfig
-
from indy_common.authorize.auth_actions import AuthActionEdit, EDIT_PREFIX, AuthActionAdd
-from indy_common.authorize.auth_request_validator import WriteRequestValidator
-from indy_common.constants import POOL_CONFIG, CONFIG_LEDGER_ID, ACTION, AUTH_RULE, CONSTRAINT, AUTH_ACTION, OLD_VALUE, \
NEW_VALUE, AUTH_TYPE, FIELD
-from indy_node.server.request_handlers.config_req_handlers.config_write_request_handler import ConfigWriteRequestHandler
from plenum.common.exceptions import InvalidClientRequest
from plenum.common.request import Request
from plenum.server.database_manager import DatabaseManager
Is it WIP?
+from indy_common.types import ClientGetAuthRuleOperation
+from plenum.common.txn_util import get_request_data
from plenum.server.request_handlers.handler_interfaces.read_request_handler import ReadRequestHandler
from indy_common.authorize.auth_actions import AuthActionEdit, EDIT_PREFIX, AuthActionAdd
+from indy_common.constants import CONFIG_LEDGER_ID, AUTH_RULE, AUTH_ACTION, OLD_VALUE, \
NEW_VALUE, AUTH_TYPE, FIELD
from plenum.common.exceptions import InvalidClientRequest
from plenum.common.request import Request
from plenum.server.database_manager import DatabaseManager |
codereview_python_data_8397 | wmsg = "The `timeseries` attribute was deprecated in MDAnalysis 2.0.0"
with pytest.warns(DeprecationWarning, match=wmsg):
assert_equal(wb.timeseries, wb.results.timeseries)
-
-def test_import_warning():
- wmsg = 'This module has been moved to'
- with pytest.warns(DeprecationWarning, match=wmsg):
- reload_module(MDAnalysis.analysis.hbonds.wbridge_analysis)
```suggestion ``` stub has now been removed
wmsg = "The `timeseries` attribute was deprecated in MDAnalysis 2.0.0"
with pytest.warns(DeprecationWarning, match=wmsg):
assert_equal(wb.timeseries, wb.results.timeseries) |
codereview_python_data_8400 | self.__dict__.update(d)
# TODO: super's setstate not called?
if "param" not in self.__dict__:
- self.param = type(self.param)(self.__class__, self=self)
class Dimensioned(LabelledData):
Seems like an oversight but I don't think the super would do anything else in this case unless I'm mistaken.
self.__dict__.update(d)
# TODO: super's setstate not called?
if "param" not in self.__dict__:
+ self.__dict__["param"] = type(self.param)(self.__class__, self=self)
class Dimensioned(LabelledData): |
codereview_python_data_8406 | """Tests for Email Factory"""
-from google.cloud.forseti.common.util.email import email_factory, mailjet_connector
from google.cloud.forseti.common.util.email import sendgrid_connector
from google.cloud.forseti.common.util.errors import InvalidInputError
from tests.unittest_utils import ForsetiTestCase
Each import should be in a separate line. ``` from google.cloud.forseti.common.util.email import email_factory from google.cloud.forseti.common.util.email import mailjet_connector ```
"""Tests for Email Factory"""
+from google.cloud.forseti.common.util.email import email_factory
+from google.cloud.forseti.common.util.email import mailjet_connector
from google.cloud.forseti.common.util.email import sendgrid_connector
from google.cloud.forseti.common.util.errors import InvalidInputError
from tests.unittest_utils import ForsetiTestCase |
codereview_python_data_8412 | """
return self._screen
- @expect_types(term=ComputableTerm, name=str)
def add(self, term, name, overwrite=False):
"""
Add a column.
I would try to re-use the error message above rather than using `expect_types` here. This will spit out a message telling the user that we expected a `ComputableTerm` but got a `BoundColumn`, which won't mean anything to most users.
"""
return self._screen
+ @expect_types(term=Term, name=str)
def add(self, term, name, overwrite=False):
"""
Add a column. |
codereview_python_data_8415 | """Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
- according to its scale.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
Also add the paper title and link.
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
+ according to its scale. The mapping rule is proposed in
+ `FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments. |
codereview_python_data_8417 | 'projects': create_tables.CREATE_PROJECT_TABLE,
'project_iam_policies': create_tables.CREATE_PROJECT_IAM_POLICIES_TABLE,
# pylint: disable=line-too-long
'raw_project_iam_policies': create_tables.CREATE_RAW_PROJECT_IAM_POLICIES_TABLE,
'raw_org_iam_policies': create_tables.CREATE_RAW_ORG_IAM_POLICIES_TABLE,
}
Can you add TODOs on the common pylint errors that you don't correct? They should be fixed in a followup PR.
'projects': create_tables.CREATE_PROJECT_TABLE,
'project_iam_policies': create_tables.CREATE_PROJECT_IAM_POLICIES_TABLE,
# pylint: disable=line-too-long
+ # TODO: Investigate improving so we can avoid the pylint disable.
'raw_project_iam_policies': create_tables.CREATE_RAW_PROJECT_IAM_POLICIES_TABLE,
'raw_org_iam_policies': create_tables.CREATE_RAW_ORG_IAM_POLICIES_TABLE,
} |
codereview_python_data_8435 | # Slot descriptor for the table of attribute get & set methods.
def slot_code(self, scope):
- if scope.property_entries or scope.lookup_here("__dict__"):
return scope.getset_table_cname
else:
return "0"
This repetitive special casing actually suggests that the feature should be a normal property.
# Slot descriptor for the table of attribute get & set methods.
def slot_code(self, scope):
+ if scope.property_entries:
return scope.getset_table_cname
else:
return "0" |
codereview_python_data_8439 | flags.mark_flag_as_required('organization_id')
-def main(unused_argv=None):
"""Run the scanner."""
logger = LogUtil.setup_logging(__name__)
nit: This method is getting long in the tooth, I'd suggest (at some later point) splitting this up into other functions, e.g. violation checking, generating emails, etc. Feel free to leave a TODO for this.
flags.mark_flag_as_required('organization_id')
+def main(_):
"""Run the scanner."""
logger = LogUtil.setup_logging(__name__) |
codereview_python_data_8441 | self._create_or_update_from_compiler(
self._query_compiler.concat(1, value._query_compiler), inplace=True
)
self.columns = self.columns[:-1].append(pandas.Index([key]))
else:
self.insert(loc=len(self.columns), column=key, value=value)
Why do we not include the last column of the original columns?
self._create_or_update_from_compiler(
self._query_compiler.concat(1, value._query_compiler), inplace=True
)
+ # Now that the data is appended, we need to update the column name for
+ # that column to `key`, otherwise the name could be incorrect. Drop the
+ # last column name from the list (the appended value's name and append
+ # the new name.
self.columns = self.columns[:-1].append(pandas.Index([key]))
else:
self.insert(loc=len(self.columns), column=key, value=value) |
codereview_python_data_8442 | try:
connection = socket.socket(self.address.family, socket.SOCK_STREAM)
if self.spoof_source_address:
- if os.geteuid() != 0:
- raise RuntimeError("Insufficient privileges to set socket option")
- else:
connection.setsockopt(socket.SOL_IP, 19, 1)
if self.source_address:
connection.bind(self.source_address())
connection.connect(self.address())
What happens of this is called with insufficient privileges? If this raises an error, we should just try doing that and handle the exception case if it fails (which may have other reasons, e.g. I don't think SOL_IP works in Windows).
try:
connection = socket.socket(self.address.family, socket.SOCK_STREAM)
if self.spoof_source_address:
+ try:
connection.setsockopt(socket.SOL_IP, 19, 1)
+ except socket.error as e:
+ raise exceptions.ProtocolException(
+ "Failed to spoof the source address: " + e.strerror)
if self.source_address:
connection.bind(self.source_address())
connection.connect(self.address()) |
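The suggested change is the usual EAFP pattern: attempt the privileged call and translate the failure instead of pre-checking `geteuid()`. A minimal sketch, Linux-only (19 is `IP_TRANSPARENT`; the surrounding mitmproxy types are omitted):
```
import socket

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    s.setsockopt(socket.SOL_IP, 19, 1)   # needs CAP_NET_ADMIN / root
except OSError as e:
    print("Failed to spoof the source address:", e.strerror)
```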
codereview_python_data_8448 | def __init__(self, atomgroup, reference=None, select='all',
- groupselections=None, weights=None, tol_mass=0.1,
- ref_frame=0, **kwargs):
r"""Parameters
----------
atomgroup : AtomGroup or Universe
So having now looked at `get_weights`, it looks like that already does most of these checks/raises errors, so might be easier to use that rather than adding a new function. It would be useful to keep the more specific error message identifying which selection cause the error, since this wouldn't be clear from `get_weights` alone, but you could use say try/except to catch and expand on the errors when calling `get_weights`
def __init__(self, atomgroup, reference=None, select='all',
+ groupselections=None, weights=None, weights_groupselections=False,
+ tol_mass=0.1, ref_frame=0, **kwargs):
r"""Parameters
----------
atomgroup : AtomGroup or Universe |
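A hedged sketch of the reviewer's suggestion: lean on the existing `MDAnalysis.lib.util.get_weights` helper and re-raise its error with the offending selection named. The wrapper name and variables below are illustrative, not part of the original code.
```
from MDAnalysis.lib.util import get_weights

def _groupselection_weights(sel_label, atoms, weights):
    try:
        return get_weights(atoms, weights)
    except (ValueError, TypeError) as err:
        raise type(err)(
            "weights for group selection '{}' are invalid: {}".format(sel_label, err))
```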
codereview_python_data_8449 | if has_body:
properties = {}
- for stat in getattr(self.body, 'stats', []):
if isinstance(stat, FuncDefNode):
stat._properties = properties # deliberately shared between them
self.body.analyse_declarations(scope)
Calling `getattr()` always has a code smell. Is the problem here that `self.body` might have been wrapped? Because if that is the case, then it seems wrong to ignore the body.
if has_body:
properties = {}
+ for stat in self.body.stats:
if isinstance(stat, FuncDefNode):
stat._properties = properties # deliberately shared between them
self.body.analyse_declarations(scope) |
codereview_python_data_8451 | self.norm_cfg = norm_cfg
self.loss_point = build_loss(loss_point)
- fc_channels_in = in_channels + num_classes
self.fcs = nn.ModuleList()
for k in range(num_fcs):
fc = ConvModule(
- fc_channels_in,
fc_channels,
kernel_size=1,
stride=1,
fc_channels_in -> fc_in_channels
self.norm_cfg = norm_cfg
self.loss_point = build_loss(loss_point)
+ fc_in_channels = in_channels + num_classes
self.fcs = nn.ModuleList()
for k in range(num_fcs):
fc = ConvModule(
+ fc_in_channels,
fc_channels,
kernel_size=1,
stride=1, |
codereview_python_data_8455 | return
if conf.get_enable_overprovisioning():
- if not self.protocol.supports_overprovisioning():
- logger.verbose("Overprovisioning is enabled but protocol does not support it.")
- else:
- artifacts_profile = self.protocol.get_artifacts_profile()
- if artifacts_profile and artifacts_profile.is_on_hold():
- logger.info("Extension handling is on hold")
- return
wait_until = datetime.datetime.utcnow() + datetime.timedelta(minutes=DEFAULT_EXT_TIMEOUT_MINUTES)
max_dep_level = max([handler.sort_key() for handler in self.ext_handlers.extHandlers])
maybe instead add a supports_overprovisioning() method to the protocol class? and a warning if the setting is true but the protocol doesn't support it? thanks
return
if conf.get_enable_overprovisioning():
+ artifacts_profile = self.protocol.get_artifacts_profile()
+ if artifacts_profile and artifacts_profile.is_on_hold():
+ logger.info("Extension handling is on hold")
+ return
wait_until = datetime.datetime.utcnow() + datetime.timedelta(minutes=DEFAULT_EXT_TIMEOUT_MINUTES)
max_dep_level = max([handler.sort_key() for handler in self.ext_handlers.extHandlers]) |
codereview_python_data_8458 | Call Sphinx command with hard-coded "html" target
"""
# Copy docs files from Open3D-ML repo
- OPEN3D_ML_ROOT = os.environ.get("OPEN3D_ML_ROOT", "../../Open3D-ML")
if os.path.isdir(OPEN3D_ML_ROOT):
shutil.copy(os.path.join(OPEN3D_ML_ROOT, "docs", "tensorboard.md"),
self.current_file_dir)
Since we have `self.current_file_dir`, shall we make `../../Open3D-ML` relative to current file_dir?
Call Sphinx command with hard-coded "html" target
"""
# Copy docs files from Open3D-ML repo
+ OPEN3D_ML_ROOT = os.environ.get(
+ "OPEN3D_ML_ROOT",
+ os.path.join(self.current_file_dir, "../../Open3D-ML"))
if os.path.isdir(OPEN3D_ML_ROOT):
shutil.copy(os.path.join(OPEN3D_ML_ROOT, "docs", "tensorboard.md"),
self.current_file_dir) |
codereview_python_data_8464 | -from pontoon.checks import DB_EXCLUDE_LIBRARIES
from pontoon.checks.models import Warning, Error
-def get_failed_checks_db_objects(translation, failed_checks, excluded=None):
"""
Return model instances of Warnings and Errors
:arg Translation translation: instance of translation
:arg dict failed_checks: dictionary with failed checks
- :arg tuple exclude_libraries:
"""
warnings = []
errors = []
You don't use this argument (and it's called differently in the function signature).
+from pontoon.checks import DB_LIBRARIES
from pontoon.checks.models import Warning, Error
+def get_failed_checks_db_objects(translation, failed_checks):
"""
Return model instances of Warnings and Errors
:arg Translation translation: instance of translation
:arg dict failed_checks: dictionary with failed checks
"""
warnings = []
errors = [] |
codereview_python_data_8466 | val=pgast.NullConstant())
assert query.larg and query.rarg
query.rarg.target_list.append(restarget)
nullref = pgast.ColumnRef(name=[alias], nullable=True)
Is this for the `emptyrel` side of `optional_wrapping`, if so, it probably makes sense to to check for a `NullRelation` explicitly to be safer here.
val=pgast.NullConstant())
assert query.larg and query.rarg
+ assert (
+ isinstance(query.rarg, pgast.SelectStmt)
+ and isinstance(query.rarg.from_clause[0], pgast.RelRangeVar)
+ and isinstance(
+ query.rarg.from_clause[0].relation, pgast.NullRelation)
+ )
query.rarg.target_list.append(restarget)
nullref = pgast.ColumnRef(name=[alias], nullable=True) |
codereview_python_data_8471 | def __call__(self):
return [
- np.array([-1, -2, -3, -4], dtype = self._data_type),
- np.array([99, 2, -10, 10], dtype = self._data_type)]
def valid_axes(self):
return [None, 0]
```suggestion input, keep_dims = keep_dims, axes = axes) ``` Just a note, you don't have to specify the device, since it will be inferred from the input
def __call__(self):
return [
+ np.array([ 1, 2, 3, 4], dtype = self._data_type),
+ np.array([99, 2, 10, 10], dtype = self._data_type)]
def valid_axes(self):
return [None, 0] |
codereview_python_data_8472 | from data import *
def parse_args():
- parser = argparse.ArgumentParser(description='Train Faster-RCNN networks e2e.')
parser.add_argument('--image', type=str, default='',
help="The image for scene graph extraction.")
parser.add_argument('--gpu', type=str, default='',
Is this a script just for detection or scene graph extraction? Also the Faster-RCNN net is not really trained right? Look like it is pre-trained instead.
from data import *
def parse_args():
+ parser = argparse.ArgumentParser(description='Demo of Scene Graph Extraction.')
parser.add_argument('--image', type=str, default='',
help="The image for scene graph extraction.")
parser.add_argument('--gpu', type=str, default='', |
codereview_python_data_8478 | elif self._config.batch_mode == 'family':
return family
elif self._config.batch_mode == 'unbatched_params':
- param_str = ', '.join(f'{k}={v}' for k, v in six.iteritems(unbatched_args))
- return f'{family}({param_str})'
else:
raise ValueError('Unknown batch mode for batch notifier: {}'.format(
self._config.batch_mode))
weren't f-strings added in Python 3.6? If so, are we also removing support for 3.4 and 3.5?
elif self._config.batch_mode == 'family':
return family
elif self._config.batch_mode == 'unbatched_params':
+ param_str = ', '.join('{}={}'.format(k,v) for k, v in six.iteritems(unbatched_args))
+ return '{}({})'.format(family, param_str)
else:
raise ValueError('Unknown batch mode for batch notifier: {}'.format(
self._config.batch_mode)) |
codereview_python_data_8488 | guiclass="ArgumentsPanel", testclass="Arguments")
@staticmethod
- def _get_http_request(url, label, method, timeout, body, keepalive, files=(), encoding=None,
- redirect_follow=True, redirect_auto=False):
"""
Generates HTTP request
:type method: str
Do we really need two options? Aren't they same thing and nobody understand the difference?
guiclass="ArgumentsPanel", testclass="Arguments")
@staticmethod
+ def _get_http_request(url, label, method, timeout, body, keepalive, files=(), encoding=None, follow_redirects=True):
"""
Generates HTTP request
:type method: str |
codereview_python_data_8496 | def get_main_container_name():
- cmd = "docker inspect -f '{{ .Name }}' %s | cut -c 2-" % config.HOSTNAME
- return run(cmd).strip()
def setup_logging():
Instead of piping to `cut` here, can we do the string manipulation directly in Python - something like: ``` cmd = "docker inspect -f '{{ .Name }}' %s" % config.HOSTNAME return run(cmd).strip().lstrip('/') ```
def get_main_container_name():
+ cmd = "docker inspect -f '{{ .Name }}' %s" % config.HOSTNAME
+ return run(cmd).strip().lstrip('/')
def setup_logging(): |
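What the pure-Python replacement for `| cut -c 2-` does, in isolation; Docker reports container names with a single leading slash, and the name below is made up:
```
raw = "/mycontainer\n"              # typical `docker inspect -f '{{ .Name }}'` output
print(raw.strip().lstrip('/'))      # mycontainer
```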
codereview_python_data_8500 | new_directives_out.update(new_directives)
return new_directives_out
-def get_conversion_utility_code_directives(directives):
- directives_out = dict(_directive_defaults)
- allowed_directives = (
- 'binding', 'always_allow_keywords', 'allow_none_for_extension_args',
- 'auto_pickle', 'ccomplex',
- 'c_string_type', 'c_string_encoding')
- for k,v in directives.items():
- if k in allowed_directives or k.startswith('optimize.'):
- directives_out[k] = v
- return directives_out
-
# Declare compiler directives
_directive_defaults = {
'binding': True, # was False before 3.0
I would suggest a) moving the names into a global (list) constant, next to the defaults and scopes, since those three are what we need to adapt together and consistently as part of future changes in the directives. b) make the list complete, including the spelled out `optimize` names, to make it more visible that they are part of what is being copied and make them show up in name searches. c) just iterating over the name list and not the items, since the list to copy is much smaller than the directives dict.
new_directives_out.update(new_directives)
return new_directives_out
# Declare compiler directives
_directive_defaults = {
'binding': True, # was False before 3.0 |
codereview_python_data_8502 | score_factors (Tensor): The factors multiplied to scores before
applying NMS.
return_inds (bool): Whether return the indices of kept bboxes.
Returns:
tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5), \
(bool, optional): xxx, Defaults to False.
score_factors (Tensor): The factors multiplied to scores before
applying NMS.
return_inds (bool): Whether return the indices of kept bboxes.
+ Default False.
Returns:
tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5), \ |
codereview_python_data_8503 | func_details.envvars["AWS_LAMBDA_FUNCTION_HANDLER"] = main_file
func_details.envvars["AWS_LAMBDA_EVENT_BODY"] = json.dumps(json_safe(event))
else:
- func_details = {}
- func_details.envvars = {
- "AWS_LAMBDA_FUNCTION_HANDLER": main_file,
- "AWS_LAMBDA_EVENT_BODY": json.dumps(json_safe(event)),
- }
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, func_details=func_details)
Thanks for the fix in this PR @pinzon ! Assigning an attribute to a dict will likely not work (raising `AttributeError`). I think we can safely assume that `func_details` should be passed here (`execute_go_lambda(..)` is only called from one place currently). I think we can simply add a `LOG.warning(...)` message in the `else` branch here.. Thanks
func_details.envvars["AWS_LAMBDA_FUNCTION_HANDLER"] = main_file
func_details.envvars["AWS_LAMBDA_EVENT_BODY"] = json.dumps(json_safe(event))
else:
+ LOG.warn("Function details are empty")
LOG.info(cmd)
result = self._execute_in_custom_runtime(cmd, func_details=func_details) |
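The reviewer's point, reduced to a short reproduction (the key value is illustrative): attribute assignment on a plain dict raises immediately, which is why the empty-dict branch could not work as written.
```
func_details = {}
try:
    func_details.envvars = {"AWS_LAMBDA_FUNCTION_HANDLER": "main"}
except AttributeError as exc:
    print(exc)   # 'dict' object has no attribute 'envvars'
```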
codereview_python_data_8506 | return getattr(self, self.norm_name)
def init_weights(self):
- nonlinearity = self.act_cfg['type'] if self.with_activation else 'relu'
kaiming_init(self.conv, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.norm, 1, bias=0)
nonlinearity can only be `relu` or `leaky_relu`.
return getattr(self, self.norm_name)
def init_weights(self):
+ if self.with_activation and self.act_cfg['type'] == 'leaky_relu':
+ nonlinearity = 'leaky_relu'
+ else:
+ nonlinearity = 'relu'
kaiming_init(self.conv, nonlinearity=nonlinearity)
if self.with_norm:
constant_init(self.norm, 1, bias=0) |
codereview_python_data_8515 | self.fail('KeyError should be caught')
def test_get_coord_axes_bad_dims(self):
- '''Test that ValueError is raised when
numpy array with incorrect dimensions
- is fed to get_coord_axes().'''
with self.assertRaises(ValueError):
PSA.get_coord_axes(np.zeros((5,5,5,5)))
Have each of these line in a different `with` block, or in a different test. The way it is now makes it difficult to now which one failed if there is a failure.
self.fail('KeyError should be caught')
def test_get_coord_axes_bad_dims(self):
+ """Test that ValueError is raised when
numpy array with incorrect dimensions
+ is fed to get_coord_axes()."""
with self.assertRaises(ValueError):
PSA.get_coord_axes(np.zeros((5,5,5,5))) |
codereview_python_data_8521 | self.disconnect()
"""
- def __init__(self, server_address=None, source_address=None):
super(ServerConnectionMixin, self).__init__()
- self.server_conn = ServerConnection(server_address, source_address)
self.__check_self_connect()
def __check_self_connect(self):
Can we take this out of the constructor here and just use the config value? (This would also make the other proxy mode cases obsolete)
self.disconnect()
"""
+ def __init__(self, server_address=None):
super(ServerConnectionMixin, self).__init__()
+ self.server_conn = ServerConnection(server_address)
self.__check_self_connect()
def __check_self_connect(self): |
codereview_python_data_8524 | # handle empty file gracefully
return
if marker != b"ABIF":
- raise IOError('File should start with ABIF, not %r' % marker)
# dirty hack for handling time information
times = {'RUND1': '', 'RUND2': '', 'RUNT1': '', 'RUNT2': '', }
I think the original is OK here, and the improved wording does not justify changing the exception and the small chance of breaking someone's script it is matched on the message.
# handle empty file gracefully
return
if marker != b"ABIF":
+ raise IOError('File should start ABIF, not %r' % marker)
# dirty hack for handling time information
times = {'RUND1': '', 'RUND2': '', 'RUNT1': '', 'RUNT2': '', } |
codereview_python_data_8526 | test_helpers.patch(self, ['metrics.logs.log_warn'])
# Pass-through logs just so we can see what's going on (but moving from
# log_warn to plain log to avoid creating a loop)
- self.mock.log_warn.side_effect = metrics.logs.log
environment.set_value('FUZZ_TARGET', 'example_fuzzers/overflow_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia')
nit: please use single quotes for strings. another instance on lines 883 and 885.
test_helpers.patch(self, ['metrics.logs.log_warn'])
# Pass-through logs just so we can see what's going on (but moving from
# log_warn to plain log to avoid creating a loop)
+ self.mock.log_warn.side_effect = logs.log
environment.set_value('FUZZ_TARGET', 'example_fuzzers/overflow_fuzzer')
environment.set_value('JOB_NAME', 'libfuzzer_asan_fuchsia') |
codereview_python_data_8527 | import os.path as osp
-import random
import mmcv
import numpy as np
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls
It's better to change `random` to `np.random` since the latter one is the conventional one used in mmdet and it's more compatible with numpy arrays.
import os.path as osp
import mmcv
import numpy as np
+from numpy import random
from torch.utils.data import Dataset
from mmdet.core import eval_map, eval_recalls |
codereview_python_data_8529 | -import asyncio
from argparse import ArgumentParser
import numpy as np
import requests
-from mmdet.apis import (async_inference_detector, inference_detector,
- init_detector, show_result_pyplot)
def parse_args():
Address and port of the inference server.
from argparse import ArgumentParser
import numpy as np
import requests
+from mmdet.apis import inference_detector, init_detector, show_result_pyplot
def parse_args(): |
codereview_python_data_8530 | from .layers import *
from .metrics import *
from .losses import *
import time
Instead of an extra param, could you just allow tmp_name to be (optionally) an absolute path?
from .layers import *
from .metrics import *
from .losses import *
+from .fp16 import *
import time |
codereview_python_data_8534 | if path is infer:
path = os.getenv('HYPOTHESIS_DATABASE_FILE')
if path is not None: # pragma: no cover
- # Note: for Hypothesis 4, there should still be a deprecation
- # warning (noting that the var is ignored) to ease debugging for
- # anyone using it and migrating to a new version.
note_deprecation(
'The $HYPOTHESIS_DATABASE_FILE environment variable is '
'deprecated, and will be ignored by a future version of '
As per discussion before (depending on timeline) this probably won't be true in Hypothesis 4 - we'll still respect the environment variable.
if path is infer:
path = os.getenv('HYPOTHESIS_DATABASE_FILE')
if path is not None: # pragma: no cover
+ # Note: we should retain an explicit deprecation warning for a
+ # further period after this is removed, to ease debugging for
+ # anyone migrating to a new version.
note_deprecation(
'The $HYPOTHESIS_DATABASE_FILE environment variable is '
'deprecated, and will be ignored by a future version of ' |
codereview_python_data_8540 | if not self.config.hatch_eggs:
return
- self.api.get_inventory()
- response_dict = self.api.call()
inv = {}
incubators = []
eggs = []
Please, use self.bot.get_inventory() - it is cached version of it.
if not self.config.hatch_eggs:
return
+ response_dict = self.bot.get_inventory()
inv = {}
incubators = []
eggs = [] |
codereview_python_data_8547 | path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
- if re.fullmatch(r'[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82
This should be `re.search` with a `^` anchor added to the regex, as what we want here is really any path starting with something like `E:`.
path = utils.expand_windows_drive(path)
# Drive dependent working directories are not supported, e.g.
# E:filename is invalid
+ if re.search(r'^[A-Z]:[^\\]', path, re.IGNORECASE):
return None
# Paths like COM1, ...
# See https://github.com/qutebrowser/qutebrowser/issues/82 |
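The difference the reviewer points at, shown directly: `fullmatch` must consume the whole string, so a longer path such as `E:filename` slips through, while `search` with a `^` anchor catches it.
```
import re

pattern = r'[A-Z]:[^\\]'
print(re.fullmatch(pattern, "E:filename", re.IGNORECASE))     # None, case is missed
print(re.search('^' + pattern, "E:filename", re.IGNORECASE))  # <re.Match ...>, case is caught
```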
codereview_python_data_8548 | result = response['result']
if mode == self.mode_commit:
check_tx_code = result.get('check_tx', {}).get('code', 0)
- deliver_tx_code = result['deliver_tx'].get('code', 0)
error_code = check_tx_code or deliver_tx_code
else:
error_code = result.get('code', 0)
Can we please have a test case for this response object
result = response['result']
if mode == self.mode_commit:
check_tx_code = result.get('check_tx', {}).get('code', 0)
+ deliver_tx_code = result.get('deliver_tx', {}).get('code', 0)
error_code = check_tx_code or deliver_tx_code
else:
error_code = result.get('code', 0) |
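Why the second `.get` matters: when a commit-mode response carries no `deliver_tx` section at all, direct indexing raises, while the chained lookup falls back to 0. A tiny illustration with a hand-built response:
```
result = {"check_tx": {"code": 0}}                   # no 'deliver_tx' key present
print(result.get("deliver_tx", {}).get("code", 0))   # 0
# result["deliver_tx"] would raise KeyError here
```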
codereview_python_data_8549 | default=[100, 300, 1000],
help='proposal numbers, only used for recall evaluation')
parser.add_argument(
- '--class_wise', action='store_true', help='whether eval class wise ap')
args = parser.parse_args()
- coco_eval(args.result, args.types, args.ann, args.max_dets,
- args.class_wise)
if __name__ == '__main__':
We can omit the underscore and just use `classwise`.
default=[100, 300, 1000],
help='proposal numbers, only used for recall evaluation')
parser.add_argument(
+ '--classwise', action='store_true', help='whether eval class wise ap')
args = parser.parse_args()
+ coco_eval(args.result, args.types, args.ann, args.max_dets, args.classwise)
if __name__ == '__main__': |
codereview_python_data_8559 | self.assertEqual(string_table, expected_string_table)
- def testStringParsing(self):
- """Test parsing an ASCII string."""
self.assertEqual(binary.ReadUTF16(self._ascii_string_1), 'Whatis')
self.assertEqual(binary.ReadUTF16(self._ascii_string_2), 'What is this?')
name of function and docstring don't seem to match what is being tested
self.assertEqual(string_table, expected_string_table)
+ def testReadUTF16(self):
+ """Test reading a UTF-16 string."""
self.assertEqual(binary.ReadUTF16(self._ascii_string_1), 'Whatis')
self.assertEqual(binary.ReadUTF16(self._ascii_string_2), 'What is this?') |
codereview_python_data_8569 | messages = self.consumer.get_messages(count=CASSANDRA_BATCH_SIZE, block=True, timeout=KAFKA_READ_TIMEOUT)
for message in messages:
- print(message.message.value)
try:
data = ujson.loads(message.message.value)
listens.append(Listen.from_json(data))
I guess we don't want this here when we run it
messages = self.consumer.get_messages(count=CASSANDRA_BATCH_SIZE, block=True, timeout=KAFKA_READ_TIMEOUT)
for message in messages:
try:
data = ujson.loads(message.message.value)
listens.append(Listen.from_json(data)) |
codereview_python_data_8572 | """Scanner for the GroupsSettings rules engine."""
from google.cloud.forseti.common.gcp_type import groups_settings
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.scanner.audit import groups_settings_rules_engine
I recommend json.dumps with sort_keys=True instead of str() because that ensures that the output is always consistently ordered, which helps with tests and consistency in reported violations for cscc.
"""Scanner for the GroupsSettings rules engine."""
+import json
+
from google.cloud.forseti.common.gcp_type import groups_settings
from google.cloud.forseti.common.util import logger
from google.cloud.forseti.scanner.audit import groups_settings_rules_engine |
codereview_python_data_8575 | PUBSUB_PLATFORMS = ['linux']
-MEMORY_SAFE_LANGUAGES = ['go', 'java', 'python', 'rust']
OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT = 1.0
OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT = 0.2
Since there are fewer memory unsafe languages than memory safe ones, why don't we just use that instead?
PUBSUB_PLATFORMS = ['linux']
+MEMORY_SAFE_LANGUAGES = {'go', 'java', 'python', 'rust'}
OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT = 1.0
OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT = 0.2 |
codereview_python_data_8576 | return Rank(self, method=method, ascending=ascending, mask=mask)
@expect_types(
- target=ComputableTerm,
correlation_length=int,
mask=(Filter, NotSpecifiedType),
)
I would probably put `(Factor, Slice)` here rather than `ComputableTerm`. We shouldn't accept a Filter or a Classifier here.
return Rank(self, method=method, ascending=ascending, mask=mask)
@expect_types(
+ target=(Factor, Slice),
correlation_length=int,
mask=(Filter, NotSpecifiedType),
) |
codereview_python_data_8581 | # Each fetch will take place in its own thread, since it's naturally
# parallel work.
- nodes = []
patterns = []
for v in nodes_or_patterns:
if isinstance(v, basestring):
patterns.append(v)
else:
- nodes.append(v)
results = []
for store in self.remote_stores:
is this used anywhere?
# Each fetch will take place in its own thread, since it's naturally
# parallel work.
patterns = []
for v in nodes_or_patterns:
if isinstance(v, basestring):
patterns.append(v)
else:
+ patterns.append(node.path)
results = []
for store in self.remote_stores: |
codereview_python_data_8584 | if make_url(self.get_connection_string()).drivername == \
'sqlite+pysqlite':
# FIXME: workaround for locking errors
- # FIXME: why is the connection used by multiple threads
- # is that a problem ??? do we need some extra locking???
engine = sqlalchemy.create_engine(self.get_connection_string(),
encoding='utf8',
- connect_args={'timeout': 600,
- 'check_same_thread': False},
poolclass=NullPool)
else:
engine = sqlalchemy.create_engine(self.get_connection_string(),
```suggestion # FIXME: why is the connection used by multiple threads ```
if make_url(self.get_connection_string()).drivername == \
'sqlite+pysqlite':
# FIXME: workaround for locking errors
engine = sqlalchemy.create_engine(self.get_connection_string(),
encoding='utf8',
+ connect_args={'timeout': 600},
poolclass=NullPool)
else:
engine = sqlalchemy.create_engine(self.get_connection_string(), |
codereview_python_data_8587 | def test_characters_of_specific_groups():
st = characters(whitelist_categories=('Lu', 'Nd'))
- minimal(st, lambda c: unicodedata.category(c) == 'Lu')
- minimal(st, lambda c: unicodedata.category(c) == 'Nd')
assert_no_examples(
st, lambda c: unicodedata.category(c) not in ('Lu', 'Nd'))
This is an existence claim which doesn't have any assertions about the return value, so perhaps it should use find_any instead of minimal?
def test_characters_of_specific_groups():
st = characters(whitelist_categories=('Lu', 'Nd'))
+ find_any(st, lambda c: unicodedata.category(c) == 'Lu')
+ find_any(st, lambda c: unicodedata.category(c) == 'Nd')
assert_no_examples(
st, lambda c: unicodedata.category(c) not in ('Lu', 'Nd')) |
codereview_python_data_8591 | # Copyright (C) 2016 Arthur Gervais, Ken LE PRADO, Sébastien Mainand
-from scapy.all import *
# TODO: implement serial specific function codes
Is that really useful ? Wouldn't it be better to only import what you need ?
# Copyright (C) 2016 Arthur Gervais, Ken LE PRADO, Sébastien Mainand
+from scapy.packet import *
+from scapy.fields import *
+from scapy.layers.inet import *
# TODO: implement serial specific function codes |
codereview_python_data_8601 | def test_build_network_url(self):
"""Verify that we can get a url from project and network name."""
- self.assertEqual(
- fe.build_network_url(
- 'example.com:testing', 'mytestnet'),
- 'https://www.googleapis.com/compute/{}/projects/'
- 'example.com:testing/global/networks/'
- 'mytestnet'.format(fe.API_VERSION))
class ComputeFirewallAPI(ForsetiTestCase):
By convention we put the expected value first for assertEqual and the actual value second. So fe.build_network_url(..) should be the second argument instead of the first.
def test_build_network_url(self):
"""Verify that we can get a url from project and network name."""
+ self.assertEqual('https://www.googleapis.com/compute/{}/projects/'
+ 'example.com:testing/global/networks/'
+ 'mytestnet'.format(fe.API_VERSION),
+ fe.build_network_url('example.com:testing',
+ 'mytestnet'))
class ComputeFirewallAPI(ForsetiTestCase): |
codereview_python_data_8605 | implicit_limit: int = 0
inline_typeids: bool = False
inline_typenames: bool = False
- inline_shapeids: bool = True
schema_object_ids: Optional[Mapping[s_name.Name, uuid.UUID]] = None
source: Optional[edgeql.Source] = None
backend_runtime_params: Any = (
```suggestion inline_objectids: bool = True ```
implicit_limit: int = 0
inline_typeids: bool = False
inline_typenames: bool = False
+ inline_objectids: bool = True
schema_object_ids: Optional[Mapping[s_name.Name, uuid.UUID]] = None
source: Optional[edgeql.Source] = None
backend_runtime_params: Any = ( |
codereview_python_data_8607 | text.replace("\n", " ")))
def _write_multi_line(self, tag, text):
- """Write multiple lines in each GenBank record (PRIVATE)."""
# TODO - Do the line spliting while preserving white space?
max_len = self.MAX_WIDTH - self.HEADER_WIDTH
lines = self._split_multi_line(text, max_len)
Can you include the original information like this please: ```python def _write_multi_line(self, tag, text): """Write multiple lines in each GenBank record (PRIVATE). Used in the 'header' of each GenBank record. """ ```
text.replace("\n", " ")))
def _write_multi_line(self, tag, text):
+ """Write multiple lines in each GenBank record (PRIVATE).
+
+ Used in the 'header' of each GenBank record.
+ """
# TODO - Do the line spliting while preserving white space?
max_len = self.MAX_WIDTH - self.HEADER_WIDTH
lines = self._split_multi_line(text, max_len) |
codereview_python_data_8611 | script_content = fds.read()
if "con:soapui-project" in script_content:
self.log.info("SoapUI project detected")
- # TODO: don't fail if there's one test case inside the SoapUI project
- test_case = scenario_obj.get("test-case",
- ValueError("'test-case' field should be present for SoapUI projects"))
converter = SoapUIScriptConverter(self.log)
- conv_config = converter.convert_script(script_path, test_case)
conv_scenarios = conv_config["scenarios"]
- scenario_name, conv_scenario = next(iteritems(conv_scenarios))
if scenario_name not in self.engine.config["scenarios"]:
self.engine.config["scenarios"].merge({scenario_name: conv_scenario})
self.execution["scenario"] = scenario_name
What will happen if 2 test suites will have test case with this name?
script_content = fds.read()
if "con:soapui-project" in script_content:
self.log.info("SoapUI project detected")
+ test_case = scenario_obj.get("test-case", None)
converter = SoapUIScriptConverter(self.log)
+ conv_config = converter.convert_script(script_path)
conv_scenarios = conv_config["scenarios"]
+ scenario_name, conv_scenario = converter.find_soapui_test_case(test_case, conv_scenarios)
if scenario_name not in self.engine.config["scenarios"]:
self.engine.config["scenarios"].merge({scenario_name: conv_scenario})
self.execution["scenario"] = scenario_name |
codereview_python_data_8620 | folds=None, nfold=5, stratified=True, shuffle=True,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
- early_stopping_rounds=None, early_stopping_threshold=0.0,
- fpreproc=None, verbose_eval=None, show_stdv=True, seed=0,
callbacks=None, eval_train_metric=False,
return_cvbooster=False):
"""Perform the cross-validation with given parameters.
Since `lightgbm` does not enforce the use of keyword-only arguments, inserting a new argument in the middle of this signature is a breaking change. I think it's fair to say that calling a function with this many arguments with only positional arguments is unlikely, but it's not impossible. To ensure this isn't a breaking change, I think this argument should be added at the end of the signature here (and in all other places in this PR where it's currently being added in the middle of a signature).
folds=None, nfold=5, stratified=True, shuffle=True,
metrics=None, fobj=None, feval=None, init_model=None,
feature_name='auto', categorical_feature='auto',
+ early_stopping_rounds=None, fpreproc=None,
+ verbose_eval=None, show_stdv=True, seed=0,
callbacks=None, eval_train_metric=False,
return_cvbooster=False):
"""Perform the cross-validation with given parameters. |
codereview_python_data_8621 | p = remainder
continue
if i <= 1:
- # We special case so that zero is always false and 1 is
# always true which makes shrinking easier because we can
# always replace a truthy block with 1. This has the
# slightly weird property that shrinking from 2 to 1 can
`We special case so...` - sounds a bit strange, at least for me. Is that a typo?
p = remainder
continue
if i <= 1:
+ # We arrange it so that zero is always false and 1 is
# always true which makes shrinking easier because we can
# always replace a truthy block with 1. This has the
# slightly weird property that shrinking from 2 to 1 can |
codereview_python_data_8622 | def check_root():
"""Ask for confirmation if running as root when unnecessary."""
- if sys.platform == "linux":
if spell.can_use_data_path() and os.geteuid() == 0:
print("You're running Qt >= 5.10 which means qutebrowser will "
"load dictionaries from a path in your home-directory. "
Are you sure this is unneeded in bsd and mac too?
def check_root():
"""Ask for confirmation if running as root when unnecessary."""
+ if sys.platform not in "win32":
if spell.can_use_data_path() and os.geteuid() == 0:
print("You're running Qt >= 5.10 which means qutebrowser will "
"load dictionaries from a path in your home-directory. " |
codereview_python_data_8624 | find(
st.binary(min_size=100),
lambda x: assume(not finicky) and has_a_non_zero_byte(x),
- settings=settings(
- database=database,
- max_shrinks=10,
- ),
database_key=key
)
except Unsatisfiable:
No need to break over multiple lines now
find(
st.binary(min_size=100),
lambda x: assume(not finicky) and has_a_non_zero_byte(x),
+ settings=settings(database=database, max_shrinks=10),
database_key=key
)
except Unsatisfiable: |
codereview_python_data_8625 | config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
- keep_alive_ttl = TimeDeltaParameter(default=datetime.timedelta(0),
- config_path=dict(section='core', name='worker-keep-alive-ttl'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a '
Feels a little odd to maintain the legacy config format here and allow both `core -> worker-keep-alive-ttl` and `worker -> keep_alive_ttl` for a brand new config arg. I'm not against it. Just seems odd to allow. But I'll let @Tarrasch or @honnix chime in on that
config_path=dict(section='core', name='worker-ping-interval'))
keep_alive = BoolParameter(default=False,
config_path=dict(section='core', name='worker-keep-alive'))
count_uniques = BoolParameter(default=False,
config_path=dict(section='core', name='worker-count-uniques'),
description='worker-count-uniques means that we will keep a ' |
codereview_python_data_8628 | super().__init__()
self.id = id
self.start, self.finish = None, None
- self.timeout = 180
self.name = name
self.group = group
self.agents = agents
I think this should be a dynamic configuration option within the operations pane or global application configuration
super().__init__()
self.id = id
self.start, self.finish = None, None
+ self.base_timeout = 180
self.name = name
self.group = group
self.agents = agents |
codereview_python_data_8629 | # Issue #1511
# analysis_class should not raise
# a DeprecationWarning
- with no_deprecated_call():
- u = mda.Universe(PSF, DCD)
- def distance(a, b):
- return np.linalg.norm((a.centroid() - b.centroid()))
- Distances = base.analysis_class(distance)
d = Distances(u.atoms[:10], u.atoms[10:20]).run()
I'd prefer it if the context only covered the lines we want to test, not the whole test.
# Issue #1511
# analysis_class should not raise
# a DeprecationWarning
+ u = mda.Universe(PSF, DCD)
+
+ def distance(a, b):
+ return np.linalg.norm((a.centroid() - b.centroid()))
+ Distances = base.analysis_class(distance)
+
+ with no_deprecated_call():
d = Distances(u.atoms[:10], u.atoms[10:20]).run() |
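A generic, library-agnostic sketch of the point being made (function and variable names are illustrative): keep the warning-checking context manager wrapped around only the call under test, so setup code can neither trigger nor mask the assertion.

```python
import warnings

def test_no_deprecation_from_the_call_under_test():
    data = list(range(10))                      # setup stays outside the guard
    with warnings.catch_warnings():
        warnings.simplefilter("error", DeprecationWarning)
        result = sum(data)                      # only this call is guarded
    assert result == 45
```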
codereview_python_data_8630 | kwargs = {
'dn': args.dataset, 'g': g, 'train_nid': train_nid, 'num_workers_sampler': args.num_workers_sampler,
- 'train': False, 'num_subg_norm': args.num_subg_norm, 'batch_size_norm': args.batch_size_norm,
'online': args.online, 'num_subg': args.num_subg
}
I wonder if we need this flag. Even if we need it for pre-sampling, we can set that internally in the SaintSampler classes.
kwargs = {
'dn': args.dataset, 'g': g, 'train_nid': train_nid, 'num_workers_sampler': args.num_workers_sampler,
+ 'num_subg_norm': args.num_subg_norm, 'batch_size_norm': args.batch_size_norm,
'online': args.online, 'num_subg': args.num_subg
} |
codereview_python_data_8641 | self._widget.page().findText(text, flags, wrapped_callback)
def _on_find_finished(self, find_text_result):
- """Unwrap the QWebEngineFindTextResult and pass it along."""
- self.search_match_changed.emit(find_text_result.activeMatch(),
- find_text_result.numberOfMatches())
def search(self, text, *, ignore_case=usertypes.IgnoreCase.never,
reverse=False, wrap=True, result_cb=None):
This method should have a `@pyqtSlot` decorator.
self._widget.page().findText(text, flags, wrapped_callback)
def _on_find_finished(self, find_text_result):
+ """Unwrap the result, store it, and pass it along."""
+ self.current_match = find_text_result.activeMatch()
+ self.total_match_count = find_text_result.numberOfMatches()
+ log.webview.debug("Active search match: {}/{}"
+ .format(self.current_match, self.total_match_count))
+ self.search_match_changed.emit(self.current_match, self.total_match_count)
def search(self, text, *, ignore_case=usertypes.IgnoreCase.never,
reverse=False, wrap=True, result_cb=None): |
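A minimal PyQt5 sketch of the slot decoration the reviewer asks for; the class and slot here are illustrative, and the signature mirrors the (active match, total matches) pair emitted above rather than the project's actual method.

```python
from PyQt5.QtCore import QObject, pyqtSlot

class FindHandler(QObject):
    @pyqtSlot(int, int)
    def _on_match_changed(self, current: int, total: int) -> None:
        # Slot signature matches the (current match, total matches) signal payload.
        print(f"match {current}/{total}")
```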
codereview_python_data_8644 | repos = list(self.base.repos._iter_disabled_with_enabled_metadata())
for repo in repos:
repo.enable()
- self.base._add_repo_to_sack(repos)
self.base._setup_excludes_includes()
try:
self.base.install(pkg_spec, strict=strict, forms=forms)
shouldn't this be translated?
repos = list(self.base.repos._iter_disabled_with_enabled_metadata())
for repo in repos:
repo.enable()
+ self.base._add_repos_to_sack(repos)
self.base._setup_excludes_includes()
try:
self.base.install(pkg_spec, strict=strict, forms=forms) |
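On the translation question, a minimal sketch of how a user-facing message is typically routed through gettext; the function name and message text are illustrative, not taken from dnf.

```python
from gettext import gettext as _

def report_no_match(pkg_spec: str) -> str:
    # Wrapping the literal in _() makes it discoverable for translation catalogs.
    return _("No match for argument: {}").format(pkg_spec)
```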
codereview_python_data_8648 | @property
def xlim(self):
dim1_keys, _ = self.dense_keys()
return min(dim1_keys), max(dim1_keys)
@property
def ylim(self):
_, dim2_keys = self.dense_keys()
return min(dim2_keys), max(dim2_keys)
I think we should rename this to `ViewMap` as it will become the main map type in holoviews.
@property
def xlim(self):
+ if self._xlim: return self._xlim
dim1_keys, _ = self.dense_keys()
return min(dim1_keys), max(dim1_keys)
@property
def ylim(self):
+ if self._ylim: return self._ylim
_, dim2_keys = self.dense_keys()
return min(dim2_keys), max(dim2_keys) |
codereview_python_data_8650 | edge only if the triple (source node, destination node and relation)
matches one of the edges in the graph.
- This sampler samples edges without replacement by default, which means it
- returns a fixed number of batches (i.e., num_edges/batch_size). However
- it accepts a replacement argument, which can be set to True to let the
- sampler generates any number of batches.
Parameters
----------
It seems that, with or without replacement, the sampler always generates `num_edges/batch_size` batches.
edge only if the triple (source node, destination node and relation)
matches one of the edges in the graph.
+ This sampler samples positive edges without replacement by default, which means
+ it returns a fixed number of batches (i.e., num_edges/batch_size) and the
+ positive edges sampled will not be duplicated. However, one can explicitly
+ request sampling with replacement (replacement = True), in which case the
+ sampler can generate any number of batches and treats each sampling of a single
+ positive edge as a standalone event.
Parameters
---------- |
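A small NumPy illustration of the batch-count point discussed above (the edge and batch counts are arbitrary): without replacement there are at most num_edges/batch_size distinct batches, while with replacement the number of batches is unbounded because edges can be drawn repeatedly.

```python
import numpy as np

num_edges, batch_size = 10, 4
rng = np.random.default_rng(0)

# Without replacement: a single permutation yields ceil(10 / 4) = 3 batches.
perm = rng.permutation(num_edges)
batches_without = np.array_split(perm, -(-num_edges // batch_size))

# With replacement: any number of batches can be generated independently.
batches_with = [rng.integers(0, num_edges, size=batch_size) for _ in range(7)]
```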
codereview_python_data_8651 | f'shapes cannot be applied to '
f'{expr_stype.get_verbosename(ctx.env.schema)}'
)
- try:
- view_type = viewgen.process_view(
- stype=expr_stype, path_id=expr.path_id,
- elements=shape.elements, ctx=ctx)
- except irast.InvalidScopeConfiguration as e:
- raise errors.QueryError(e.args[0], context=shape.context) from e
return setgen.ensure_set(expr, type_override=view_type, ctx=ctx)
`InvalidScopeConfiguration` gets thrown by `ScopeNode.attach_path()`, so perhaps it'd be better to invent a wrapper around it that takes the source context as an argument and re-raises?
f'shapes cannot be applied to '
f'{expr_stype.get_verbosename(ctx.env.schema)}'
)
+ view_type = viewgen.process_view(
+ stype=expr_stype, path_id=expr.path_id,
+ elements=shape.elements, parser_context=shape.context, ctx=ctx)
return setgen.ensure_set(expr, type_override=view_type, ctx=ctx) |
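A rough sketch of the wrapper idea from the comment, with stand-in exception classes and illustrative names rather than the actual EdgeDB API: catch the low-level scope error where the path is attached and re-raise it as a user-facing error carrying the source context.

```python
class InvalidScopeConfiguration(Exception):
    """Stand-in for the low-level error raised by ScopeNode.attach_path()."""

class QueryError(Exception):
    """Stand-in for the user-facing error that carries a source context."""
    def __init__(self, msg, *, context=None):
        super().__init__(msg)
        self.context = context

def attach_path_with_context(scope_node, path_id, *, source_context):
    # Hypothetical wrapper mirroring the reviewer's suggestion.
    try:
        scope_node.attach_path(path_id)
    except InvalidScopeConfiguration as e:
        raise QueryError(e.args[0], context=source_context) from e
```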
codereview_python_data_8655 | # ensure that when we do the math for the blocks, the partition column
# will be read in along with a non partition column.
if columns and directory and any(col in partitioned_columns for col in columns):
- partitioned_columns = [col for col in columns if col in partitioned_columns]
columns = [col for col in columns if col not in partitioned_columns]
# If all of the columns wanted are partition columns, return an
# empty dataframe with the desired columns.
Won't this make it so that if we want columns 1 and 5 with the dataset partitioned on 1, 2, and 5, we will get 1 and 5 while pandas returns 1, 2, and 5?
# ensure that when we do the math for the blocks, the partition column
# will be read in along with a non partition column.
if columns and directory and any(col in partitioned_columns for col in columns):
columns = [col for col in columns if col not in partitioned_columns]
# If all of the columns wanted are partition columns, return an
# empty dataframe with the desired columns. |
codereview_python_data_8664 | self.use_deform = use_deform
self.switch = nn.Conv2d(
self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
- self.switch.weight.data.fill_(0)
- self.switch.bias.data.fill_(1)
self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
self.weight_diff.data.zero_()
self.pre_context = nn.Conv2d(
We could use the init functions in `mmcv.cnn`.
self.use_deform = use_deform
self.switch = nn.Conv2d(
self.in_channels, 1, kernel_size=1, stride=stride, bias=True)
+ constant_init(self.switch, 0, bias=1)
self.weight_diff = nn.Parameter(torch.Tensor(self.weight.size()))
self.weight_diff.data.zero_()
self.pre_context = nn.Conv2d( |
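For reference, a small sketch of the mmcv-style initialisation the comment points to, assuming the `constant_init(module, val, bias=0)` helper exported by `mmcv.cnn`; the channel sizes are illustrative, and the call replaces the manual `.data.fill_()` calls on the switch conv.

```python
import torch.nn as nn
from mmcv.cnn import constant_init  # assumed helper: constant_init(module, val, bias=0)

switch = nn.Conv2d(64, 1, kernel_size=1, stride=1, bias=True)
constant_init(switch, 0, bias=1)  # weight -> 0, bias -> 1, as in the revision above
```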
codereview_python_data_8674 | inventory_index_id = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_data = Column(Text)
Do you think that we need to maybe call this `timestamp`? And do you think we need `violation_timestamp` and a `scanner_timestamp` so that we can tell which scanning run the violations came from? And `scanner` might not be future-proof, of course. :)
inventory_index_id = Column(String(256))
resource_id = Column(String(256), nullable=False)
resource_type = Column(String(256), nullable=False)
+ full_name = Column(String(1024))
rule_name = Column(String(256))
rule_index = Column(Integer, default=0)
violation_data = Column(Text) |
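If the naming idea in the comment were taken up, the extra columns might look roughly like this; both column names are hypothetical and are not part of the change above.

```python
from sqlalchemy import Column, DateTime, String

# Hypothetical additions sketching the idea of separating the time a violation
# was found from the scanner run that produced it.
violation_timestamp = Column(DateTime)
scanner_index_id = Column(String(256))
```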
codereview_python_data_8676 | @attr.s()
-class MultipleResults(Generic[Ex_Inv], Iterable):
values = attr.ib()
def __iter__(self):
return iter(self.values)
def multiple(*args: Ex_Inv) -> MultipleResults[Ex_Inv]:
"""This function can be used to pass multiple results to the target(s) of
a rule. Just use ``return multiple(result1, result2, ...)`` in your rule.
If we use `typing.Iterable`, I don't think we need multiple inheritance from `Generic[Ex]`: ```suggestion class MultipleResults(Iterable[Ex]): ```
@attr.s()
+class MultipleResults(Generic[Ex], Iterable):
values = attr.ib()
def __iter__(self):
return iter(self.values)
+# We need to use an invariant typevar here to avoid a mypy error, as covariant
+# typevars cannot be used as parameters.
def multiple(*args: Ex_Inv) -> MultipleResults[Ex_Inv]:
"""This function can be used to pass multiple results to the target(s) of
a rule. Just use ``return multiple(result1, result2, ...)`` in your rule. |
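A self-contained sketch of the suggestion quoted above: subclassing a parametrised `typing.Iterable` already makes the class generic, so `Generic[Ex]` does not need to be listed separately. An invariant type variable is used, matching the note in the revision; the trailing assert is only there to show the class works.

```python
from typing import Iterable, Iterator, TypeVar

Ex = TypeVar("Ex")  # invariant, so it can appear in parameter position

class MultipleResults(Iterable[Ex]):
    def __init__(self, *values: Ex) -> None:
        self.values = values

    def __iter__(self) -> Iterator[Ex]:
        return iter(self.values)

assert list(MultipleResults(1, 2, 3)) == [1, 2, 3]
```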
codereview_python_data_8680 | return {'last_metrics': last_metrics + mets}
def try_save(state:Dict, path:Path=None, file:PathLikeOrBinaryStream=None):
- target = open(path / file, 'wb') if is_pathlike(file) else file
try: torch.save(state, target)
except OSError as e:
- raise Exception(f"{e}\n Can't write {path / file}. Pass an absolute writable pathlib obj `fname`.")
fastai uses the `foo/bar` style and not `foo / bar`, so it mimics an actual file path. Here and below.
return {'last_metrics': last_metrics + mets}
def try_save(state:Dict, path:Path=None, file:PathLikeOrBinaryStream=None):
+ target = open(path/file, 'wb') if is_pathlike(file) else file
try: torch.save(state, target)
except OSError as e:
+ raise Exception(f"{e}\n Can't write {path/file}. Pass an absolute writable pathlib obj `fname`.") |
codereview_python_data_8682 | pos : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a coordinate list or tuple. If None, then use
- circular_layout() for dim > 2 and a linear layout for dim == 1.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for
```>= 2``` instead of ```> 2```
pos : dict or None optional (default=None)
Initial positions for nodes as a dictionary with node as keys
and values as a coordinate list or tuple. If None, then use
+ circular_layout() for dim >= 2 and a linear layout for dim == 1.
weight : string or None optional (default='weight')
The edge attribute that holds the numerical value used for |
codereview_python_data_8686 | train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
load_from = 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth' # noqa
This URL is outdated, add a `TODO` to fix it.
train=dict(classes=classes),
val=dict(classes=classes),
test=dict(classes=classes))
+# TODO: Update model url after bumping to V2.0
load_from = 'https://s3.ap-northeast-2.amazonaws.com/open-mmlab/mmdetection/models/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth' # noqa |
codereview_python_data_8688 | new_parameters[key] = new_param
if new_parameters:
env.model.reset_parameter(new_parameters)
callback.before_iteration = True
callback.order = 10
return callback
I think you should update env.params after reset_parameters.
new_parameters[key] = new_param
if new_parameters:
env.model.reset_parameter(new_parameters)
+ env.params.update(new_parameters)
callback.before_iteration = True
callback.order = 10
return callback |
codereview_python_data_8689 | # frame seek within range is tested
new_frame = 91
self.dcdfile.seek(new_frame)
- assert_equal(self.dcdfile.current_frame, new_frame)
def test_seek_negative(self):
# frame seek with negative number
`dcd_frame` is a namedtuple. You can also access the coordinates with `dcd_frame.x`.
# frame seek within range is tested
new_frame = 91
self.dcdfile.seek(new_frame)
+ assert_equal(self.dcdfile.tell(), new_frame)
def test_seek_negative(self):
# frame seek with negative number |
codereview_python_data_8690 | tensor([0, 1, 0, 1, 2, 2, 3, 5, 4, 6, 3, 5, 4, 6]))
"""
if algorithm == 'bruteforce-blas':
- return _segmented_knn_graph_blas(x, k, segs)
else:
- out = knn(x, segs, x, segs, k, algorithm=algorithm)
row, col = out[1], out[0]
return convert.graph((row, col))
-def _segmented_knn_graph_blas(x, k, segs):
"""Construct multiple graphs from multiple sets of points according to
k-nearest-neighbor (KNN).
`dgl.knn` allows changing the distance function. Perhaps we should expose that option to `dgl.knn_graph` too.
tensor([0, 1, 0, 1, 2, 2, 3, 5, 4, 6, 3, 5, 4, 6]))
"""
if algorithm == 'bruteforce-blas':
+ return _segmented_knn_graph_blas(x, k, segs, dist=dist)
else:
+ out = knn(x, segs, x, segs, k, algorithm=algorithm, dist=dist)
row, col = out[1], out[0]
return convert.graph((row, col))
+def _segmented_knn_graph_blas(x, k, segs, dist='euclidean'):
"""Construct multiple graphs from multiple sets of points according to
k-nearest-neighbor (KNN). |
codereview_python_data_8696 | def set_one_var_from_string(
- name: Optional[str],
- param_type: Optional[str],
- checks: List
-) -> Optional[str]:
"""Construct code for auto config file for one param value.
Parameters
```suggestion name: str, param_type: str, checks: List[str] ) -> str: ```
def set_one_var_from_string(
+ name: str,
+ param_type: str,
+ checks: List[str]
+) -> str:
"""Construct code for auto config file for one param value.
Parameters |
codereview_python_data_8697 | """
Purpose
-Shows how to use AWS SDK for Python (Boto3) with the Amazon Elastic Compute Cloud
(Amazon EC2) API to terminate an instance and clean up additional resources.
"""
...**the** AWS SDK...
"""
Purpose
+Shows how to use the AWS SDK for Python (Boto3) with the Amazon Elastic Compute Cloud
(Amazon EC2) API to terminate an instance and clean up additional resources.
""" |
codereview_python_data_8699 | # https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
if method == 'GET':
query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
- allowed_overrides = {
- 'response-content-type': 'Content-Type',
- 'response-content-language': 'Content-Language',
- 'response-expires': 'Expires',
- 'response-cache-control': 'Cache-Control',
- 'response-content-disposition': 'Content-Disposition',
- 'response-content-encoding': 'Content-Encoding',
- }
- for param_name, header_name in allowed_overrides.items():
if param_name in query_map:
response.headers[header_name] = query_map[param_name][0]
nitpick: could we move these to a top-level constant `ALLOWED_HEADER_OVERRIDES`? Not critical, but it would help us reduce the amount of "spaghetti code" we already have in here. :)
# https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html
if method == 'GET':
query_map = urlparse.parse_qs(parsed.query, keep_blank_values=True)
+ for param_name, header_name in ALLOWED_HEADER_OVERRIDES.items():
if param_name in query_map:
response.headers[header_name] = query_map[param_name][0] |
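The module-level constant the nitpick asks for would simply carry the mapping from the original inline dict, roughly:

```python
# Mapping taken verbatim from the inline dict removed above; only its location
# (module level) is the suggested change.
ALLOWED_HEADER_OVERRIDES = {
    'response-content-type': 'Content-Type',
    'response-content-language': 'Content-Language',
    'response-expires': 'Expires',
    'response-cache-control': 'Cache-Control',
    'response-content-disposition': 'Content-Disposition',
    'response-content-encoding': 'Content-Encoding',
}
```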
codereview_python_data_8711 | Returns:
iter: An iterator of assets that match the text search.
"""
- if table is None:
- table = 'assets'
-
objects = backend.query.text_search(self.connection, search, limit=limit,
table=table)
Unnecessary if we make `table` a required parameter or if we set the default value of `table` to `assets`.
Returns:
iter: An iterator of assets that match the text search.
"""
objects = backend.query.text_search(self.connection, search, limit=limit,
table=table) |
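A compact sketch of the second alternative in the comment: giving `table` a default of `'assets'` removes the need for the `if table is None` guard. The signature is illustrative and mirrors the method above rather than the project's actual one.

```python
def text_search(self, search, *, limit=None, table='assets'):
    # With a concrete default, no None-check is needed before the query.
    return backend.query.text_search(self.connection, search,
                                     limit=limit, table=table)
```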