id: string (length 24 to 28)
content: string (length 121 to 2.08k)
codereview_python_data_2723
    if use_mpi:
        cmake_cmd.append("-DUSE_MPI=ON")
    if use_cpp17:
-        cmake_cmd.append("_DUSE_CPP17=ON")
    if nomp:
        cmake_cmd.append("-DUSE_OPENMP=OFF")
    if use_hdfs:

```suggestion
        cmake_cmd.append("-DUSE_CPP17=ON")
```

    if use_mpi:
        cmake_cmd.append("-DUSE_MPI=ON")
    if use_cpp17:
+        cmake_cmd.append("-DUSE_CPP17=ON")
    if nomp:
        cmake_cmd.append("-DUSE_OPENMP=OFF")
    if use_hdfs:
codereview_python_data_2726
        alignments = list(FastaIO.FastaM10Iterator(StringIO(simple_example)))
        self.assertEqual(len(alignments), 4)
        self.assertEqual(len(alignments[0]), 2)
-        for a in alignments:
-            rows = (2, 3, 4, 5)
-            cols = (108, 64, 123, 456)
-            for a, rows, cols in zip(alignments, rows, cols):
-                self.assertEqual(rows, len(a))
-                self.assertEqual(cols, a.get_alignment_length())
            for r in a:
                print("%s %s %i" % (r.seq, r.id, r.annotations["original_length"]))
            # print(a.annotations)

You've accidentally got a double nested for loop here, there should be just one loop with ``a``.

        alignments = list(FastaIO.FastaM10Iterator(StringIO(simple_example)))
        self.assertEqual(len(alignments), 4)
        self.assertEqual(len(alignments[0]), 2)
+        rows = (2, 3, 4, 5)
+        cols = (108, 64, 123, 456)
+        for a in (alignments, rows, cols):
+            self.assertEqual(rows, len(a))
+            self.assertEqual(cols, a.get_alignment_length())
            for r in a:
                print("%s %s %i" % (r.seq, r.id, r.annotations["original_length"]))
            # print(a.annotations)
codereview_python_data_2729
    # Install Forseti.
    download_forseti=DOWNLOAD_FORSETI,
-    # if on tag, checkout latest patch to that version, else checkout current branch version
    checkout_forseti_version=CHECKOUT_FORSETI_VERSION,
    # Set ownership for Forseti conf and rules dirs

Please use proper sentence, capitalize first letter and give it a period. Also keep it to 80 character length. You can set the margin in pycharm.

    # Install Forseti.
    download_forseti=DOWNLOAD_FORSETI,
+    # If installed on a version tag, checkout latest patch.
+    # Otherwise checkout originally installed version.
    checkout_forseti_version=CHECKOUT_FORSETI_VERSION,
    # Set ownership for Forseti conf and rules dirs
codereview_python_data_2731
        try:
            return parser(self.session, track["uri"])
        except IOError as err:
-            self.logger.debug("Failed to extract {0} streams: {1}", name, err)

    def _extract_from_format(self, format_):
        qualities = {}

If it's failed to extract, shouldn't this be error not a debug?

        try:
            return parser(self.session, track["uri"])
        except IOError as err:
+            self.logger.error("Failed to extract {0} streams: {1}", name, err)

    def _extract_from_format(self, format_):
        qualities = {}
codereview_python_data_2745
    else:
        if name:
            axes.set_title(name)
-        else:
-            if title:
-                axes.set_title(title)
    if xlabel is None:
        axes.set_xlabel("branch length")
    else:
        axes.set_xlabel(xlabel)
    if ylabel is None:
        axes.set_ylabel("taxa")
    else:

You could use a single "elif" line here.

    else:
        if name:
            axes.set_title(name)
+        elif title:
+            axes.set_title(title)
+
    if xlabel is None:
        axes.set_xlabel("branch length")
    else:
        axes.set_xlabel(xlabel)
+
    if ylabel is None:
        axes.set_ylabel("taxa")
    else:
codereview_python_data_2746
if __name__ == "__main__":
-    sys.exit(main())

`main()` does not have return code, we can simply do `main()`

if __name__ == "__main__":
+    main()
codereview_python_data_2752
    :param queue: The queue from which to delete the messages.
    :param messages: The list of messages to delete.
    :return: The response from SQS that contains the list of successful and failed
-             deletions.
    """
    try:
        entries = [{

list of successful and failed... messages?

    :param queue: The queue from which to delete the messages.
    :param messages: The list of messages to delete.
    :return: The response from SQS that contains the list of successful and failed
+             message deletions.
    """
    try:
        entries = [{
codereview_python_data_2754
    ):
        if not self.full_axis:
            num_splits = 1
        result = super(PandasOnRayDataframeVirtualPartition, self).apply(
            func, num_splits, other_axis_partition, maintain_partitioning, **kwargs
        )

It seems clearer to me to switch the cases so we're not checking a negative condition.

    ):
        if not self.full_axis:
            num_splits = 1
+        if len(self.call_queue) > 0:
+            self.drain_call_queue()
        result = super(PandasOnRayDataframeVirtualPartition, self).apply(
            func, num_splits, other_axis_partition, maintain_partitioning, **kwargs
        )
codereview_python_data_2759
    if fmt != 'coo':
        raise TypeError(
            'Tensorflow backend only supports COO format. But got %s.' % fmt)
    spmat = tf.SparseTensor(indices=tf.cast(tf.transpose(
        index[1], (1, 0)), tf.int64), values=data, dense_shape=shape)
    return spmat, None

Add a docstring about the reason

    if fmt != 'coo':
        raise TypeError(
            'Tensorflow backend only supports COO format. But got %s.' % fmt)
+    # tf.SparseTensor only supports int64 indexing,
+    # therefore manually casting to int64 when input in int32
    spmat = tf.SparseTensor(indices=tf.cast(tf.transpose(
        index[1], (1, 0)), tf.int64), values=data, dense_shape=shape)
    return spmat, None
codereview_python_data_2770
    Notes
    -----
    Unbatching will break each field tensor of the batched graph into smaller
-    partitions. This is usually wasteful.

    For simpler tasks such as node/edge state aggregation, try to use
    readout functions.

Maybe remove "This is usually wasteful"? People might just want to unbatch the graph. This makes them wonder what is the better solution.

    Notes
    -----
    Unbatching will break each field tensor of the batched graph into smaller
+    partitions.

    For simpler tasks such as node/edge state aggregation, try to use
    readout functions.
codereview_python_data_2787
def _imptcs_to_numpy(X, impcts_dict):
    cols = ['Column_' + str(i) for i in range(X.shape[1])]
-    imptcs = []
-    for col in cols:
-        imptcs.append(impcts_dict.get(col, 0.))
    return np.array(imptcs)

X, y = load_breast_cancer(True)

Now after removing `try`/`except` logic, it can be rewritten with list comprehension for better efficiency and readability:
```
imptcs = [impcts_dict.get(col, 0.) for col in cols]
```

def _imptcs_to_numpy(X, impcts_dict):
    cols = ['Column_' + str(i) for i in range(X.shape[1])]
+    imptcs = [impcts_dict.get(col, 0.) for col in cols]
    return np.array(imptcs)

X, y = load_breast_cancer(True)
codereview_python_data_2807
        p(v, u) = \frac{w_{v, u}}{\sum_{u', (v, u') \in E} w_{v, u'}}

-        If a str is given, the edge weight will be loaded from the feature column with
        the same name. The feature column must be a scalar column in this case.
        Default: None

is it "edge feature column"?

        p(v, u) = \frac{w_{v, u}}{\sum_{u', (v, u') \in E} w_{v, u'}}

+        If a str is given, the edge weight will be loaded from the edge feature column with
        the same name. The feature column must be a scalar column in this case.
        Default: None
codereview_python_data_2813
    np.testing.assert_equal(o3_t.cpu().numpy(), np_t)

    # Slice a zero-dim tensor
-    with pytest.raises(Exception):
        o3c.Tensor.ones((), device=device)[:] = 0
-    with pytest.raises(Exception):
        o3c.Tensor.ones((), device=device)[0:1] = 0

More specific test here as well.

    np.testing.assert_equal(o3_t.cpu().numpy(), np_t)

    # Slice a zero-dim tensor
+    # match=".*Cannot slice a scalar (0-dim) tensor.*"
+    with pytest.raises(RuntimeError,
+                       match=r"Cannot slice a scalar \(0-dim\) tensor."):
        o3c.Tensor.ones((), device=device)[:] = 0
+    with pytest.raises(RuntimeError,
+                       match=r"Cannot slice a scalar \(0-dim\) tensor."):
        o3c.Tensor.ones((), device=device)[0:1] = 0
codereview_python_data_2817
    3: Chem.BondType.TRIPLE,
}
# add string version of the key for each bond
-RDBONDORDER.update({str(key):value for key,value in RDBONDORDER.items()})
RDATTRIBUTES = {
    "altLoc": "AltLoc",
    "chainID": "ChainId",

This breaks PEP8, make sure to run your code through a linter at some point (I won't mention any more PEP8 until a future review).

    3: Chem.BondType.TRIPLE,
}
# add string version of the key for each bond
+RDBONDORDER.update({str(key): value for key, value in RDBONDORDER.items()})
RDATTRIBUTES = {
    "altLoc": "AltLoc",
    "chainID": "ChainId",
codereview_python_data_2818
def test_articulation_points():
    Ggen = _generate_no_biconnected()
    for flow_func in flow_funcs:
-        for i in range(1):
            G = next(Ggen)
            cut = nx.minimum_node_cut(G, flow_func=flow_func)
            assert_true(len(cut) == 1, msg=msg.format(flow_func.__name__))

Did you mean to leave this loop here?

def test_articulation_points():
    Ggen = _generate_no_biconnected()
    for flow_func in flow_funcs:
+        for i in range(1):  # change 1 to 3 or more for more realizations.
            G = next(Ggen)
            cut = nx.minimum_node_cut(G, flow_func=flow_func)
            assert_true(len(cut) == 1, msg=msg.format(flow_func.__name__))
codereview_python_data_2820
        re_score = re_score - re_tail
        im_score = im_score - im_tail
-        score = th.stack([re_score, im_score], dim = 0)
        score = score.norm(dim = 0)

        return {'score': self.gamma - th.sum(score, -1)}

Why do the rotation on tail? rotatE is rotate(head) - tail

        re_score = re_score - re_tail
        im_score = im_score - im_tail
+        score = th.stack([re_score, im_score], dim = 0, p = 1)
        score = score.norm(dim = 0)

        return {'score': self.gamma - th.sum(score, -1)}
codereview_python_data_2821
        """
        # ensure we get a 200
-        resp = restutil.http_get(self.instance_url, headers=self._headers)
        if restutil.request_failed(resp):
            return False, "{0}".format(restutil.read_response_error(resp))

We should pass a useragent with this that uniquely identifies the request

        """
        # ensure we get a 200
+        resp = restutil.http_get(self.instance_url, headers=self._health_headers)
        if restutil.request_failed(resp):
            return False, "{0}".format(restutil.read_response_error(resp))
codereview_python_data_2823
    response.headers['X-Frame-Options'] = 'deny'
    return response

-  def render(self, path, values=None, status=200, response=None):
    """Write HTML response."""
    if values is None:
      values = {}

Could you explain a bit why we need to add the response argument? Seems like we can always just add stuff to the response after the call to render() to keep the interface clean.

    response.headers['X-Frame-Options'] = 'deny'
    return response

+  def render(self, path, values=None, status=200):
    """Write HTML response."""
    if values is None:
      values = {}
codereview_python_data_2829
import pkg_resources
try:
    list(pkg_resources.parse_requirements('foo~=1.0'))
-except pkg_resources.RequirementParseError:
    exit('Your Python distribution comes with an incompatible version '
         'of `setuptools`. Please run:\n'
         '    $ pip install --update setuptools\n'

This creates another problem because with older versions of setuptools `pkg_resources.RequirementsParseError` may not exists. I think this is the case with ubuntu 14.04 which from the repos installs setuptools v3.0.0

import pkg_resources
try:
    list(pkg_resources.parse_requirements('foo~=1.0'))
+except ValueError:
    exit('Your Python distribution comes with an incompatible version '
         'of `setuptools`. Please run:\n'
         '    $ pip install --update setuptools\n'
codereview_python_data_2841
import tensorflow_addons

-EXAMPLE_URL = 'https://github.com/tensorflow/addons/blob/fa8e966d987fd9b0d20551a666e44e2790fdf9c7/tensorflow_addons/layers/normalizations.py#L73'
-TUTORIAL_URL = 'https://docs.python.org/3/library/typing.html'

# TODO: add types and remove all elements from

I like this system as an iterative way to add typing :) Probably worth describing this system in the docs?

import tensorflow_addons

+EXAMPLE_URL = "https://github.com/tensorflow/addons/blob/fa8e966d987fd9b0d20551a666e44e2790fdf9c7/tensorflow_addons/layers/normalizations.py#L73"
+TUTORIAL_URL = "https://docs.python.org/3/library/typing.html"

# TODO: add types and remove all elements from
codereview_python_data_2844
from nvidia.dali._multiproc.pool import WorkerPool
from nvidia.dali import pickling as dali_pickle
from nvidia.dali.backend import CheckDLPackCapsule
-import nvidia.dali.fn as fn
from threading import local as tls
from . import data_node as _data_node
import functools

Doesn't this introduce `nvidia.dali.pipeline.fn`? I think, when introducing new imports we should stick with `import nvidia.dali.fn as _fn`. Same for internal above. I know that some of them already are not handled correctly, but let's not add more.

from nvidia.dali._multiproc.pool import WorkerPool
from nvidia.dali import pickling as dali_pickle
from nvidia.dali.backend import CheckDLPackCapsule
from threading import local as tls
from . import data_node as _data_node
import functools
codereview_python_data_2846
    expected_n_segments = 1

    def test_attr_size(self, top):
-        for attr in ['ids', 'names']:
-            assert len(top.ids) == top.n_atoms
-            assert len(top.names) == top.n_atoms
-        for attr in ['resids', 'resnames']:
-            assert len(top.resids) == top.n_residues
-            assert len(top.resnames) == top.n_residues

class TestGROWideBox(object):

The loops are meaningless: the `attr` variable is not used.

    expected_n_segments = 1

    def test_attr_size(self, top):
+        assert len(top.ids) == top.n_atoms
+        assert len(top.names) == top.n_atoms
+        assert len(top.resids) == top.n_residues
+        assert len(top.resnames) == top.n_residues

class TestGROWideBox(object):
codereview_python_data_2851
                EmailFactory(email_summary_config).get_connector())
        except:
            LOGGER.exception(
-                'Error occurred while fetching connector details')
            raise InvalidInputError(self.notifier_config)

        email_subject = 'Inventory Summary: {0}'.format(

Update this message as mentioned above.

                EmailFactory(email_summary_config).get_connector())
        except:
            LOGGER.exception(
+                'Error occurred to instantiate connector.')
            raise InvalidInputError(self.notifier_config)

        email_subject = 'Inventory Summary: {0}'.format(
codereview_python_data_2852
                   img,
                   result,
                   score_thr=0.3,
                   title='result',
-                   wait_time=0):
    """Visualize the detection results on the image.

    Args:

Try not BC-breaking the API. If necessary, please add a deprecated warning.

                   img,
                   result,
                   score_thr=0.3,
+                   fig_size=(15, 10),
                   title='result',
+                   block=True,
+                   wait_time=1):
    """Visualize the detection results on the image.

    Args:
codereview_python_data_2853
  latest_build = build_revision_mappings.get('canary')
  if latest_build is None:
    latest_build = build_revision_mappings.get('dev')
-  return get_impact(latest_build, start_revision, end_revision, True)

def get_impact_on_build(build_type, current_version, testcase,

nit: `is_last_possible_build=True` for readability.

  latest_build = build_revision_mappings.get('canary')
  if latest_build is None:
    latest_build = build_revision_mappings.get('dev')
+  return get_impact(
+      latest_build, start_revision, end_revision, is_last_possible_build=True)

def get_impact_on_build(build_type, current_version, testcase,
codereview_python_data_2859
    def setUp(self):
        self.P4 = nx.path_graph(4)
-        self.K3 = nx.bipartite.complete_bipartite_graph(3,3)
        self.C4 = nx.cycle_graph(4)
        self.davis = nx.davis_southern_women_graph()
        self.top_nodes = [n for n,d in self.davis.nodes(data=True)

Isn't `complete_bipartite_graph` in the main namespace. Then it should be `self.K3 = nx.complete_bipartite_graph(3,3)`

    def setUp(self):
        self.P4 = nx.path_graph(4)
+        self.K3 = nx.complete_bipartite_graph(3,3)
        self.C4 = nx.cycle_graph(4)
        self.davis = nx.davis_southern_women_graph()
        self.top_nodes = [n for n,d in self.davis.nodes(data=True)
codereview_python_data_2863
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
-  <p>Details: cache-sea4468-SEA 1645542752 2757172324</p>
  <hr>
  <p>Varnish cache server</p>
</body>

The remove all and re-add all pattern doesn't scale very well -- it is fine for small lists, but once the lists get large you're creating too many operations for a simple "add this user to my list" operation. Comparing the existing and new lists and then creating a DELETE and an INSERT statement ensures that this operation touches the least number of rows necessary.

  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
+  <p>Details: cache-sea4422-SEA 1645542752 1505943393</p>
  <hr>
  <p>Varnish cache server</p>
</body>
codereview_python_data_2866
    undisordered_atom_list = []
    for atom in atom_list:
        if atom.is_disordered():
-            undisordered_atom_list = (
-                undisordered_atom_list + atom.disordered_get_list()
-            )
        else:
            undisordered_atom_list.append(atom)
    return undisordered_atom_list

Does this work just as well?
```python
undisordered_atom_list += atom.disordered_get_list()
```

    undisordered_atom_list = []
    for atom in atom_list:
        if atom.is_disordered():
+            undisordered_atom_list += atom.disordered_get_list()
        else:
            undisordered_atom_list.append(atom)
    return undisordered_atom_list
codereview_python_data_2867
        num_rows_for_head = num_rows // 2 + 1
        num_cols_for_front = num_cols // 2 + 1

-        if len(self._query_compiler.index) <= num_rows:
            head = self._query_compiler
            tail = None
        else:
            head = self._query_compiler.head(num_rows_for_head)
            tail = self._query_compiler.tail(num_rows_for_head)

-        if len(self._query_compiler.columns) <= num_cols:
            head_front = head.to_pandas()
            # Creating these empty to make the concat logic simpler
            head_back = pandas.DataFrame()

Why does this change? `self.index` is a property view on `self._query_compiler.index`.

        num_rows_for_head = num_rows // 2 + 1
        num_cols_for_front = num_cols // 2 + 1

+        if len(self.index) <= num_rows:
            head = self._query_compiler
            tail = None
        else:
            head = self._query_compiler.head(num_rows_for_head)
            tail = self._query_compiler.tail(num_rows_for_head)

+        if len(self.columns) <= num_cols:
            head_front = head.to_pandas()
            # Creating these empty to make the concat logic simpler
            head_back = pandas.DataFrame()
codereview_python_data_2868
        # take the name and status of the extension if is it not None, else use the handler's
        self.summary = [(o.name, o.status) for o in map(lambda h: h.extension_status if h.extension_status is not None else h, vm_status.vmAgent.extensionHandlers)]
        self.summary.sort(key=lambda s: s[0])  # sort by extension name to make comparisons easier
-        self.converged = all(status in (ValidHandlerStatus.success, ValidHandlerStatus.error, HandlerStatus.ready) for _, status in self.summary)

    def __eq__(self, other):
        return self.summary == other.summary

When looking at HandlerStatus, both Ready and NotReady imply that the Handler is `Converged`. They're just used to identify if the handler failed or not, but it still means that it converged. You should add `HandlerStatus.not_ready` here too

        # take the name and status of the extension if is it not None, else use the handler's
        self.summary = [(o.name, o.status) for o in map(lambda h: h.extension_status if h.extension_status is not None else h, vm_status.vmAgent.extensionHandlers)]
        self.summary.sort(key=lambda s: s[0])  # sort by extension name to make comparisons easier
+        self.converged = all(status in (ValidHandlerStatus.success, ValidHandlerStatus.error, HandlerStatus.ready, HandlerStatus.not_ready) for _, status in self.summary)

    def __eq__(self, other):
        return self.summary == other.summary
codereview_python_data_2876
    assert v[0].request.url == "http://foo.com/"
    v.create("get", "http://foo.com")
    assert len(v) == 2
-    v.create("get", "http://foo.com\\")
-    v.create("get", "http://")
-    assert len(v) == 2

def test_orders():

Please also assert that a log message was written, e.g., `assert tctx.master.has_log("<check for correct message>", level="error")`

    assert v[0].request.url == "http://foo.com/"
    v.create("get", "http://foo.com")
    assert len(v) == 2
+    with pytest.raises(exceptions.CommandError, match="Invalid URL"):
+        v.create("get", "http://foo.com\\")
+    with pytest.raises(exceptions.CommandError, match="Invalid URL"):
+        v.create("get", "http://")

def test_orders():
codereview_python_data_2879
        SELECT REQUIRED (User.groups.description);
        """

-    def test_edgeql_syntax_optional_01(self):
-        """
-        SELECT OPTIONAL (User.groups.description);
-        """
-
    def test_edgeql_syntax_list_01(self):
        """
        SELECT (some_list_fn())[2];

This is actually dead syntax and needs to be removed. We decided to express this via a cast expression.

        SELECT REQUIRED (User.groups.description);
        """

    def test_edgeql_syntax_list_01(self):
        """
        SELECT (some_list_fn())[2];
codereview_python_data_2884
    if tool_matches(tool, job_name):
      return tool

-  from platforms import android
-  if platform() == 'ANDROID_KERNEL' and 'KASAN' in android.adb.get_property(
-      'ro.product.name'):
-    return 'KASAN'
-
  # If no tool specified, assume it is ASAN. Also takes care of LSAN job type.
  return 'ASAN'

put this block in a if if platform() == 'ANDROID':

    if tool_matches(tool, job_name):
      return tool

  # If no tool specified, assume it is ASAN. Also takes care of LSAN job type.
  return 'ASAN'
codereview_python_data_2886
                module.post_process()
            except BaseException as exc:
                if isinstance(exc, KeyboardInterrupt):
-                    self.log.debug("Shutdown: %s", exc)
                else:
-                    self.log.debug("Shutdown: %s\n%s", exc, traceback.format_exc())
                if not self.stopping_reason:
                    self.stopping_reason = exc
                if not exc_info:

Why shutdown while we in post-process?

                module.post_process()
            except BaseException as exc:
                if isinstance(exc, KeyboardInterrupt):
+                    self.log.debug("post_process: %s", exc)
                else:
+                    self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
                if not self.stopping_reason:
                    self.stopping_reason = exc
                if not exc_info:
codereview_python_data_2891
        # WORKAROUND for https://github.com/PyCQA/pylint/issues/574
        # pylint: disable=superfluous-parens
        if 'magenta' in (old_config or ''):
            assert ' colors.hints.fg: magenta' in lines
        if insert:
            assert ' tabs.show: never' in lines
-        # pylint: enable=superfluous-parens

    def test_init_save_manager(self, yaml, fake_save_manager):
        yaml.init_save_manager(fake_save_manager)

This can be after the affected `if` line already.

        # WORKAROUND for https://github.com/PyCQA/pylint/issues/574
        # pylint: disable=superfluous-parens
        if 'magenta' in (old_config or ''):
+            # pylint: enable=superfluous-parens
            assert ' colors.hints.fg: magenta' in lines
        if insert:
            assert ' tabs.show: never' in lines

    def test_init_save_manager(self, yaml, fake_save_manager):
        yaml.init_save_manager(fake_save_manager)
codereview_python_data_2896
        self.assertEqual(listens[1].ts_since_epoch, 1400000150)
        self.assertEqual(listens[2].ts_since_epoch, 1400000100)
        self.assertEqual(listens[3].ts_since_epoch, 1400000000)

I think it would make sense to fetch the cache values after deleting the listens and making sure they are what we expect.

        self.assertEqual(listens[1].ts_since_epoch, 1400000150)
        self.assertEqual(listens[2].ts_since_epoch, 1400000100)
        self.assertEqual(listens[3].ts_since_epoch, 1400000000)
+
+        self.assertEqual(self.logstore.get_listen_count_for_user(testuser_name), 4)
+        min_ts, max_ts = self.logstore.get_timestamps_for_user(testuser_name)
+        self.assertEqual(min_ts, 1400000000)
+        self.assertEqual(max_ts, 1400000200)
+
codereview_python_data_2898
            self._update_overlay_geometries()
        elif option == 'window.hide_decoration':
            self._set_decoration(config.val.window.hide_decoration)
-        elif option == 'bg_transparency':
            self._set_bg_color()
-
    def _add_widgets(self):
        """Add or readd all widgets to the VBox."""
        self._vbox.removeWidget(self.tabbed_browser.widget)

That option doesn't exist, you probably mean `colors.webpage.bg`

            self._update_overlay_geometries()
        elif option == 'window.hide_decoration':
            self._set_decoration(config.val.window.hide_decoration)
+        elif option == 'colors.webpage.bg':
            self._set_bg_color()

    def _add_widgets(self):
        """Add or readd all widgets to the VBox."""
        self._vbox.removeWidget(self.tabbed_browser.widget)
codereview_python_data_2903
        if force_multi is not None:
            dgl_warning("force_multi will be deprecated." \
                        "Please use return_array instead")
-            force_multi = return_array

        if return_array:
            return idx.tousertensor()

This line is redundant.

        if force_multi is not None:
            dgl_warning("force_multi will be deprecated." \
                        "Please use return_array instead")
+            return_array = force_multi

        if return_array:
            return idx.tousertensor()
codereview_python_data_2904
    disk_cache = cache.DiskCache(str(tmpdir))
    assert disk_cache.maximumCacheSize() == max_cache_size
-    config_stub.data['storage']['cache-size'] = max_cache_size * 2
-    config_stub.changed.emit('storage', 'cache-size')
    assert disk_cache.maximumCacheSize() == max_cache_size * 2

Please use `config_stub.set(sect, opt, value)` instead of emitting the signal manually everywhere - it sets the value and emits the signal.

    disk_cache = cache.DiskCache(str(tmpdir))
    assert disk_cache.maximumCacheSize() == max_cache_size
+    config_stub.set('storage', 'cache-size', max_cache_size * 2)
    assert disk_cache.maximumCacheSize() == max_cache_size * 2
codereview_python_data_2906
        self.assertTrue(summary1 != summary2, "{0} != {1} should be True")

-    def test_equality_operator_should_return_false_on_items_with_same_value(self):
        summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ValidHandlerStatus.success), ("Extension 2", ValidHandlerStatus.transitioning)])
        summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ValidHandlerStatus.success), ("Extension 2", ValidHandlerStatus.transitioning)])

nit: should be inequality

        self.assertTrue(summary1 != summary2, "{0} != {1} should be True")

+    def test_inequality_operator_should_return_false_on_items_with_same_value(self):
        summary1 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ValidHandlerStatus.success), ("Extension 2", ValidHandlerStatus.transitioning)])
        summary2 = ExtensionsSummaryTestCase._create_extensions_summary([("Extension 1", ValidHandlerStatus.success), ("Extension 2", ValidHandlerStatus.transitioning)])
codereview_python_data_2908
            if 'pinned' in histentry:
                new_tab.data.pinned = histentry['pinned']

-            if (config.val.session_lazy_restore and
                    histentry.get('active', False) and
                    not histentry['url'].startswith('qute://back')):
                # remove "active" mark and insert back page marked as active
-                data['history'].insert(
-                    i + 1,
-                    {
                        'title': histentry['title'],
                        'url': 'qute://back#' + histentry['title'],
                        'active': True

Here you're mutating the list you're iterating over. I'm not sure whether this is guaranteed to work.

            if 'pinned' in histentry:
                new_tab.data.pinned = histentry['pinned']

+            if (config.val.session.lazy_restore and
                    histentry.get('active', False) and
                    not histentry['url'].startswith('qute://back')):
                # remove "active" mark and insert back page marked as active
+                lazy_index = i + 1
+                lazy_load.append({
                        'title': histentry['title'],
                        'url': 'qute://back#' + histentry['title'],
                        'active': True
codereview_python_data_2912
              symbol_column=None,
              special_params_checker=None,
              **kwargs):
-    """Fetch a csv from a remote url.

    Parameters
    ----------

Could we mention that it automatically registers the returned source with the data portal?

              symbol_column=None,
              special_params_checker=None,
              **kwargs):
+    """Fetch a csv from a remote url and register the data so that it is
+    queryable from the ``data`` object.

    Parameters
    ----------
codereview_python_data_2918
        proposed_lat = proposed_origin[0]
        proposed_lng = proposed_origin[1]
        if proposed_lat != lat and proposed_lng != lng:
-            logger.log('[#] Using _old_walk_to to go to the proposed_origin: ' +str(proposed_origin))
            self._old_walk_to(speed, proposed_lat, proposed_lng, alt)
        if proposed_origin != proposed_destination:
            duration = polyline_walker.get_total_distance() / speed
-            logger.log('[#] Using PolylineWalker from '+ str(proposed_origin) +
-                       ' to ' +str(proposed_destination) + " for approx. " + str(ceil(duration)) + ' seconds')
        while proposed_destination != polyline_walker.get_pos()[0]:
            cLat, cLng = polyline_walker.get_pos()[0]
            self.api.set_position(cLat, cLng, alt)

please use `.format for args`

        proposed_lat = proposed_origin[0]
        proposed_lng = proposed_origin[1]
        if proposed_lat != lat and proposed_lng != lng:
+            logger.log('[#] Using _old_walk_to to go to the proposed_origin: {}'
+                       .format(proposed_origin))
            self._old_walk_to(speed, proposed_lat, proposed_lng, alt)
        if proposed_origin != proposed_destination:
            duration = polyline_walker.get_total_distance() / speed
+            logger.log('[#] Using PolylineWalker from {} to {} for approx. {} seconds.'
+                       .format(proposed_origin, proposed_destination, ceil(duration)))
        while proposed_destination != polyline_walker.get_pos()[0]:
            cLat, cLng = polyline_walker.get_pos()[0]
            self.api.set_position(cLat, cLng, alt)
codereview_python_data_2928
    pre_doc = "This is " + ", ".join(op_dev) + " operator\n\n"
    schema = b.GetSchema(op_name)
-    ret = pre_doc
    ret += schema.Dox()
    ret += '\n'
    if schema.AllowsSequences():

There is still question if we want to add column to our operators table to indicate if given operator supports sequences.

    pre_doc = "This is " + ", ".join(op_dev) + " operator\n\n"
    schema = b.GetSchema(op_name)
+    # insert tag to easily link to the operator
+    ret = '.. _' + op_name + ':\n\n'
+    ret += pre_doc
    ret += schema.Dox()
    ret += '\n'
    if schema.AllowsSequences():
codereview_python_data_2948
        pytest.raises(NetworkXError, dbag, m2, m1, m2, 0)
        pytest.raises(NetworkXError, dbag, 100, m1, m2, -0.5)
        pytest.raises(NetworkXError, dbag, 100, m1, m2, 1.5)
-        pytest.raises(
-            NetworkXError,
-            dbag,
-            100,
-            m1,
-            m2,
-            p,
-            initial=nx.complete_graph(max(m1, m2) - 1),
-        )

    def test_extended_barabasi_albert(self, m=2):
        """

This might make it easier to see that p has changed as well as initial relative to previous lines.
```suggestion
        initial=nx.complete_graph(max(m1, m2) - 1)
        pytest.raises(NetworkXError, dbag, 100, m1, m2, p, initial=initial)
```

        pytest.raises(NetworkXError, dbag, m2, m1, m2, 0)
        pytest.raises(NetworkXError, dbag, 100, m1, m2, -0.5)
        pytest.raises(NetworkXError, dbag, 100, m1, m2, 1.5)
+        initial = nx.complete_graph(max(m1, m2) - 1)
+        pytest.raises(NetworkXError, dbag, 100, m1, m2, p, initial=initial)

    def test_extended_barabasi_albert(self, m=2):
        """
codereview_python_data_2949
    #

    def test_normalize_empty(self):
-        try:
            functions.normalize([])
-        except NormalizeEmptyResultError:
-            pass

    def test_normalize_None_values(self):
        seriesList = []

Is this still a WIP?

    #

    def test_normalize_empty(self):
+        with self.assertRaises(NormalizeEmptyResultError):
            functions.normalize([])

    def test_normalize_None_values(self):
        seriesList = []
codereview_python_data_2963
        return self.request_dict.get('group')

    @cached_property
-    def request(self):
        request = self.request_dict
        # pylint: disable=unpacking-non-sequence
        #   payload is a property, so pylint doesn't think it's a tuple.

The property name is not very meaningful. `some_request.request` does not explain what the property returns. We'll need to rename it. Furthermore, should this property be public? Is there a reason to expose this to Celery's users? And one last thing, we need a docstring for this property.

        return self.request_dict.get('group')

    @cached_property
+    def _context(self):
        request = self.request_dict
        # pylint: disable=unpacking-non-sequence
        #   payload is a property, so pylint doesn't think it's a tuple.
codereview_python_data_2965
    c = command.CommandManager(tctx.master)
    a = TDec()
    c.collect_commands(a)

def test_decorator():

Don't you want to assert something? How would we tell if the test actually works?

    c = command.CommandManager(tctx.master)
    a = TDec()
    c.collect_commands(a)
+    assert not "cmd1" in c.commands
+    assert not "cmd2" in c.commands
+    assert not "empty" in c.commands

def test_decorator():
codereview_python_data_2971
    def __str__(self):
        if self.traceback:
-            return '{} - {}: {} '.format(self.text, self.exception.__class__.__name__, self.exception)
        return '{}: {}'.format(self.text, self.exception)

Why the trailing space (also in `test_configexc.py`)?

    def __str__(self):
        if self.traceback:
+            return '{} - {}: {}'.format(self.text, self.exception.__class__.__name__, self.exception)
        return '{}: {}'.format(self.text, self.exception)
codereview_python_data_2975
                      help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
    parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
                      help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
-    parser.add_option("--limited-api", dest="limited_api", default=False, action="store_false",
                      help="Compiles Cython using CPython's LIMITED_API")

    options, cmd_args = parser.parse_args(args)

This should say `"store_true"`, in which case the default already is `False`.

                      help="specify Pythran include directory. This will run the C++ tests using Pythran backend for Numpy")
    parser.add_option("--no-capture", dest="capture", default=True, action="store_false",
                      help="do not capture stdout, stderr in srctree tests. Makes pdb.set_trace interactive")
+    parser.add_option("--limited-api", dest="limited_api", default=False, action="store_true",
                      help="Compiles Cython using CPython's LIMITED_API")

    options, cmd_args = parser.parse_args(args)
codereview_python_data_2978
    return instance_name.split('-')[-2][8:]

-def run_command(cmd_args, number_of_retry=5, timeout_in_second=10):
    """Wrapper to run a command in subprocess.

    If there is a timeout/error on the API call, we will re try up to 5 times
    by default.

    Args:
        cmd_args (list): The list of command arguments.

Is there a message to tell the users what retry number this is? And a message that there is an error and retry is necessary? And exponential retry? What do you think?

    return instance_name.split('-')[-2][8:]

+def run_command(cmd_args, number_of_retry=5, timeout_in_second=30):
    """Wrapper to run a command in subprocess.

    If there is a timeout/error on the API call, we will re try up to 5 times
    by default.
+    Each re try will increment timeout_in_second by 10.

    Args:
        cmd_args (list): The list of command arguments.
codereview_python_data_2981
-if cython.compiled:
-    from cython.cimports.libc.math import sin
-else:
-    from math import sin

@cython.cclass
class Function:

This isn't needed (and not relevant in the context of this example). There is a fallback for `libc.math` in the form of Python's own `math` module.
```suggestion
from cython.cimports.libc.math import sin
```

+from cython.cimports.libc.math import sin

@cython.cclass
class Function:
codereview_python_data_2982
class CorpusElement(object):
  """An element (file) in a corpus."""

-  def __init__(self, file_path):
-    self.file_path = file_path
-    self.size = os.path.getsize(self.file_path)

class Corpus(object):

nit: can just do path instead of file_path, also matches size name style. or can do file_size ?

class CorpusElement(object):
  """An element (file) in a corpus."""

+  def __init__(self, path):
+    self.path = path
+    self.size = os.path.getsize(self.path)

class Corpus(object):
codereview_python_data_2983
        else:
            if rescale and not isinstance(scale_factor, (float, torch.Tensor)):
-                scale_factor = det_bboxes[:, :4].new_tensor(scale_factor)
            _bboxes = (
                det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)

`[:, :4]` is unnecessary

        else:
            if rescale and not isinstance(scale_factor, (float, torch.Tensor)):
+                scale_factor = det_bboxes.new_tensor(scale_factor)
            _bboxes = (
                det_bboxes[:, :4] * scale_factor if rescale else det_bboxes)
codereview_python_data_3002
        Function applied to size values before applying scaling,
        to remove values lower than zero.""")

-    style_opts = (['cmap', 'palette', 'marker', 'size', 's', 'color',
-                   'unselected_color'] + line_properties + fill_properties)

    _plot_methods = dict(single='scatter', batched='scatter')

I feel ``style_opts`` should be a set - something for 2.0 perhaps? I am assuming 'alpha' was being duplicated...

        Function applied to size values before applying scaling,
        to remove values lower than zero.""")

+    style_opts = (['cmap', 'palette', 'marker', 'size', 's'] + line_properties + fill_properties)

    _plot_methods = dict(single='scatter', batched='scatter')
codereview_python_data_3004
from copy import deepcopy

import networkx as nx
-import pandas as pd
from networkx.utils.decorators import not_implemented_for
-import numpy as np
from networkx.algorithms.centrality.betweenness import (
    _single_source_shortest_path_basic,
    _single_source_dijkstra_path_basic,

Move the pandas and numpy imports inside the functions so that they don't get imported until the function is run instead of when it is defined. The pypy test failed because that test does not have those packages available. That means you'll also have to change the tests file. For tests that call functions involving pandas or numpy, use the `pytest.importorskip` function to allow pytest to skip the test when the import doesn't work. For example
```
pytest.importorskip("pandas")
```
Or, if you need pandas in the test module:
```
pd = pytest.importorskip("pandas")
```

from copy import deepcopy

import networkx as nx
from networkx.utils.decorators import not_implemented_for
from networkx.algorithms.centrality.betweenness import (
    _single_source_shortest_path_basic,
    _single_source_dijkstra_path_basic,
codereview_python_data_3009
        return self.__intervals

    def zero_example(self, i):
-        """Attempt to replace each a draw call with its minimal possible value.

        This is intended as a fast-track to minimize whole sub-examples that
        don't matter as rapidly as possible. For example, suppose we had

"replace each a draw" -> my reading of the method body is that this now applies to exactly one draw call, so the word "each" should be deleted (and is probably a merge artifact?)

        return self.__intervals

    def zero_example(self, i):
+        """Attempt to replace a draw call with its minimal possible value.

        This is intended as a fast-track to minimize whole sub-examples that
        don't matter as rapidly as possible. For example, suppose we had
codereview_python_data_3018
            return 1
        for kind, name in self.read_dependency_file(source_path):
            if kind == "cimport":
-                # missing suffix?
                dep_path = self.find_pxd_file(name, pos)
            elif kind == "include":
-                # missing suffix?
                dep_path = self.search_include_directories(name, pos)
            else:
                continue

No idea. As far as I can tell, this code path is only used by `Cython/Build/BuildExecutable.py`

            return 1
        for kind, name in self.read_dependency_file(source_path):
            if kind == "cimport":
                dep_path = self.find_pxd_file(name, pos)
            elif kind == "include":
                dep_path = self.search_include_directories(name, pos)
            else:
                continue
codereview_python_data_3021
        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

-        TEST_EVENT_PATTERN = {
            'Source': [{'exists': True}],
            'detail-type': [{'prefix': 'core.app'}],
            'Detail': json.dumps({

nitpick: can we convert this to lower-case as well (`test_event_pattern`) - usually, we're only using upper-case for root/top-level variables.

        queue_url = sqs_client.create_queue(QueueName=queue_name)['QueueUrl']
        queue_arn = aws_stack.sqs_queue_arn(queue_name)

+        pattern = {
            'Source': [{'exists': True}],
            'detail-type': [{'prefix': 'core.app'}],
            'Detail': json.dumps({
codereview_python_data_3023
    start: FloatTensorLike = 0,
    limit: FloatTensorLike = -1,
    emit_self_as_target: bool = False,
-    vocab_freq_table: AcceptableDTypes = None,
    vocab_min_count: Optional[FloatTensorLike] = None,
    vocab_subsampling: Optional[FloatTensorLike] = None,
    corpus_size: Optional[FloatTensorLike] = None,

It seems to be a lookup table, not a data type.

    start: FloatTensorLike = 0,
    limit: FloatTensorLike = -1,
    emit_self_as_target: bool = False,
+    vocab_freq_table: tf.lookup.KeyValueTensorInitializer = None,
    vocab_min_count: Optional[FloatTensorLike] = None,
    vocab_subsampling: Optional[FloatTensorLike] = None,
    corpus_size: Optional[FloatTensorLike] = None,
codereview_python_data_3025
                   For user_pwd, a default username as string.
        title: The question title to show.
        text: The prompt text to display to the user.
-        yank_text: The prompt text available to prompt-yank command.
        answer: The value the user entered (as password for user_pwd).
        is_aborted: Whether the question was aborted.
        interrupted: Whether the question was interrupted by another one.

If it's always an URL, I'd prefer this to be called `url`. I'm just not sure if there's something else other than URLs to yank in prompts which would make sense, but the rest of your code talks about URLs as well.

                   For user_pwd, a default username as string.
        title: The question title to show.
        text: The prompt text to display to the user.
+        url: Any URL referenced in prompts.
        answer: The value the user entered (as password for user_pwd).
        is_aborted: Whether the question was aborted.
        interrupted: Whether the question was interrupted by another one.
codereview_python_data_3032
            lhs_data_nd, rhs_data_nd, out_data_nd,
            self.lhs_map[0], self.rhs_map[0], self.out_map[0])
        # normalize if mean reducer
        if self.reducer == 'mean':
            degs = nd.empty((out_data.shape[0],),
                            ctx=out_data.context, dtype=out_data.dtype)

Could you leave a note that this is a temporary hack and we should have a better solution (either in kernel or to support int32 queries) in the future?

            lhs_data_nd, rhs_data_nd, out_data_nd,
            self.lhs_map[0], self.rhs_map[0], self.out_map[0])
        # normalize if mean reducer
+        # note(zihao): this is a temporal hack and we should have better solution in the future.
        if self.reducer == 'mean':
            degs = nd.empty((out_data.shape[0],),
                            ctx=out_data.context, dtype=out_data.dtype)
codereview_python_data_3046
    assert len(txs) == 2
    assert {tx_create.id, tx_transfer.id} == set(tx.id for tx in txs)
-    assert {asset_id} == {Transaction.get_asset_id(txs)}

@pytest.mark.bdb

Why not just `assert asset_id == Transaction.get_asset_id(txs)`?

    assert len(txs) == 2
    assert {tx_create.id, tx_transfer.id} == set(tx.id for tx in txs)
+    assert asset_id == Transaction.get_asset_id(txs)

@pytest.mark.bdb
codereview_python_data_3048
class Transforms():
    def __init__(self, sz, tfms, denorm, crop_type=CropType.CENTER):
        self.sz,self.denorm = sz,denorm
-        crop_fn = RandomCrop
-        if crop_type == CropType.CENTER: crop_fn = CenterCrop
        if crop_type == CropType.NO: crop_fn = NoCrop
        self.tfms = tfms + [crop_fn(sz), channel_dim]
    def __call__(self, im, y): return compose(im, self.tfms), y

Maybe if crop_type is None we should use the 'max_zoom is not None' logic still, so you only have to pass this in if you want to override the default?

class Transforms():
    def __init__(self, sz, tfms, denorm, crop_type=CropType.CENTER):
        self.sz,self.denorm = sz,denorm
+        crop_fn = CenterCrop
+        if crop_type == CropType.RANDOM: crop_fn = RandomCrop
        if crop_type == CropType.NO: crop_fn = NoCrop
        self.tfms = tfms + [crop_fn(sz), channel_dim]
    def __call__(self, im, y): return compose(im, self.tfms), y
codereview_python_data_3053
        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await process.communicate()
    if process.returncode != 0:
-        print('Unable to refresh docs')
        if cfg['debug']:
-            print(stderr)
    else:
-        print('Successfully rebuilt documentation.')

async def start_server(config, services):

let's swap this from print to logging.debug().. you can do it against the root logger (just import logging)

        stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await process.communicate()
    if process.returncode != 0:
+        logging.warning('Unable to refresh docs')
        if cfg['debug']:
+            logging.warning(stderr)
    else:
+        logging.debug('Successfully rebuilt documentation.')

async def start_server(config, services):
codereview_python_data_3067
class TestAlignmentProcessing(object):
    seq = FASTA
-    ref = FASTA_ref
-    tgt = FASTA_tgt

    def test_fasta2select_aligned(self):
        """test align.fasta2select() on aligned FASTA (Issue 112)"""

Calling these FASTA_ref and FASTA_tgt is somewhat confusing for future devs given it's mainly for a single test. Ideally I'd name them something a bit more explicit like "FASTA_NOGAP_ref".

class TestAlignmentProcessing(object):
    seq = FASTA
+    ref = FASTA_NOGAP_ref
+    tgt = FASTA_NOGAP_tgt

    def test_fasta2select_aligned(self):
        """test align.fasta2select() on aligned FASTA (Issue 112)"""
codereview_python_data_3078
    elif format == 'jaspar':
        for m in motifs:
            counts = m.counts
-            if hasattr(m, 'matrix_id'):
                matrix_id = m.matrix_id
-            else:
                matrix_id = None
            line = ">{0} {1}\n".format(matrix_id, m.name)
            lines.append(line)

This is more pythonic:
```python
try:
    matrix_id = m.matrix_id
except AttributeError:
    matrix_id = 'None'
```

    elif format == 'jaspar':
        for m in motifs:
            counts = m.counts
+            try:
                matrix_id = m.matrix_id
+            except AttributeError:
                matrix_id = None
            line = ">{0} {1}\n".format(matrix_id, m.name)
            lines.append(line)
codereview_python_data_3082
<https://shanghai.nyu.edu/academics/faculty/directory/zheng-zhang>`_

In this tutorial, you learn about a graph attention network (GAT) and how it can be
-implemented in MXNet-DGL. You can also learn to visualize and understand what the attention
mechanism has learned.

The research described in the paper `Graph Convolutional Network (GCN) <https://arxiv.org/abs/1609.02907>`_,

The code is implemented in PyTorch rather than MXNet.

<https://shanghai.nyu.edu/academics/faculty/directory/zheng-zhang>`_

In this tutorial, you learn about a graph attention network (GAT) and how it can be
+implemented in PyTorch. You can also learn to visualize and understand what the attention
mechanism has learned.

The research described in the paper `Graph Convolutional Network (GCN) <https://arxiv.org/abs/1609.02907>`_,
codereview_python_data_3089
            # root process broadcasts nccl id
            nccl_id = nccl.UniqueId()
            uid = str(nccl_id)
-            store.set('nccl_root_id', uid)
        else:
-            uid = store.get('nccl_root_id')
            nccl_id = nccl.UniqueId(uid)
        # needs to be set for nccl to work
        self._comm = nccl.Communicator(self._world_size,

If we won't delete the key, I think we need to be something less generic than `nccl_root_id`. Maybe `SparseGradOptimizer.nccl_root_id`

            # root process broadcasts nccl id
            nccl_id = nccl.UniqueId()
            uid = str(nccl_id)
+            store.set(self._nccl_root_id, uid)
        else:
+            uid = store.get(self._nccl_root_id)
            nccl_id = nccl.UniqueId(uid)
        # needs to be set for nccl to work
        self._comm = nccl.Communicator(self._world_size,
codereview_python_data_3091
    Args:
        parent: The parent to pass to QObjects which get initialized.
    """
-    _init_main_config(parent, True)
-    _init_key_config(parent, True)
    _init_misc()

Why is reload `True` here?

    Args:
        parent: The parent to pass to QObjects which get initialized.
    """
+    _init_main_config(parent)
+    _init_key_config(parent)
    _init_misc()
codereview_python_data_3093
    >>> print(list(nx.bfs_tree(G,1).edges()))
    [(1, 0), (1, 2)]
    >>> H = nx.Graph()
-    >>> nx.add_path(G, [0, 1, 2, 3, 4, 5, 6])
-    >>> nx.add_path(G, [2, 7, 8, 9, 10])
-    >>> dict(nx.bfs_tree(H, source=3, depth_limit=3)
-    [(3, 2), (3, 4)]

    Notes

This tests that the depth_limit is 3, but the answer only shows a depth_limit of 1. (1 step away from the source.)

    >>> print(list(nx.bfs_tree(G,1).edges()))
    [(1, 0), (1, 2)]
    >>> H = nx.Graph()
+    >>> nx.add_path(H, [0, 1, 2, 3, 4, 5, 6])
+    >>> nx.add_path(H, [2, 7, 8, 9, 10])
+    >>> print(list(nx.bfs_tree(H, source=3, depth_limit=3).edges()))
+    [(1, 0), (2, 1), (2, 7), (3, 2), (3, 4), (4, 5), (5, 6), (7, 8)]

    Notes
codereview_python_data_3115
    if debug:
        sys.stderr.write("%s\n" % cmdline_str)
-    status = subprocess.call(cmdline_str, shell=True) >> 8
-    if status > 1:
        if kbyte != 0:  # possible memory problem; could be None
            sys.stderr.write("INFO trying again with the linear model\n")

Does subprocess definitely return the same value as os.system? There are weird and wonderful corner cases like minus values for system errors etc, and I am unclear what the ``>> 8`` was for.

    if debug:
        sys.stderr.write("%s\n" % cmdline_str)
+    status = os.system(cmdline_str, shell=True) >> 8
+    # `status` here will be >1 for error codes >=256
    if status > 1:
        if kbyte != 0:  # possible memory problem; could be None
            sys.stderr.write("INFO trying again with the linear model\n")
codereview_python_data_3120
    def test_smooth(self):
        seriesList = [
-            TimeSeries('collectd.test-db1.load.value',0,1,1,[range(20)]),
        ]

        def mock_evaluateTokens(reqCtx, tokens, replacements=None):
            seriesList = [
-                TimeSeries('collectd.test-db1.load.value',0,1,1,[range(20)]),
            ]
            for series in seriesList:
                series.pathExpression = series.name

The `range(20)` call returns a list. No need for the braces around that.

    def test_smooth(self):
        seriesList = [
+            TimeSeries('collectd.test-db1.load.value',0,1,1,range(20)),
        ]

        def mock_evaluateTokens(reqCtx, tokens, replacements=None):
            seriesList = [
+                TimeSeries('collectd.test-db1.load.value',0,1,1,range(20)),
            ]
            for series in seriesList:
                series.pathExpression = series.name
codereview_python_data_3121
                HealthCheck.exception_in_generation, HealthCheck.random_module
            ):
                note_deprecation((
-                    '%s is now ignored and suppressing it is a no-op. Simply '
                    'remove it from your list of suppressions to get the same '
                    'effect.') % (s,))
    return suppressions

Will using it become an error in future versions of Hypothesis?

                HealthCheck.exception_in_generation, HealthCheck.random_module
            ):
                note_deprecation((
+                    '%s is now ignored and suppressing it is a no-op. This will '
+                    'become an error in a fugure version of Hypothesis. Simply '
                    'remove it from your list of suppressions to get the same '
                    'effect.') % (s,))
    return suppressions
codereview_python_data_3132
    def table_exists(self, table, database='default', partition=None):
        """
-        We consider table/partition as existing if corresponding path in hdfs exists
        and contains file except those which match pattern set in `ignored_file_masks`
        """
        path = self.table_location(table, database, partition)

Can we remove the "we" wording? `The table/partition is considered existing if corresponding path in hdfs exists and ....`

    def table_exists(self, table, database='default', partition=None):
        """
+        The table/partition is considered existing if corresponding path in hdfs exists
        and contains file except those which match pattern set in `ignored_file_masks`
        """
        path = self.table_location(table, database, partition)
codereview_python_data_3140
import dgl.nn.pytorch as dglnn
from dgl.distributed import DistDataLoader
from dgl.distributed.nn import NodeEmbedding
-from dgl.distributed.optim import SparseAdagrad
import torch as th
import torch.nn as nn

is this function different for inductive and transductive models?

import dgl.nn.pytorch as dglnn
from dgl.distributed import DistDataLoader
from dgl.distributed.nn import NodeEmbedding
import torch as th
import torch.nn as nn
codereview_python_data_3141
             metrics_set,
             local_namespace,
             environ,
-             *args,
-             **kwargs):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.

Does this need to accept unknown more arguments as we have here, or could it accept just `blotter_class`? I didn't see any other uses of `args` or `kwargs`.

             metrics_set,
             local_namespace,
             environ,
+             blotter_class):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
codereview_python_data_3151
    """
    slug = os.path.basename(exercise)
    meta_dir = os.path.join(exercise, ".meta")
-    plugins = None
    plugins_module = None
    plugins_name = "plugins"
    plugins_source = os.path.join(meta_dir, f"{plugins_name}.py")

Line 154 is no longer needed.

    """
    slug = os.path.basename(exercise)
    meta_dir = os.path.join(exercise, ".meta")
    plugins_module = None
    plugins_name = "plugins"
    plugins_source = os.path.join(meta_dir, f"{plugins_name}.py")
codereview_python_data_3154
@given(st.data())
def test_signature_is_the_most_important_source(data):
-    """Signature types should take presence over all other annotations."""
    data.draw(st.builds(AnnotatedConstructorWithSignature))

```suggestion
    """Signature types should take precedence over all other annotations."""
```

@given(st.data())
def test_signature_is_the_most_important_source(data):
+    """Signature types should take precedence over all other annotations."""
    data.draw(st.builds(AnnotatedConstructorWithSignature))
codereview_python_data_3162
        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

-    def process_event(self, blotter_process_trade, perf_process_event, event):
        for txn, order in blotter_process_trade(event):
            perf_process_event(txn)
            perf_process_event(order)

because the bound methods are passed in, this should probably become 'private', also the bound methods should probably be private args ('_blotter_process_trade') so that consumers don't mess this up.

        return self.EMISSION_TO_PERF_KEY_MAP[
            self.algo.perf_tracker.emission_rate]

+    def _process_event(self, blotter_process_trade, perf_process_event, event):
        for txn, order in blotter_process_trade(event):
            perf_process_event(txn)
            perf_process_event(order)
codereview_python_data_3165
@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('relabel', [True, False])
def test_edge_coarsening(idtype, g, weight, relabel):
    g = dgl.to_bidirected(g)
    g = g.astype(idtype).to(F.ctx())
    edge_weight = None

Could you please establish a more strict unit test to verify the correctness of our implementation? This test looks too weak for me.

@pytest.mark.parametrize('weight', [True, False])
@pytest.mark.parametrize('relabel', [True, False])
def test_edge_coarsening(idtype, g, weight, relabel):
+    num_nodes = g.num_nodes()
    g = dgl.to_bidirected(g)
    g = g.astype(idtype).to(F.ctx())
    edge_weight = None
codereview_python_data_3176
                premium=premium,
            )
        elif given_module == 'uniswap':
-            self.eth_modules['uniswap'] = 'loading'
-            greenlet_manager.spawn_and_track(
-                after_seconds=None,
-                task_name='Initialize Uniswap object',
-                method=self._initialize_uniswap,
                premium=premium,
            )
        elif given_module == 'yearn_vaults':
            self.eth_modules['yearn_vaults'] = YearnVaults(

There is no reason to spawn uniswap module asynchronously. We do that in compound only because it has an async call (to get the comptroller address) at its `__init__`. So you can just initialize it normally, like Aave is.

                premium=premium,
            )
        elif given_module == 'uniswap':
+            self.eth_modules['uniswap'] = Uniswap(
+                ethereum_manager=ethereum_manager,
+                database=self.database,
                premium=premium,
+                msg_aggregator=msg_aggregator,
            )
        elif given_module == 'yearn_vaults':
            self.eth_modules['yearn_vaults'] = YearnVaults(
codereview_python_data_3183
        Converts listen into dict that can be submitted to influx directly.

        Returns:
-            a dict with approriate values of measurement, time, tags and fields
        """

        data = {
            'measurement' : measurement,
            'time' : self.ts_since_epoch,
-            'tags' : {
-                'dedup_tag' : self.dedup_tag
-            },
            'fields' : {
                'user_name' : escape(self.user_name),
                'artist_name' : self.data['artist_name'],

I think it might be nicer to add this key if a dedup value is needed and omit it otherwise. This makes the data a little smaller.

        Converts listen into dict that can be submitted to influx directly.

        Returns:
+            a dict with appropriate values of measurement, time, tags and fields
        """

        data = {
            'measurement' : measurement,
            'time' : self.ts_since_epoch,
            'fields' : {
                'user_name' : escape(self.user_name),
                'artist_name' : self.data['artist_name'],
codereview_python_data_3184
from .. import function as fn
from . import functional

-try:
-    import torch
-    from torch.distributions import Bernoulli
-except ImportError:
-    pass
-
__all__ = [
    'BaseTransform',
    'AddSelfLoop',

Is there a version requirement?

from .. import function as fn
from . import functional

__all__ = [
    'BaseTransform',
    'AddSelfLoop',
codereview_python_data_3187
    def exit_status(self):
        return self._proc.exitStatus()
-
-    def final_stdout(self):
-        return self._final_stdout
-
-    def final_stderr(self):
-        return self._final_stderr

I don't see a need for getter functions here - instead, store them as public attributes (`self.final_stdout` and `final_stderr`) and access them directly.

    def exit_status(self):
        return self._proc.exitStatus()
codereview_python_data_3192
def wikimedia_replacement():
    if util.config.wikimedia_tile_source_replacement == 'OSM-with-warning':
        param.main.param.warning('Wikimedia tile source no longer available outside '
-                                 'wikimedia domain, switching to OpenStreetMap (OSM) tile '
                                 'source. You can set wikimedia_tile_source_replacement '
                                 'to your chosen replacement tile source URL in hv.config'
                                 ' to disable this warning. See release notes for HoloViews'

```suggestion
                                 'wikimedia domain as of April 2021; switching to OpenStreetMap (OSM) tile '
```

def wikimedia_replacement():
    if util.config.wikimedia_tile_source_replacement == 'OSM-with-warning':
        param.main.param.warning('Wikimedia tile source no longer available outside '
+                                 'wikimedia domain as of April 2021; switching to OpenStreetMap (OSM) tile '
                                 'source. You can set wikimedia_tile_source_replacement '
                                 'to your chosen replacement tile source URL in hv.config'
                                 ' to disable this warning. See release notes for HoloViews'
codereview_python_data_3193
import sys

from abci.application import BaseApplication
-from github.com.tendermint.tendermint.abci.types.types_pb2 import (
    ResponseInitChain,
    ResponseInfo,
    ResponseCheckTx,

nitpicky, but why are these imports so weird?

import sys

from abci.application import BaseApplication
+from abci import (
    ResponseInitChain,
    ResponseInfo,
    ResponseCheckTx,
codereview_python_data_3218
            Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
        """  # noqa: E501, W605
        prob = cls_prob * (1 - box_prob)
-        neg_prob = 1 - prob
        # There are some cases when neg_prob = 0.
        # This will cause the neg_prob.log() to be inf without clamp.
-        neg_prob = torch.clamp(neg_prob, min=1e-12)
-        negative_bag_loss = -prob**self.gamma * neg_prob.log()
-        negative_bag_loss = (1 - self.alpha) * negative_bag_loss
-        return negative_bag_loss

Adding one line `prob = prob.clamp(min=EPS, max=1-EPS)` already works.

            Tensor: Negative bag loss in shape (num_img, num_anchors, num_classes).
        """  # noqa: E501, W605
        prob = cls_prob * (1 - box_prob)
        # There are some cases when neg_prob = 0.
        # This will cause the neg_prob.log() to be inf without clamp.
+        prob = prob.clamp(min=EPS, max=1 - EPS)
+        negative_bag_loss = prob**self.gamma * F.binary_cross_entropy(
+            prob, torch.zeros_like(prob), reduction='none')
+        return (1 - self.alpha) * negative_bag_loss
codereview_python_data_3228
        super(HttpError, self).__init__(msg, inner)


-class HostPluginConfigError(AgentError):
    """
-    Http request failure
    """
    def __init__(self, msg=None, inner=None):
-        super(HostPluginConfigError, self).__init__(msg, inner)


class EventError(AgentError):

Ah, so I am not the only one who forgets to update stuff on copy-n-paste...

        super(HttpError, self).__init__(msg, inner)


+class InvalidContainerError(HttpError):
    """
+    Container id sent in the header is invalid
    """
    def __init__(self, msg=None, inner=None):
+        super(InvalidContainerError, self).__init__(msg, inner)


class EventError(AgentError):
codereview_python_data_3234
            if operation.has_fact(trait=adjustment.trait, value=adjustment.value):
                l.visibility.apply(adjustment)
                l.status = l.states['HIGH_VIZ']
-
-    async def _apply_global_cleanup_links(self, operation, agent, link_status):
-        links = []
-        for ability_id in self.get_config(name='agents', prop='cleanup_abilities'):
-            for ability in await self.get_service('data_svc').locate('abilities', dict(ability_id=ability_id)):
-                links.append(Link(operation=operation.id, command=ability.cleanup, paw=agent.paw, cleanup=1,
-                                  ability=ability, score=0, jitter=0, status=link_status))
-        return await self.add_test_variants(links, agent, operation)

this block doesn't work as expected. we would want to clean it up a bit as well, as it is messy + it duplicates some fo the above work... likely this block can be combined with the calling function (_generate_cleanup_links)

            if operation.has_fact(trait=adjustment.trait, value=adjustment.value):
                l.visibility.apply(adjustment)
                l.status = l.states['HIGH_VIZ']
codereview_python_data_3239
                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
-                 sac=None,
-                 stage_with_sac=(False, False, False, False),
-                 rfp_inplanes=None,
-                 output_img=False,
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True):

We may add docstring about these arguments.

                 norm_eval=True,
                 dcn=None,
                 stage_with_dcn=(False, False, False, False),
                 plugins=None,
                 with_cp=False,
                 zero_init_residual=True):
codereview_python_data_3240
    elif 'reg_class_agnostic' in config.model.bbox_head.keys():
        reg_cls_agnostic = config.model.bbox_head \
            .reg_class_agnostic
-
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic

It is not deleted after being used.

    elif 'reg_class_agnostic' in config.model.bbox_head.keys():
        reg_cls_agnostic = config.model.bbox_head \
            .reg_class_agnostic
+    temp_file.close()
    return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
codereview_python_data_3243
    return None


-def _find_dep_file_path(main_file, file_path):
    abs_path = os.path.abspath(file_path)
-    if not os.path.exists(abs_path):
        # files are looked up relative to the main source file
        rel_file_path = os.path.join(os.path.dirname(main_file), file_path)
        if os.path.exists(rel_file_path):

This is actually not true. In the compiler, it really only applies to `include` files. That does not make this change feel like the right fix in general. It might still be the right thing to do for the source lookup relative to a .c/.cpp/etc. file, i.e. one of the two cases (in `_read_source_lines()`) where this function is used. Not sure about the other call.

    return None


+def _find_dep_file_path(main_file, file_path, relative_path_search=False):
    abs_path = os.path.abspath(file_path)
+    if not os.path.exists(abs_path) and (file_path.endswith('.pxi') or
+                                         relative_path_search):
        # files are looked up relative to the main source file
        rel_file_path = os.path.join(os.path.dirname(main_file), file_path)
        if os.path.exists(rel_file_path):
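A self-contained sketch of the lookup behaviour discussed above: fall back to resolving the path next to the main source file only for include-style files or when the caller opts in. The helper name, the `.pxi`-only rule, and the temporary demo files are assumptions made for the example, not the library's actual implementation:

```python
import os
import tempfile


def find_dep_file_path(main_file, file_path, relative_path_search=False):
    """Resolve file_path, optionally falling back to a lookup next to main_file."""
    abs_path = os.path.abspath(file_path)
    if not os.path.exists(abs_path) and (file_path.endswith('.pxi') or
                                         relative_path_search):
        # Include-style files are looked up relative to the main source file.
        rel_file_path = os.path.join(os.path.dirname(main_file), file_path)
        if os.path.exists(rel_file_path):
            return os.path.abspath(rel_file_path)
    return abs_path


with tempfile.TemporaryDirectory() as tmp:
    main_file = os.path.join(tmp, 'module.pyx')
    open(os.path.join(tmp, 'helpers.pxi'), 'w').close()
    # 'helpers.pxi' is not in the current directory, so it is found next to main_file.
    print(find_dep_file_path(main_file, 'helpers.pxi'))
```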
codereview_python_data_3246
# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
-"""Bio.AlignIO support for "emboss" alignment output from EMBOSS tools.
-
-You are expected to use this module via the Bio.Align functions (or the
-Bio.SeqIO functions if you are interested in the sequences only).

This module contains a parser for the EMBOSS pairs/simple file format, for
example from the alignret, water and needle tools.
"""
-import Bio
from Bio.Align import Alignment
from Bio.Align import interfaces
from Bio.Seq import Seq

This is clearly based on ``Bio/AlignIO/EmbossIO.py`` but this docstring needs tweaking slightly now, both here and a few lines down (although that hypothetical new high level API does not exist yet).

# choice of the "Biopython License Agreement" or the "BSD 3-Clause License".
# Please see the LICENSE file that should have been included as part of this
# package.
+"""Bio.Align support for "emboss" alignment output from EMBOSS tools.

This module contains a parser for the EMBOSS pairs/simple file format, for
example from the alignret, water and needle tools.
"""
from Bio.Align import Alignment
from Bio.Align import interfaces
from Bio.Seq import Seq
codereview_python_data_3249
import queue

from data_generator.resource import RESOURCE_GENERATOR_FACTORY
from data_generator.file_handler import create_file_and_writer_listener

Maybe a cleaner way to do this would be:
```
for key in resource_structure:
    if key in RESOURCE_GENERATORY_FACTORY:
        resource_type = key
        break

if not resource_type:
    <log and return>
```

import queue

+from data_generator.resource import RESOURCE_DEPENDENCY_MAP
from data_generator.resource import RESOURCE_GENERATOR_FACTORY
from data_generator.file_handler import create_file_and_writer_listener
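The reviewer's pseudocode can be made concrete with a small stand-alone sketch; the factory contents, the logging call, and the returned value are placeholders rather than the project's actual generators:

```python
import logging

RESOURCE_GENERATOR_FACTORY = {
    'project': 'generate_project',  # placeholders for the real generator callables
    'folder': 'generate_folder',
}


def pick_resource_type(resource_structure):
    """Return the first key in the structure that has a known generator."""
    resource_type = None
    for key in resource_structure:
        if key in RESOURCE_GENERATOR_FACTORY:
            resource_type = key
            break

    if not resource_type:
        logging.error('no known resource type in %s', list(resource_structure))
        return None
    return resource_type


print(pick_resource_type({'folder': {}, 'extra': {}}))  # -> folder
print(pick_resource_type({'unknown': {}}))              # -> None (and logs an error)
```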
codereview_python_data_3263
class DADict:
    def __init__(self, _name="DADict", **kargs):
        self._name=_name
-        self._update(kargs)
    def fixname(self,val):
        return fixname(val)
    def __contains__(self, val):

What is the purpose of this change?

class DADict:
    def __init__(self, _name="DADict", **kargs):
        self._name=_name
+        self.update(kargs)
    def fixname(self,val):
        return fixname(val)
    def __contains__(self, val):
codereview_python_data_3265
        self.select = select
        self.grA = u.select_atoms(select[0])
        self.grB = u.select_atoms(select[1])
-        self.is_box = self.fraction_kwargs.get('is_box')

        # contacts formed in reference
        self.r0 = []

The name `is_box` isn't particularly intuitive; something like `pbc` would be better. Also, since it is only used in `__init__` and not in the fraction_contacts methods where `fraction_kwargs` is passed, it'd make more sense to directly include as an argument of `__init__` rather than through `kwargs`. This would also make it easier to include in the docs here.

        self.select = select
        self.grA = u.select_atoms(select[0])
        self.grB = u.select_atoms(select[1])
+        self.pbc = pbc

        # contacts formed in reference
        self.r0 = []
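A toy version of the design point raised here: accepting `pbc` as an explicit `__init__` keyword (visible in the signature and easy to document) instead of digging it out of `**kwargs`. The class below is a simplified stand-in, not the real analysis class:

```python
class Contacts:
    """Toy sketch: pbc as an explicit, documented keyword argument."""

    def __init__(self, selections, pbc=True, **fraction_kwargs):
        self.selections = selections
        # Explicit in the signature, so it shows up in help() and the docs,
        # instead of being fished out of **fraction_kwargs with .get('is_box').
        self.pbc = pbc
        self.fraction_kwargs = fraction_kwargs


c = Contacts(('name CA', 'name CB'), pbc=False)
print(c.pbc, c.fraction_kwargs)
```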
codereview_python_data_3274
:Copyright: GNU Public License v3

..Warning:
-    This module will be deprecated in version 2.0.
    Please use :mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis` instead.

Given a :class:`~MDAnalysis.core.universe.Universe` (simulation

Should be "will be removed in 2.0"

:Copyright: GNU Public License v3

..Warning:
+    This module is deprecated and will be removed in version 2.0.
    Please use :mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis` instead.

Given a :class:`~MDAnalysis.core.universe.Universe` (simulation
codereview_python_data_3283
from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
-from selenium.common.exceptions import NoAlertPresentException"""

    def __init__(self):
        self.root = etree.Element("NoseTest")

There is a method get_requests in Executor class. It has all things like think-time in it. Why not use it?

from time import sleep
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
+from selenium.common.exceptions import NoAlertPresentException
+"""

    def __init__(self):
        self.root = etree.Element("NoseTest")
codereview_python_data_3286
class ClientAuthRulesOperation(MessageValidator):
    schema = (
        (TXN_TYPE, ConstantField(AUTH_RULES)),
-        (RULES, IterableField(AuthRuleField()))
    )

Should we set `min_length=1`?

class ClientAuthRulesOperation(MessageValidator):
    schema = (
        (TXN_TYPE, ConstantField(AUTH_RULES)),
+        (RULES, IterableField(AuthRuleField(), min_length=1))
    )