codereview_python_data_9671
        if not tool.check_if_installed():
            tool.install()
-       jmeter_version = self.settings.get("version")
        jmeter_path = self.settings.get("path", "~/.bzt/jmeter-taurus/{version}/")
        jmeter_path = get_full_path(jmeter_path)
        download_link = self.settings.get("download-link", None)

Keep it like it was. Just in case. It's harmless.

        if not tool.check_if_installed():
            tool.install()
+       jmeter_version = self.settings.get("version", JMeterExecutor.JMETER_VER)
        jmeter_path = self.settings.get("path", "~/.bzt/jmeter-taurus/{version}/")
        jmeter_path = get_full_path(jmeter_path)
        download_link = self.settings.get("download-link", None)
codereview_python_data_9673
        except OSError:
            # Note: Auxiliary Data is only supported since
            # Linux 2.6.21
-           warning("Your Linux Kernel does not support Auxiliary Data!")
        if isinstance(self, L2ListenSocket):
            self.outs = None
        else:

this sounds a little too aggressive, since users can't disable it

        except OSError:
            # Note: Auxiliary Data is only supported since
            # Linux 2.6.21
+           msg = "Your Linux Kernel does not support Auxiliary Data!"
+           log_runtime.info(msg)
        if isinstance(self, L2ListenSocket):
            self.outs = None
        else:
codereview_python_data_9677
from tqdm.auto import tqdm

from .. import version
-from .util import deprecate


def start_logging(logfile="MDAnalysis.log", version=version.__version__):

```suggestion
```
shouldn't be necessary anymore

from tqdm.auto import tqdm

from .. import version


def start_logging(logfile="MDAnalysis.log", version=version.__version__):
codereview_python_data_9680
        return value

    def __repr__(self):
-       return utils.get_repr(self, none_ok=self.none_ok, minval=self.minval,
-                             maxval=self.maxval, minperc=self.minperc, maxperc=self.maxperc)

The values passed to the constructor are called `minint` and `maxint` in the constructor, so it probably makes sense to call them the same here.

        return value

    def __repr__(self):
+       return utils.get_repr(self, none_ok=self.none_ok, minint=self.minval,
+                             maxint=self.maxval, minperc=self.minperc, maxperc=self.maxperc)
codereview_python_data_9681
    .. versionadded:: 0.17.0
    .. versionchanged:: 0.19.0
        The attached Reader when trajectory=True is now a MemoryReader
    """
    if not n_atoms:
        n_residues = 0

Add a `.. versionchanged:: 0.21.0` entry.

    .. versionadded:: 0.17.0
    .. versionchanged:: 0.19.0
        The attached Reader when trajectory=True is now a MemoryReader
+   .. versionchanged:: 0.21.0
+       Universes can now be created with 0 atoms with an error
    """
    if not n_atoms:
        n_residues = 0
codereview_python_data_9690
        finding_id = finding_list[0]
        to_be_updated_finding = finding_list[1]
-       if (finding_id not in new_findings_map and
-               to_be_updated_finding['state'] == 'ACTIVE'):
            to_be_updated_finding['state'] = 'INACTIVE'
            current_time = date_time.get_utc_now_datetime()
            actual_time = current_time.strftime(

Instead of using the `and` operator, can we make this check clearer and simpler by separating it? Such as below?
```
if to_be_updated_finding['state'] == 'INACTIVE':
    continue

if (finding_id not in new_findings_map:
    <rest of the code>
```

        finding_id = finding_list[0]
        to_be_updated_finding = finding_list[1]
+       if to_be_updated_finding['state'] == 'INACTIVE':
+           continue
+
+       if finding_id not in new_findings_map:
            to_be_updated_finding['state'] = 'INACTIVE'
            current_time = date_time.get_utc_now_datetime()
            actual_time = current_time.strftime(
codereview_python_data_9695
    trees = model_str[model_str.find('Tree=0'):model_str.find('end of trees')]
    more_trees = (one_tree * multiplier).format(*range(2, total_trees))
    after_trees = model_str[model_str.find('end of trees'):]
-   num_end_spaces = (2**31 - one_tree_size * total_trees)
    new_model_str = f"{before_tree_sizes}\n\n{trees}{more_trees}{after_trees}{'':{num_end_spaces}}"
    assert len(new_model_str) > 2**31
    bst.model_from_string(new_model_str, verbose=False)

```suggestion
    num_end_spaces = 2**31 - one_tree_size * total_trees
```

    trees = model_str[model_str.find('Tree=0'):model_str.find('end of trees')]
    more_trees = (one_tree * multiplier).format(*range(2, total_trees))
    after_trees = model_str[model_str.find('end of trees'):]
+   num_end_spaces = 2**31 - one_tree_size * total_trees
    new_model_str = f"{before_tree_sizes}\n\n{trees}{more_trees}{after_trees}{'':{num_end_spaces}}"
    assert len(new_model_str) > 2**31
    bst.model_from_string(new_model_str, verbose=False)
codereview_python_data_9697
        return x

    def i2repr(self, pkt, x):
-       return "%s%s" % (self.i2h(pkt, x), self.unit)

    def randval(self):
        value = super(ScalingField, self).randval()

The space must be kept.

        return x

    def i2repr(self, pkt, x):
+       return "%s %s" % (self.i2h(pkt, x), self.unit)

    def randval(self):
        value = super(ScalingField, self).randval()
codereview_python_data_9709
        assert revoc_def_type
        tags = cred_def_id.split(":")
-       revoc_def = make_state_path_for_revoc_def(tags[0], cred_def_id, revoc_def_type, revoc_def_tag)
        revoc_def_id, _, _, _ = self.lookup(revoc_def, isCommitted=False)
        if revoc_def is None:

The author DID here is not the author of the corresponding CRED_DEF, but the author of this RevocDef. So, `req.identifier` needs to be passed as the first argument.

        assert revoc_def_type
        tags = cred_def_id.split(":")
+       revoc_def = make_state_path_for_revoc_def(req.identifier, cred_def_id, revoc_def_type, revoc_def_tag)
        revoc_def_id, _, _, _ = self.lookup(revoc_def, isCommitted=False)
        if revoc_def is None:
codereview_python_data_9711
            # Generic per-Column annotation, exactly 1 char per column
            # Format: "#=GC <feature> <exactly 1 char per column>"
            feature, text = line[5:].strip().split(None, 1)
-           # Each feature key could be used more than once,
-           # so store the entries as a list of strings.
-           try:
-               gc[feature].append(text)
-           except KeyError:
-               gc[feature] = [text]
        elif line[:5] == '#=GS ':
            # Generic per-Sequence annotation, free text
            # Format: "#=GS <seqname> <feature> <free text>"

Is this correct? Wikipedia says "Do not use multiple lines with the same #=GC label." So isn't it something like
```python
if feature in gc:
    raise ValueError("Duplicate feature for #=GC label:\n" + line)
else:
    gc[feature] = text
```

            # Generic per-Column annotation, exactly 1 char per column
            # Format: "#=GC <feature> <exactly 1 char per column>"
            feature, text = line[5:].strip().split(None, 1)
+           if feature in gc:
+               raise ValueError("Duplicate feature for #=GC label:\n" + line)
+           gc[feature] = text
        elif line[:5] == '#=GS ':
            # Generic per-Sequence annotation, free text
            # Format: "#=GS <seqname> <feature> <free text>"
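A runnable illustration of the duplicate-label guard suggested above; the feature/text pairs are made up, not taken from a real Stockholm file.

```python
gc = {}
try:
    for feature, text in [("RF", "xxxx"), ("SS_cons", "<<>>"), ("RF", "yyyy")]:
        if feature in gc:  # second occurrence of the same #=GC label
            raise ValueError("Duplicate feature for #=GC label: " + feature)
        gc[feature] = text
except ValueError as err:
    print(err)  # Duplicate feature for #=GC label: RF
```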
codereview_python_data_9716
    def test_select_slice(self):
        cube = Dataset(self.cube)
-       self.assertEqual(cube.select(longitude=(0, 1.01)).data.data,
                         np.array([[1, 2], [5, 6], [9, 10]], dtype=np.int32))

    def test_select_set(self):

Is the extra .01 due to inclusive vs exclusive bounds?

    def test_select_slice(self):
        cube = Dataset(self.cube)
+       self.assertEqual(cube.select(longitude=(0, 1+self.epsilon)).data.data,
                         np.array([[1, 2], [5, 6], [9, 10]], dtype=np.int32))

    def test_select_set(self):
codereview_python_data_9721
"""Return a tuple of exceptions meaning 'skip this test', to re-raise. This is intended to cover most common test runners; if you would - like another to be added please open an issue or pull request. """ # This is a set because nose may simply re-export unittest.SkipTest exceptions = set() I am not wild about running these methods on every exception raised - given that that raising exceptions is the main thing Hypothesis tests do it's pretty hot path, and doing all this conditional querying as to whether stuff is already in `sys.modules` is technically fine but looks pretty weird and results in all of these coverage pragmas. How about instead catching all exceptions and checking whether the current exception is one to reraise based on a string version of its name? e.g. something like: ```python from hypothesis.internal.compat import qualname SKIP_EXCEPTIONS = frozenset({ 'unittest.SkipTest', 'unittest2.SkipTest', 'nose.SkipTest', '_pytest.outcomes.Skipped', }) ... except BaseException as e: if qualname(e) in SKIP_EXCEPTIONS or ( not isinstance(e, Exception) and qualname(e) != '_pytest.outcomes.Failed' ): raise # normal handling logic continues here ``` """Return a tuple of exceptions meaning 'skip this test', to re-raise. This is intended to cover most common test runners; if you would + like another to be added please open an issue or pull request adding + it to this function and to tests/cover/test_lazy_import.py """ # This is a set because nose may simply re-export unittest.SkipTest exceptions = set()
codereview_python_data_9725
        if frame.ncols < 2:
            return
        s = self.random_array(frame.ncols - 1, positive=True)
-       s = sorted(set(s))
        print("[14] Removing columns %r -> ncols = %d" % (s, frame.ncols - len(s)))
        if python_output:

Theoretically, you don't have to either sort or deduplicate the list of columns that you want to remove.

        if frame.ncols < 2:
            return
        s = self.random_array(frame.ncols - 1, positive=True)
        print("[14] Removing columns %r -> ncols = %d" % (s, frame.ncols - len(s)))
        if python_output:
codereview_python_data_9736
    image: TensorLike, channel: int, bins: Optional[int] = 256
) -> tf.Tensor:
    """Scale the data in the channel to implement equalize."""
    image_dtype = image.dtype
    image = tf.cast(image[:, :, channel], tf.int32)

For line 43 could it clip with 0 ?

    image: TensorLike, channel: int, bins: Optional[int] = 256
) -> tf.Tensor:
    """Scale the data in the channel to implement equalize."""
+   assert bins >= 256, "Number of histogram bins should be at least 256"
    image_dtype = image.dtype
    image = tf.cast(image[:, :, channel], tf.int32)
codereview_python_data_9737
from MDAnalysis.tests.datafiles import PDB

import pickle
-import sys


@pytest.fixture(params=[

We can drop these now that we're past 1.0.

from MDAnalysis.tests.datafiles import PDB

import pickle


@pytest.fixture(params=[
codereview_python_data_9738
    def resize_rows(self, nrows):
        curr_nrows = self.nrows
-       try:
            self.df.nrows = nrows
-       except ValueError as e:
-           assert str(e) == "Cannot increase the number of rows in a keyed frame"
-           return
        if curr_nrows < nrows:
            append = [None] * (nrows - curr_nrows)

Same here: you should know in advance whether resizing should or should not throw an error. It should throw iff (a) frame.nkeys > 0, and (b) new_nrows > nrows.

    def resize_rows(self, nrows):
        curr_nrows = self.nrows
+       if len(self.df.key) and nrows > curr_nrows:
+           with pytest.raises(ValueError) as e:
+               self.df.nrows = nrows
+           assert str(e.value) == "Cannot increase the number of rows in a keyed frame"
+           return False
+       else:
            self.df.nrows = nrows
        if curr_nrows < nrows:
            append = [None] * (nrows - curr_nrows)
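A standalone sketch of the reviewer's point: decide up front whether the operation must raise, then assert that explicitly with `pytest.raises`. `FrozenBox` is a hypothetical stand-in, not the datatable frame.

```python
import pytest

class FrozenBox:
    """Toy container that refuses to grow once frozen."""
    def __init__(self, frozen):
        self.frozen, self.size = frozen, 0
    def resize(self, n):
        if self.frozen and n > self.size:
            raise ValueError("Cannot grow a frozen box")
        self.size = n

def check_resize(box, n):
    # The error condition is known in advance, so test it explicitly.
    if box.frozen and n > box.size:
        with pytest.raises(ValueError) as excinfo:
            box.resize(n)
        assert "frozen" in str(excinfo.value)
        return False
    box.resize(n)
    return True
```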
codereview_python_data_9742
# Options for LaTeX output
# ------------------------

-# Notes for customization
-# https://www.sphinx-doc.org/en/master/latex.html
-# https://github.com/sphinx-doc/sphinx/issues/4136
-
-# Use a latex engine allows for unicode characters in docstrings
latex_engine = "xelatex"
-
-# Docs on xindy: https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-latex_use_xindy
-latex_use_xindy = True
-
# The paper size ('letter' or 'a4').
latex_paper_size = "letter"

```suggestion
# Use a latex engine that allows for unicode characters in docstrings
latex_engine = "xelatex"
```
My vote would be to keep this simple. `xindy` is the default with xelatex so no need to explicitly specify it here. The links are more valuable from a troubleshooting perspective and won't be relevant (:crossed_fingers: )

# Options for LaTeX output
# ------------------------

+# Use a latex engine that allows for unicode characters in docstrings
latex_engine = "xelatex"

# The paper size ('letter' or 'a4').
latex_paper_size = "letter"
codereview_python_data_9746
        stmts.append(self.gen_classdef())
        return ast.Module(body=stmts)

-   def get_data_source_readers(self):
        readers = []
        for idx, source in enumerate(self.scenario.get_data_sources(), start=1):
            reader_tpl = "reader{idx} = CSVReaderPerThread({params})"
-           params = source.get("path")
            if "loop" in source:
                params += ", " + "loop=%s" % source.get("loop")

`%r` is better than `%s` when generating python code like this. It deals with escaping.

        stmts.append(self.gen_classdef())
        return ast.Module(body=stmts)

    def get_data_source_readers(self):
        readers = []
        for idx, source in enumerate(self.scenario.get_data_sources(), start=1):
            reader_tpl = "reader{idx} = CSVReaderPerThread({params})"
+           args = "'%s'" % source.get("path")
            if "loop" in source:
                params += ", " + "loop=%s" % source.get("loop")
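A quick demonstration of why `%r` is safer than `%s` when emitting Python source: `repr()` adds the quoting and escaping for you.

```python
path = "data's.csv"
print("reader = CSVReaderPerThread('%s')" % path)  # emits invalid Python: ...('data's.csv')
print("reader = CSVReaderPerThread(%r)" % path)    # emits valid Python:   ...("data's.csv")
```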
codereview_python_data_9749
    def __repr__(self):
        """Represent the UniGene Record object as a string for debugging."""
-       return "<%s> %s %s\n%s" % (
            self.__class__.__name__,
            self.ID,
            self.symbol,

This isn't a very standard ``__repr__`` string, the trailing ``\n`` is most unusual. Do you want to look at updating this since you are working on this module?

    def __repr__(self):
        """Represent the UniGene Record object as a string for debugging."""
+       return "<%s> %s %s %s" % (
            self.__class__.__name__,
            self.ID,
            self.symbol,
codereview_python_data_9754
        kwargs['_env']['ANSIBLE_FORCE_COLOR'] = 'false'

        try:
-           output = sh.ansible_playbook(playbook, *args, **kwargs)
            if self.molecule._args['--debug']:
                ansible_env = {k: v for (k, v) in kwargs['_env'].items() if 'ANSIBLE' in k}
                other_env = {k: v for (k, v) in kwargs['_env'].items() if 'ANSIBLE' not in k}
                utilities.debug('OTHER ENVIRONMENT', yaml.dump(other_env, default_flow_style=False, indent=2))
                utilities.debug('ANSIBLE ENVIRONMENT', yaml.dump(ansible_env, default_flow_style=False, indent=2))
-               utilities.debug('ANSIBLE PLAYBOOK', output.ran)

            return output
        except sh.ErrorReturnCode as e:
            print('ERROR: {}'.format(e))

Can we report the output prior to execution. Would be nice to see what we will be doing vs waiting to converge to display this.

        kwargs['_env']['ANSIBLE_FORCE_COLOR'] = 'false'

        try:
+           ansible = sh.ansible_playbook.bake(playbook, *args, **kwargs)
            if self.molecule._args['--debug']:
                ansible_env = {k: v for (k, v) in kwargs['_env'].items() if 'ANSIBLE' in k}
                other_env = {k: v for (k, v) in kwargs['_env'].items() if 'ANSIBLE' not in k}
                utilities.debug('OTHER ENVIRONMENT', yaml.dump(other_env, default_flow_style=False, indent=2))
                utilities.debug('ANSIBLE ENVIRONMENT', yaml.dump(ansible_env, default_flow_style=False, indent=2))
+               utilities.debug('ANSIBLE PLAYBOOK', str(ansible.bake.im_self))
+           output = ansible()

            return output
        except sh.ErrorReturnCode as e:
            print('ERROR: {}'.format(e))
codereview_python_data_9759
        'cloudsqlinstance'
    ]

-   bigquery_type_list = [
-       'dataset',
-   ]
-
    gsuite_type_list = [
        'gsuite_group',
        'gsuite_user',

If it is gcp resources, it should go to gcp_type_list

        'cloudsqlinstance'
    ]

    gsuite_type_list = [
        'gsuite_group',
        'gsuite_user',
codereview_python_data_9761
def dynamodb_get_table_stream_specification(table_name):
-   stream_specification = {}
-   ddb_client = aws_stack.connect_to_service('dynamodb')
    try:
-       stream_specification = ddb_client.describe_table(TableName=table_name)['Table'].get('StreamSpecification')
    except Exception as e:
-       LOGGER.info('Unable to get %s information : %s %s' % (table_name, e, traceback.format_exc()))
        raise e
-   return stream_specification


def error_response(message=None, error_type=None, code=400):

nit: Since we're using `stream_specification` only in a single place, we could simplify to:
```
def dynamodb_get_table_stream_specification(table_name):
    ddb_client = aws_stack.connect_to_service('dynamodb')
    try:
        return ddb_client.describe_table(TableName=table_name)['Table'].get('StreamSpecification')
    except Exception as e:
        LOGGER.info('Unable to get stream specification for table "%s": %s %s' % (
            table_name, e, traceback.format_exc()))
        raise e
```
Also, querying the table schema for each `PutItem`/`UpdateItem` operation can have severe performance impacts. Let's better make use of `get_table_schema(..)` here (which internally uses the cached values from `SCHEMA_CACHE`).

def dynamodb_get_table_stream_specification(table_name):
    try:
+       return get_table_schema(table_name)['Table'].get('StreamSpecification')
    except Exception as e:
+       LOGGER.info('Unable to get stream specification for table %s : %s %s' % (table_name, e,
+           traceback.format_exc()))
        raise e


def error_response(message=None, error_type=None, code=400):
codereview_python_data_9766
__maintainer__ = __author__
__email__ = "mail@qutebrowser.org"
__version__ = "1.5.0"
-__version_info__ = __version__.split('.')
__description__ = "A keyboard-driven, vim-like browser based on PyQt5."

basedir = os.path.dirname(os.path.realpath(__file__))

Those should be integers, not strings.

__maintainer__ = __author__
__email__ = "mail@qutebrowser.org"
__version__ = "1.5.0"
+__version_info__ = [int(part) for part in __version__.split('.')]
__description__ = "A keyboard-driven, vim-like browser based on PyQt5."

basedir = os.path.dirname(os.path.realpath(__file__))
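Why integers matter here: version components compare lexicographically as strings, which breaks as soon as any component reaches two digits.

```python
assert "9" > "10"                               # string comparison: wrong for versions
v = tuple(int(p) for p in "1.5.0".split('.'))
assert v < (1, 10, 0)                           # integer tuples compare correctly
```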
codereview_python_data_9777
        chunked = not (request.body is None or 'Content-Length' in request.headers)

-       if isinstance(timeout, TimeoutSauce):
-           pass
-       elif isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)

Can this be moved lower down the list? A timeout object isn't a tuple, so the original `isinstance(tuple)` check won't catch it, so let's try to avoid rearranging the code too much.

        chunked = not (request.body is None or 'Content-Length' in request.headers)

+       if isinstance(timeout, tuple):
            try:
                connect, read = timeout
                timeout = TimeoutSauce(connect=connect, read=read)
codereview_python_data_9784
from netlib import version


def sysinfo():
    data = [
-       "Mitmproxy verison: %s" % version.VERSION,
        "Python version: %s" % platform.python_version(),
        "Platform: %s" % platform.platform(),
    ]
    d = platform.linux_distribution()
    t = "Linux distro: %s %s %s" % d

Can we add the openssl version as well? `from OpenSSL import SSL; print(SSL.SSLeay_version(SSL.SSLEAY_VERSION))`

from netlib import version
+from OpenSSL import SSL;


def sysinfo():
    data = [
+       "Mitmproxy version: %s" % version.VERSION,
        "Python version: %s" % platform.python_version(),
        "Platform: %s" % platform.platform(),
+       "SSL version: %s" % SSL.SSLeay_version(SSL.SSLEAY_VERSION),
    ]
    d = platform.linux_distribution()
    t = "Linux distro: %s %s %s" % d
codereview_python_data_9786
        line = inf.readline()
        while not len(line.split()) == 5:
            line = inf.readline()

        while line and not line.startswith('timestep'):
            name = line[:8].strip()

I think this has the ability to infinitely loop if a (non HISTORY) file never has 5 tokens on a line. I think once we reach end-of-file `readline()` is going to return an empty string (i.e. not even a newline char), can we add a check for that and raise an error if we reach the end of file?

        line = inf.readline()
        while not len(line.split()) == 5:
            line = inf.readline()
+           if line == '':
+               raise EOFError("End of file reached when reading HISTORY.")

        while line and not line.startswith('timestep'):
            name = line[:8].strip()
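A self-contained demo of the EOF guard: `readline()` returns `''` (not even a newline) only at end-of-file, so the empty string is a reliable termination signal.

```python
import io

inf = io.StringIO("a b\nc d\n")  # no line ever has 5 tokens
line = inf.readline()
while len(line.split()) != 5:
    line = inf.readline()
    if line == '':
        raise EOFError("End of file reached while scanning for the header line.")
```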
codereview_python_data_9790
        variables = re.findall(r'#{(.*?)}', decoded_test, flags=re.DOTALL)
        if variables:
-           private_facts = await self._explode_private_facts(operation['id'], agent['id'])
-           relevant_facts = await self._build_relevant_facts(variables, operation.get('facts', []), private_facts)
            for combo in list(itertools.product(*relevant_facts)):
                copy_test = copy.deepcopy(decoded_test)
                copy_link = copy.deepcopy(link)

let's rename "private_facts" to "agent_facts". These are a list of all the agent's facts, not just their private ones.

        variables = re.findall(r'#{(.*?)}', decoded_test, flags=re.DOTALL)
        if variables:
+           agent_facts = await self._get_agent_facts(operation['id'], agent['id'])
+           relevant_facts = await self._build_relevant_facts(variables, operation.get('facts', []), agent_facts)
            for combo in list(itertools.product(*relevant_facts)):
                copy_test = copy.deepcopy(decoded_test)
                copy_link = copy.deepcopy(link)
codereview_python_data_9795
import pandas
from pandas.api.types import is_scalar
-from pandas.compat import to_str, string_types, numpy, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
    _get_dtype_from_object,

can we alias `numpy as numpy_compat` or something like that? It will be confusing if we just use `numpy`.

import pandas
from pandas.api.types import is_scalar
+from pandas.compat import to_str, string_types, numpy as numpy_compat, cPickle as pkl
import pandas.core.common as com
from pandas.core.dtypes.common import (
    _get_dtype_from_object,
codereview_python_data_9800
@pytest.mark.skipif(executable_not_found("hole"),
                    reason="Test skipped because HOLE not found")
class TestHOLE(object):
-   filename = PDB_HOLE
-
    @pytest.fixture()
-   def H(self, tmpdir):
        # keep tempdir around for the whole lifetime of the class
        with tmpdir.as_cwd():
-           H = HOLE(self.filename, raseed=31415)
            H.run()
            H.collect()
        return H

Shouldn't this be a static method for pytest to work?

@pytest.mark.skipif(executable_not_found("hole"),
                    reason="Test skipped because HOLE not found")
class TestHOLE(object):
+   @staticmethod
    @pytest.fixture()
+   def H(tmpdir):
        # keep tempdir around for the whole lifetime of the class
        with tmpdir.as_cwd():
+           filename = PDB_HOLE
+           H = HOLE(filename, raseed=31415)
            H.run()
            H.collect()
        return H
codereview_python_data_9802
if "egg_incubators" in inv_data: for incubator in inv_data.get("egg_incubators", {}).get("egg_incubator", []): - incubators.append({"id":incubator.get("item_id", -1), "used":False}) if "pokemon_data" in inv_data: pokemon = inv_data.get("pokemon_data", {}) - if pokemon.get("is_egg", False): eggs.append({"id": pokemon.get("id", -1), "km": pokemon.get("egg_km_walked_target", -1), "used": False}) sorting = self.config.longer_eggs_first You can check if the incubator is being used here. It will have 'pokemon_id' set if there is an egg in it. 'pokemon_id' matches 'id' from an egg. if "egg_incubators" in inv_data: for incubator in inv_data.get("egg_incubators", {}).get("egg_incubator", []): + if "start_km_walked" not in incubator: + incubators.append({"id":incubator.get("id", -1), "used":False}) if "pokemon_data" in inv_data: pokemon = inv_data.get("pokemon_data", {}) + if pokemon.get("is_egg", False) and "egg_km_walked_target" in pokemon: eggs.append({"id": pokemon.get("id", -1), "km": pokemon.get("egg_km_walked_target", -1), "used": False}) sorting = self.config.longer_eggs_first
codereview_python_data_9814
            A new DataFrame filled with Booleans.
        """
        if level is not None:
-           raise NotImplementedError(
-               "Mutlilevel index not yet supported " "in Modin"
-           )
        other = self._validate_other(other, axis)
        new_manager = self._data_manager.gt(other=other, axis=axis, level=level)
        return self._create_dataframe_from_manager(new_manager)

Also remove the `"..." "..."` strings in this file

            A new DataFrame filled with Booleans.
        """
        if level is not None:
+           raise NotImplementedError("Mutlilevel index not yet supported " "in Modin")
        other = self._validate_other(other, axis)
        new_manager = self._data_manager.gt(other=other, axis=axis, level=level)
        return self._create_dataframe_from_manager(new_manager)
codereview_python_data_9815
            if util.verify_vote_signature(block, vote):
                return block
            else:
-               raise ImproperVoteError('Block {block_id} already has in incorrectly signed vote '
                                        'from public key {me}').format(block_id=block['id'], me=self.me)

        # Run the plugin block validation logic

I believe you meant to write "...has an incorrectly signed vote"

            if util.verify_vote_signature(block, vote):
                return block
            else:
+               raise ImproperVoteError('Block {block_id} already has an incorrectly signed vote '
                                        'from public key {me}').format(block_id=block['id'], me=self.me)

        # Run the plugin block validation logic
codereview_python_data_9817
    Returns:
        int: the number of bytes this user's listens take in the dump file
    """
-   self.log.info('Dumping user %s...', username)
-
    t0 = time.time()
    offset = 0
    bytes_written = 0

I think this should go away, its noise in the grand scheme of things.

    Returns:
        int: the number of bytes this user's listens take in the dump file
    """
    t0 = time.time()
    offset = 0
    bytes_written = 0
codereview_python_data_9820
        force_search = False
        if not utils.supports_selection():
            sel = False
-       text = utils.get_clipboard(selection=sel)
        text_urls = [u for u in text.split('\n') if u.strip()]
        if (len(text_urls) > 1 and not urlutils.is_url(text_urls[0]) and
                urlutils.get_path_if_valid(

Why did you remove this?

        force_search = False
        if not utils.supports_selection():
            sel = False
+       try:
+           text = utils.get_clipboard(selection=sel)
+       except utils.ClipboardEmptyError as e:
+           raise cmdexc.CommandError(e)
        text_urls = [u for u in text.split('\n') if u.strip()]
        if (len(text_urls) > 1 and not urlutils.is_url(text_urls[0]) and
                urlutils.get_path_if_valid(
codereview_python_data_9821
    ri = urlparse(url)

-   # Strip port numbers from netloc
    if isinstance(url, str):
-       host = ri.netloc.split(u':')[0]
-   else:
-       host = ri.netloc.split(b':')[0]

    try:
        _netrc = netrc(netrc_path).authenticators(host)

Suggestion:
```py
splitstr = b':'
if isinstance(url, str):
    splitstr = splitstr.decode('utf-8')
host = r.netloc.split(splitstr)[0]
```
My reasoning is that while we don't test against Python 3.2, pip still supports 3.2 and so we need to support it as best as we can as well. `u''` is not supported on 3.2 and that's why the above might be a bit friendlier.

    ri = urlparse(url)

+   # Strip port numbers from netloc. This weird `if...encode`` dance is
+   # used for Python 3.2, which doesn't support unicode literals.
+   splitstr = b':'
    if isinstance(url, str):
+       splitstr = splitstr.decode('ascii')
+   host = ri.netloc.split(splitstr)[0]

    try:
        _netrc = netrc(netrc_path).authenticators(host)
codereview_python_data_9823
"""Add a child to the Entity.""" entity_id = entity.get_id() if self.has_id(entity_id): - raise PDBConstructionException("%s defined twice" % str(entity_id)) entity.set_parent(self) self.child_list.append(entity) self.child_dict[entity_id] = entity This and the one on line 224 can drop the ``str(...)`` call. Not directly related to black, but a style change we might as well address now? """Add a child to the Entity.""" entity_id = entity.get_id() if self.has_id(entity_id): + raise PDBConstructionException("%s defined twice" % entity_id) entity.set_parent(self) self.child_list.append(entity) self.child_dict[entity_id] = entity
codereview_python_data_9828
view_name = "Unknown Image" return view_name, base.format_dict(multidict.MultiDict(image_metadata)) - def should_render(self, content_type): - return content_type.startswith("image/") and content_type != "image/svg+xml" Might be worthy of note that this changes assumes now forward-compatibility of all future new image formats, whereas the old behaviour was more explicit about the supported image types. This should be fine since the code in `__call__` has a safe default path. view_name = "Unknown Image" return view_name, base.format_dict(multidict.MultiDict(image_metadata)) + def render_priority(self, data: bytes, *, content_type: Optional[str] = None, **metadata) -> float: + return float( + content_type + and content_type.startswith("image/") + and content_type != "image/svg+xml" + )
codereview_python_data_9830
    you will need to call :meth:`start_py_workers` before calling :meth:`build` of any
    of the pipelines. You can find more details and caveats of both methods in Python's
    ``multiprocessing`` module documentation.
-`py_callback_pickler` : module or tuple, default = nvidia.dali.pickling
-   If `py_start_method` is set to *spawn* callback passed to parallel ExternalSource must be picklable.
-   If run in Python3.8 or newer, DALI uses customized pickle (`nvidia.dali.pickling`) when
-   serializing callbacks to support serialization of local functions and lambdas. However,
    if you need to serialize more complex objects like local classes or you are running
    older version of Python you can provide external serialization package such as dill or cloudpickle

```suggestion
                 py_callback_pickler=None):
```

    you will need to call :meth:`start_py_workers` before calling :meth:`build` of any
    of the pipelines. You can find more details and caveats of both methods in Python's
    ``multiprocessing`` module documentation.
+`py_callback_pickler` : module or tuple, default = None
+   If `py_start_method` is set to *spawn*, callback passed to parallel ExternalSource must be picklable.
+   If run in Python3.8 or newer with `py_callback_pickler` set to None, DALI uses customized pickle
+   when serializing callbacks to support serialization of local functions and lambdas. However,
    if you need to serialize more complex objects like local classes or you are running
    older version of Python you can provide external serialization package such as dill or cloudpickle
codereview_python_data_9832
            'type': 'string',
        },
        'tls_verify': {
-           'type': 'integer',
-           'regex': '^[0-1]$',
        }
    }
}

This should be a boolean or?

            'type': 'string',
        },
        'tls_verify': {
+           'type': 'boolean',
        }
    }
}
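The schema fragment looks like a Cerberus validation schema; if so, the accepted `'type': 'boolean'` rule behaves like this (a sketch, assuming Cerberus is the validator in use):

```python
from cerberus import Validator

v = Validator({'tls_verify': {'type': 'boolean'}})
assert v.validate({'tls_verify': True})
assert not v.validate({'tls_verify': '1'})  # strings no longer pass as 0/1 flags
```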
codereview_python_data_9836
from __future__ import absolute_import
from __future__ import unicode_literals
from . import misc
-from sys import version_info
import dnf.pycomp
import glob
import logging

You can save one line if you import it from pycomp (`from .pycomp import PY3`).

from __future__ import absolute_import
from __future__ import unicode_literals
from . import misc
import dnf.pycomp
import glob
import logging
codereview_python_data_9844
xlabel(r"frame number $t$") ylabel(r"native contacts $q_1$") - if not filename is None: savefig(filename) def _plot_qavg_pcolor(self, filename=None, **kwargs): the old code was actually correct and more pythonic and encouraged. xlabel(r"frame number $t$") ylabel(r"native contacts $q_1$") + if filename is not None: savefig(filename) def _plot_qavg_pcolor(self, filename=None, **kwargs):
codereview_python_data_9845
        int
            The number of bits needed
        """
-       if self.number_of_edges() >= 0x80000000:
-           return 64
-       elif self.number_of_nodes() >= 0x80000000:
            return 64
        else:
            return 32

shouldn't be "or"? #edges can be smaller than 2^31 but #nodes is > 2^31.

        int
            The number of bits needed
        """
+       if self.number_of_edges() >= 0x80000000 or self.number_of_nodes() >= 0x80000000:
            return 64
        else:
            return 32
codereview_python_data_9849
import Bio.Seq
import Bio.SeqRecord
import numpy as np
-from inspect import signature as inspect_signature

from ..lib.util import (cached, convert_aa_code, iterable,
                        warn_if_not_unique, unique_int_1d)

```suggestion
```
duplicate import (see above on line 40)?

import Bio.Seq
import Bio.SeqRecord
import numpy as np

from ..lib.util import (cached, convert_aa_code, iterable,
                        warn_if_not_unique, unique_int_1d)
codereview_python_data_9850
        self.follow_redirects = self.config.get('follow-redirects', None)
        self.body = self.__get_body()

-   def by_priority(self, name, default=None):
        val = self.config.get(name, None)
        if val is None:
            val = self.scenario.get(name, None)

Maybe we should rename it back to `priority_option`? `by_priority` looks nice and DSL-ish, but isn't in line with our usual method naming.

        self.follow_redirects = self.config.get('follow-redirects', None)
        self.body = self.__get_body()

+   def priority_option(self, name, default=None):
        val = self.config.get(name, None)
        if val is None:
            val = self.scenario.get(name, None)
codereview_python_data_9851
"size": int(line_split[3]), "strand": strand, "srcSize": int(line_split[5]), - "leftStatus": None, - "leftCount": None, - "rightStatus": None, - "rightCount": None, } sequence = line_split[6] Do you think it is useful to always have these in the dictionary, even if absent from the file being parsed? I would have added them as required. "size": int(line_split[3]), "strand": strand, "srcSize": int(line_split[5]), } sequence = line_split[6]
codereview_python_data_9852
    'Bio.Affy',
    'Bio.Cluster',
    'Bio.KDTree',
-   'Bio.phenotype'
]

if os.name == 'java':

Ideally that would have included a trailing comma, so that next time an entry is added to the list it is a one line change. But there's no real need to change it now.

    'Bio.Affy',
    'Bio.Cluster',
    'Bio.KDTree',
+   'Bio.phenotype',
]

if os.name == 'java':
codereview_python_data_9853
    except (
        expat.ExpatError, SocketServer.socket.error,
        xmlrpclib.Fault) as exception:
-       logger.warning('Error while making RPC call: {0!s}'.format(exception))
        return None

    def Close(self):

maybe add "with error" for consistence?

    except (
        expat.ExpatError, SocketServer.socket.error,
        xmlrpclib.Fault) as exception:
+       logger.warning('Unable to make RPC call with error: {0!s}'.format(
+           exception))
        return None

    def Close(self):
codereview_python_data_9860
    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
-   classfication cost, regression L1 cost and regression giou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned

1. We should not do the check here. Move the assertion to iou_calculator.
2. make `mode='giou'` an argument of iou_calculator, it should not be the argument of the assigner.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are weighted sum of three components:
+   classfication cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
codereview_python_data_9862
    def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
        num_points = points.size(0)
        num_gts = gt_labels.size(0)
-       if len(gt_bboxes) == 0:
            return gt_labels.new_zeros(num_points), \
                   gt_bboxes.new_zeros((num_points, 4))

Thanks for fixing the bug. We can replace `len(gt_bboxes)` with `num_gts` here.

    def fcos_target_single(self, gt_bboxes, gt_labels, points, regress_ranges):
        num_points = points.size(0)
        num_gts = gt_labels.size(0)
+       if num_gts == 0:
            return gt_labels.new_zeros(num_points), \
                   gt_bboxes.new_zeros((num_points, 4))
codereview_python_data_9864
from ..utils import toindex
from .. import backend as F

-__all__ = ['sample_neighbors']

SAMPLING_SERVICE_ID = 6657
INSUBGRAPH_SERVICE_ID = 6658

Why is this under sampling.py? Seems we need a new name for this file or move it to a separate one

from ..utils import toindex
from .. import backend as F

+__all__ = ['sample_neighbors', 'in_subgraph']

SAMPLING_SERVICE_ID = 6657
INSUBGRAPH_SERVICE_ID = 6658
codereview_python_data_9865
def float2str(value, precision=None):
    return "{0:.{1}f}".format(value, precision) \
-       if (precision is not None) and (not isinstance(value, str)) else str(value)


def add(root, parent=None, decision=None):
    """recursively add node or edge"""

Please remove the brackets and use `string_type` from `compat.py` module instead of `str` in `isinstance()`.

def float2str(value, precision=None):
    return "{0:.{1}f}".format(value, precision) \
+       if (precision is not None) and not isinstance(value, string_type) else str(value)


def add(root, parent=None, decision=None):
    """recursively add node or edge"""
codereview_python_data_9866
        current_app.logger.error("Error while getting artist_country_codes, {}".format(err), exc_info=True)
        error_msg = ("An error occurred while calculating artist_map data, "
                     "try setting 'force_recalculate' to 'false' to get a cached copy if available"
-                    "Payload: {}".format(request_data))
        raise APIInternalServerError(error_msg)

    return country_codes

I'd like to log what the response from the endpoint was too, if it's >=400, could we log the text?

        current_app.logger.error("Error while getting artist_country_codes, {}".format(err), exc_info=True)
        error_msg = ("An error occurred while calculating artist_map data, "
                     "try setting 'force_recalculate' to 'false' to get a cached copy if available"
+                    "Payload: {}. Response: {}".format(request_data, result.text))
        raise APIInternalServerError(error_msg)

    return country_codes
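A sketch of the logging the reviewer asks for, assuming `result` is a `requests.Response`; the helper name is illustrative.

```python
import logging

logger = logging.getLogger(__name__)

def log_if_failed(result, request_data):
    # Log the response body alongside the payload when the endpoint errors out.
    if result.status_code >= 400:
        logger.error("Stats endpoint returned %s. Payload: %s. Response: %s",
                     result.status_code, request_data, result.text)
```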
codereview_python_data_9867
class ProjectLocaleQuerySet(models.QuerySet):
    def available(self):
        """
-       Available project locales belong to translable projects.
        """
        return self.filter(
            project__disabled=False,
-           project__system_project=False,
            project__resources__isnull=False,
        ).distinct()

This change implies that we no longer need the old implementation of the `ProjectLocale.available()` method. Are you sure about that? If yes, you should rename the method to the same name as you use on the `Project` model (e.g. `visible`). If not, you should add a new method, instead of overriding the existing one.

class ProjectLocaleQuerySet(models.QuerySet):
    def available(self):
        """
+       Available project locales belong to available projects.
        """
        return self.filter(
            project__disabled=False,
            project__resources__isnull=False,
        ).distinct()
codereview_python_data_9874
def _to_int(value):
    """Transforms a value into a database integer (or None)."""
    # pylint: disable=no-else-return
    if value is None:
        return None
    elif not value:

Would it make sense to change lines 86-91 to:
```
if not value:
    return None
else:
    return int(value)
```
Also I wonder if we should wrap in a try/except, in case the int conversion fails. Thoughts?

def _to_int(value):
    """Transforms a value into a database integer (or None)."""
    # pylint: disable=no-else-return
+   # TODO: Investigate adding a try around this and simplifying.
    if value is None:
        return None
    elif not value:
codereview_python_data_9875
    elif isinstance(data, (Seq, int, float)):
        raise TypeError(
            "The sequence data given to a MutableSeq object "
-           "should be a string or an array "
-           "(not a Seq object etc)"
        )
    else:
        self.data = data  # assumes the input is an array

The last 2 strings should fit in one line.

    elif isinstance(data, (Seq, int, float)):
        raise TypeError(
            "The sequence data given to a MutableSeq object "
+           "should be a string or an array (not a Seq object etc)"
        )
    else:
        self.data = data  # assumes the input is an array
codereview_python_data_9881
    def __repr__(self):
        repr_str = self.__class__.__name__
-       repr_str += '(transformations={})'.format(self.transforms)
        return repr_str

Thanks! You may also change the key here from `transformations` to `transforms`.

    def __repr__(self):
        repr_str = self.__class__.__name__
+       repr_str += '(transforms={})'.format(self.transforms)
        return repr_str
codereview_python_data_9894
    async def explode_agents(self, criteria: object = None) -> object:
        agents = await self.dao.get('core_agent', criteria)
        for a in agents:
-           sql = 'SELECT g.id, g.name FROM core_group g JOIN core_group_map m on g.id=m.group_id ' \
-                 'WHERE m.agent_id = %s;' % a['id']
-           a['groups'] = await self.dao.raw_select(sql)
        return agents

    async def explode_results(self, criteria=None):

i feel like we should do a select * from core_group before entering this loop. even if the table has 10,000 entries (not likely) i think it'd be more performant than a slew of smaller queries. thoughts?

    async def explode_agents(self, criteria: object = None) -> object:
        agents = await self.dao.get('core_agent', criteria)
+       sql = 'SELECT g.id, g.name, m.agent_id FROM core_group g JOIN core_group_map m on g.id=m.group_id'
+       groups = await self.dao.raw_select(sql)
        for a in agents:
+           a['groups'] = [dict(id=g['id'], name=g['name']) for g in groups if g['agent_id'] == a['id']]
        return agents

    async def explode_results(self, criteria=None):
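The accepted fix still scans the whole group list once per agent; grouping the rows into a dict first makes the join O(agents + rows). A sketch with illustrative data:

```python
groups = [{'id': 1, 'name': 'red', 'agent_id': 7},
          {'id': 2, 'name': 'blue', 'agent_id': 7}]
agents = [{'id': 7}, {'id': 8}]

by_agent = {}
for g in groups:  # one pass over the joined rows
    by_agent.setdefault(g['agent_id'], []).append({'id': g['id'], 'name': g['name']})
for a in agents:
    a['groups'] = by_agent.get(a['id'], [])
```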
codereview_python_data_9902
            attrs[attrname] = attr(vals)

    if not 'mass' in attrs:
-       print('Ive got no masses!')
        attrs['mass'] = Masses(np.zeros(natoms))
    if not 'charge' in attrs:
        attrs['charge'] = Charges(np.zeros(natoms, dtype=np.float32))

No `print` in library code. If necessary we would `warn()`. Are masses _required_ to be present in a HooMD XML file?
```suggestion
```

            attrs[attrname] = attr(vals)

    if not 'mass' in attrs:
        attrs['mass'] = Masses(np.zeros(natoms))
    if not 'charge' in attrs:
        attrs['charge'] = Charges(np.zeros(natoms, dtype=np.float32))
codereview_python_data_9905
import inspect

import mmtf
import numpy as np
-from MDAnalysis.exceptions import FileIOError
from MDAnalysis.lib.util import isstream

from .. import _READERS, _PARSERS, _MULTIFRAME_WRITERS, _SINGLEFRAME_WRITERS

remove (see below)

import inspect

import mmtf
import numpy as np
from MDAnalysis.lib.util import isstream

from .. import _READERS, _PARSERS, _MULTIFRAME_WRITERS, _SINGLEFRAME_WRITERS
codereview_python_data_9907
)


-def _gen_for_pad_lines_after_first(prefix, s):
-   lines = iter(s.splitlines())
-   yield next(lines)
-   for line in lines:
-       yield prefix + line
-
-
def pad_lines_after_first(prefix, s):
    """Apply a prefix to each line in s after the first."""
-   return '\n'.join(_gen_for_pad_lines_after_first(prefix, s))


def format_docstring(owner_name, docstring, formatters):

is this just `('\n' + prefix).join(...)`?

)


def pad_lines_after_first(prefix, s):
    """Apply a prefix to each line in s after the first."""
+   return ('\n' + prefix).join(s.splitlines())


def format_docstring(owner_name, docstring, formatters):
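A quick check that the one-liner really matches the old generator version's behaviour:

```python
def pad_lines_after_first(prefix, s):
    """Apply a prefix to each line in s after the first."""
    return ('\n' + prefix).join(s.splitlines())

assert pad_lines_after_first('> ', 'a\nb\nc') == 'a\n> b\n> c'
```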
codereview_python_data_9915
"""A QtWebEngine tab in the browser.""" WIDGET_CLASS = QOpenGLWidget - history_prepared = [] def __init__(self, win_id, mode_manager, parent=None): super().__init__(win_id=win_id, mode_manager=mode_manager, If you assign this here on class level, every `WebEngineTab` will share the same `history_prepared` data - I think you should do `self.history_prepared = []` in `__init__`. You should also do this in `browsertab.AbstractTab` instead, not in each tab subclass individually. """A QtWebEngine tab in the browser.""" WIDGET_CLASS = QOpenGLWidget def __init__(self, win_id, mode_manager, parent=None): super().__init__(win_id=win_id, mode_manager=mode_manager,
codereview_python_data_9921
        Default: None
    norm_cfg (dict): dictionary to construct and config norm layer.
        Default: dict(type='BN')
-   reverse (bool): Reverse the order of planes in layer sequence.
-       False for ResNet, True for Houglass. Default: False
    """

    def __init__(self,

It is is more appropriate to use `downsample_first`. If `downsample_first=True`, the downsample block is the first block and it is used for ResNet. If `downsample_first=False`, the downsample block is the last block, which is used by Hourglass network.

        Default: None
    norm_cfg (dict): dictionary to construct and config norm layer.
        Default: dict(type='BN')
+   downsample_first (bool): Downsample at the first block or latest block.
+       False for Hourglass, True for ResNet. Default: True
    """

    def __init__(self,
codereview_python_data_9931
"Text color for the keyhint widget."), ('keyhint.fg.suffix', - SettingValue(typ.QssColor(), '#FFFF00'), "Highlight color for keys to complete the current keychain"), ('keyhint.bg', I think this should be `CssColor` as the color is used inside HTML and not a Qt stylesheet. "Text color for the keyhint widget."), ('keyhint.fg.suffix', + SettingValue(typ.CssColor(), '#FFFF00'), "Highlight color for keys to complete the current keychain"), ('keyhint.bg',
codereview_python_data_9934
            amount=buy_amount,
            rate=Price(rate),
            fee=Fee(fee),
-           fee_currency=buy_asset,  # Assumption that mineFee is denominated in outputCurrency
            link='',
            notes='Trade from ShapeShift',
        )

I assume typo
```suggestion
            fee_currency=buy_asset,  # Assumption that minFee is denominated in outputCurrency
```

            amount=buy_amount,
            rate=Price(rate),
            fee=Fee(fee),
+           fee_currency=buy_asset,  # Assumption that minerFee is denominated in outputCurrency
            link='',
            notes='Trade from ShapeShift',
        )
codereview_python_data_9939
        return st.sampled_from(literals)
    if is_annotated_type(thing):
        args = thing.__args__
        annotated_type = args[0]
        return st.from_type(annotated_type)
    # Now, confirm that we're dealing with a generic type as we expected

What if `args == ()`? Should we check for this in `is_annotated_type()`? If it's impossible, let's add `assert args, "it's impossible to make an annotated type with no args"` to make that obvious to later readers.

        return st.sampled_from(literals)
    if is_annotated_type(thing):
        args = thing.__args__
+       assert args, "it's impossible to make an annotated type with no args"
        annotated_type = args[0]
        return st.from_type(annotated_type)
    # Now, confirm that we're dealing with a generic type as we expected
codereview_python_data_9942
    def __init__(self, rename={}, source=None, subscribers=[], linked=True, **params):
        """
-       Mapping allows multiple streams with similar event state to be
-       used by remapping parameter names.

        Source is an optional argument specifying the HoloViews
        datastructure that the stream receives events from, as supported

Did none of the docstrings mention preprocessors? Either way docstrings should now mention the rename argument.

    def __init__(self, rename={}, source=None, subscribers=[], linked=True, **params):
        """
+       The rename argument allows multiple streams with similar event
+       state to be used by remapping parameter names.

        Source is an optional argument specifying the HoloViews
        datastructure that the stream receives events from, as supported
codereview_python_data_9962
# Publish a simple message to the specified SNS topic
response = sns.publish(
-   TopicArn='my-topic-arn',
    Message='Hello World!',
)

you should put a demo arn here
```suggestion
    TopicArn='arn:aws:sns:region:0123456789:my-topic-arn',
```

# Publish a simple message to the specified SNS topic
response = sns.publish(
+   TopicArn='arn:aws:sns:region:0123456789:my-topic-arn',
    Message='Hello World!',
)
codereview_python_data_9967
import colorama
import jinja2
import m9dicts
-import paramiko

colorama.init(autoreset=True)

-GENERATED_SSH_KEY_LOCATION = '/tmp/molecule_rsa'


class LogFilter(object):
    def __init__(self, level):

Would be nice to use something that chooses the OS tempdir.

import colorama
import jinja2
import m9dicts

colorama.init(autoreset=True)


class LogFilter(object):
    def __init__(self, level):
codereview_python_data_9972
from zipline.data.data_portal import DataPortal
from zipline.data.minute_bars import BcolzMinuteBarWriter, \
    US_EQUITIES_MINUTES_PER_DAY, BcolzMinuteBarReader
-from zipline.data.us_equity_pricing import BcolzDailyBarReader
from zipline.finance.trading import TradingEnvironment
from zipline.protocol import BarData
from zipline.utils.test_utils import write_minute_data_for_asset, \
-   create_daily_df_for_asset, DailyBarWriterFromDataFrames

OHLC = ["open", "high", "low", "close"]
OHLCP = OHLC + ["price"]

So that it can be seen which if any values fail. It would be helpful here to either include the field as the 3rd argument to the assert, or `np.testing.assert_array_equal` does support checking `np.nan`s against eachother, so this could be.
```
np.testing.assert_array_equal(
    [np.nan] * 5,
    [bar_data.spot_value(self.ASSET2, field) for field in OHLCP]
)
```

from zipline.data.data_portal import DataPortal
from zipline.data.minute_bars import BcolzMinuteBarWriter, \
    US_EQUITIES_MINUTES_PER_DAY, BcolzMinuteBarReader
+from zipline.data.us_equity_pricing import BcolzDailyBarReader, \
+    SQLiteAdjustmentReader
from zipline.finance.trading import TradingEnvironment
from zipline.protocol import BarData
from zipline.utils.test_utils import write_minute_data_for_asset, \
+    create_daily_df_for_asset, DailyBarWriterFromDataFrames, \
+    create_mock_adjustments, str_to_seconds

OHLC = ["open", "high", "low", "close"]
OHLCP = OHLC + ["price"]
codereview_python_data_9974
    user, password = None, None
    try:
        net = netrc.netrc(config.val.content.netrc_file)
-       if url.port():
            authenticators = net.authenticators(
                "{}:{}".format(url.host(), url.port())
            )

Sorry, didn't notice this before: The default/invalid value for `QUrl.port()` is -1, because 0 is a valid (but special) port. Doing `if url.port():` lets `-1` ("no port") through, so this should probably be `if url.port() != -1:` instead.

    user, password = None, None
    try:
        net = netrc.netrc(config.val.content.netrc_file)
+       if url.port() != -1:
            authenticators = net.authenticators(
                "{}:{}".format(url.host(), url.port())
            )
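A demonstration of the pitfall, assuming PyQt5 is available: `QUrl.port()` returns `-1` for "no port", which is truthy, while the perfectly valid port 0 would be falsy.

```python
from PyQt5.QtCore import QUrl

assert QUrl("http://example.com/").port() == -1    # no port -> -1, not 0
url = QUrl("http://example.com:8080/")
if url.port() != -1:                               # explicit check, as the review asks
    host = "{}:{}".format(url.host(), url.port())  # "example.com:8080"
```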
codereview_python_data_9975
    Args:
        weight_decay: A `Tensor`, a floating point value, or a schedule
-           that is a `tf.keras.optimizers.schedules.LearningRateSchedule
            to decay the variable by, in the update step.
        **kwargs: Optional list or tuple or set of `Variable` objects to
            decay.

```suggestion
            that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
```

    Args:
        weight_decay: A `Tensor`, a floating point value, or a schedule
+           that is a `tf.keras.optimizers.schedules.LearningRateSchedule`
            to decay the variable by, in the update step.
        **kwargs: Optional list or tuple or set of `Variable` objects to
            decay.
codereview_python_data_9980
        for i, ts in enumerate(
                self._trajectory[self.start:self.stop:self.step]):
            #logger.info("--> Doing frame {} of {}".format(i+1, self.nframes))
-           self._single_frame()
        #logger.info("Finishing up")
        self._conclude()

Is there a reason why this line has to go? Could we call `_single_frame` with `ts` as the argument? ie.
```python
for i, ts in enumerate(self._traj[etc]):
    self._single_frame(ts)
```

        for i, ts in enumerate(
                self._trajectory[self.start:self.stop:self.step]):
            #logger.info("--> Doing frame {} of {}".format(i+1, self.nframes))
+           self._single_frame(ts)
        #logger.info("Finishing up")
        self._conclude()
codereview_python_data_9988
            Negative indices count from the end, such that -1 is the last tab.
        count: The tab index to focus, starting with 1.
-           zero_count: The tab index to focus, with 0.
-           The special value 0 value focuses the last focused tab.
    """
    if index == 'last':
        self._tab_focus_last()

This line doesn't make any sense anymore here.

            Negative indices count from the end, such that -1 is the last tab.
        count: The tab index to focus, starting with 1.
+           The special value 0 focuses the rightmost tab.
    """
    if index == 'last':
        self._tab_focus_last()
codereview_python_data_9989
logger = logging.getLogger(__name__)

-c = Monitor()


class Block(object):

Give a more descriptive name to `c`, I think `monitor` is fine. (Yup, we will give a more descriptive name also to `b` one day)

logger = logging.getLogger(__name__)

+monitor = Monitor()


class Block(object):
codereview_python_data_9991
    ------
    NetworkXNotImplemented
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
-       If the input graph is an instance of one of these classes, a
-       :exc:`NetworkXNotImplemented` is raised.

    Examples
    --------

IMO this last sentence (`If the input graph...`) is redundant anyways, so I'd vote to remove these two lines entirely.

    ------
    NetworkXNotImplemented
        The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.

    Examples
    --------
codereview_python_data_9993
    def _dict_config(self, config):
        if config and isinstance(config, six.string_types):
-           return dict(map(lambda i: i.split('=',1), config.split('|')))

    def _text_arg(self, name, value):
        if value:

should be a space after the comma here, but not blocking

    def _dict_config(self, config):
        if config and isinstance(config, six.string_types):
+           return dict(map(lambda i: i.split('=', 1), config.split('|')))

    def _text_arg(self, name, value):
        if value:
codereview_python_data_10002
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript, QWebEngineHistory

from qutebrowser.config import config
-from qutebrowser.browser import (browsertab, eventfilter, shared, webelem,
-                                 greasemonkey)
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
                                           webenginesettings, certificateerror)
from qutebrowser.misc import miscwidgets, objects

nitpick:
```suggestion
from qutebrowser.browser import browsertab, eventfilter, shared, webelem, greasemonkey
```

from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript, QWebEngineHistory

from qutebrowser.config import config
+from qutebrowser.browser import browsertab, eventfilter, shared, webelem, greasemonkey
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
                                           webenginesettings, certificateerror)
from qutebrowser.misc import miscwidgets, objects
codereview_python_data_10003
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
-"""
-Ops for building neural network layers
-"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

put in one line? `"""Ops for building xxxx."""`

# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
+"""Ops for building neural network layers"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
codereview_python_data_10007
        Returns a tuple of (width, height) in pixels.
        """
        return (plot.state.height, plot.state.height)

Surely it is only the `load_notebook` bit that needs to go?

        Returns a tuple of (width, height) in pixels.
        """
        return (plot.state.height, plot.state.height)
+
+   @classmethod
+   def load_nb(cls):
+       """
+       Loads the bokeh notebook resources.
+       """
+       load_notebook()
codereview_python_data_10008
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
-   :param data: the body to attach the request. If a dictionary is provided, form-encoding will take place.
-   :param json: json for the body to attach the request.
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.

s/attach the request/attach to the request :)

    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
+   :param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
+   :param json: json for the body to attach to the request (if data is not specified).
    :param params: dictionary of URL parameters to append to the URL.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
codereview_python_data_10016
def ball_throw_reticle_fail_delay(success_prob=0.95):
    '''
-   Chances to skip the reticle should be constant, so the wait time before throwing is binomial,
-   given that the monster does not interrupt... <- TODO
    '''
    for trial in range(10):
        if random() < success_prob:
            break
-       return trial
        time.sleep(1.8*(trial+random()))

This sleep will never execute.

def ball_throw_reticle_fail_delay(success_prob=0.95):
    '''
+   Chances to skip the reticle could be considered constant, so the wait time before throwing is binomial,
+   given that the pokemon does not interrupt... <- TODO
    '''
    for trial in range(10):
        if random() < success_prob:
            break
+   time.sleep(1.8*(trial+random()))
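A standalone sketch of the intended behaviour: count failed trials (a geometric draw), then sleep proportionally; the early `return` in the original made the sleep unreachable.

```python
import time
from random import random

def fail_delay(success_prob=0.95, max_trials=10):
    for trial in range(max_trials):
        if random() < success_prob:
            break                             # reticle "hit" on this trial
    time.sleep(1.8 * (trial + random()))      # sleep scales with the number of misses
    return trial
```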
codereview_python_data_10026
        self.assertRaises(ValueError, list, mmcif._splitline("foo b'ar'"))

    def test_verbatim_block(self):
-       """Verbatim bocks parsed correctly.

        Verbatim blocks delimited by ";...;" should have the final newline
        stripped. Whitespace may be stripped from the end of the line but not

Maybe try implicit string concatenation with explicit line breaks?
```python
mmcif_dict = MMCIF2Dict(io.StringIO(
    "data_verbatim_test\n"
    "_test_value\n"
    ...
)
```

        self.assertRaises(ValueError, list, mmcif._splitline("foo b'ar'"))

    def test_verbatim_block(self):
+       """Verbatim blocks parsed correctly.

        Verbatim blocks delimited by ";...;" should have the final newline
        stripped. Whitespace may be stripped from the end of the line but not
codereview_python_data_10028
    results: list
        Fraction of native contacts for each frame
    """

-   assert(grA.universe == grB.universe)
    self.u = grA.universe
    self.grA, self.grB = grA, grB

You don't want an assert here, do a proper if not x==y: raise ValueError with a nice message

    results: list
        Fraction of native contacts for each frame
    """

+   if not grA.universe == grB.universe: raise ValueError("grA and grB should come from the same Universe")
    self.u = grA.universe
    self.grA, self.grB = grA, grB
codereview_python_data_10032
        self.assertRaises(FileNotFoundError, d.get_raw, "alpha")

    def test_old_check_same_thread(self):
-       """Setting check_same_thread to False doesn't raise an exception"""
        d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")

        def reader_thread():

Needs a trailing full stop,
```
$ flake8
./Tests/test_SeqIO_index.py:116:1: D400 First line should end with a period
```

        self.assertRaises(FileNotFoundError, d.get_raw, "alpha")

    def test_old_check_same_thread(self):
+       """Setting check_same_thread to False doesn't raise an exception."""
        d = SeqIO.index_db("Roche/triple_sff_rel_paths.idx")

        def reader_thread():
codereview_python_data_10037
        return len(self._data)  # Seq API requirement

    def __getitem__(self, index):  # Seq API requirement
-       """ Return a subsequence of single letter, use my_seq[index].

        >>> my_seq = Seq('ACTCGACGTCG')
        >>> my_seq[5]

Unwanted space before "Return"

        return len(self._data)  # Seq API requirement

    def __getitem__(self, index):  # Seq API requirement
+       """Return a subsequence of single letter, use my_seq[index].

        >>> my_seq = Seq('ACTCGACGTCG')
        >>> my_seq[5]
codereview_python_data_10053
        config.username = raw_input("Username: ")
    if not config.password and 'password' not in load:
        config.password = getpass("Password: ")
-    if config.proxy_ip and len(config.proxy_ip)>0 and config.proxy_ip.count('.') == 3 and all(0<=int(num)<256 for num in config.proxy_ip.rstrip().split('.')):
-        if config.proxy_port and int(config.proxy_port )<65536 and int(config.proxy_port )>1:
-            os.environ['http_proxy']="http://"+config.proxy_ip+":"+str(config.proxy_port)+"/"
-            os.environ['https_proxy']="http://"+config.proxy_ip+":"+str(config.proxy_port)+"/"
    config.catch = load.get('catch', {})
    config.release = load.get('release', {})
Extract this to a method, please... something called `setup_proxy`.
        config.username = raw_input("Username: ")
    if not config.password and 'password' not in load:
        config.password = getpass("Password: ")

+    set_proxy_if_exists(config)
    config.catch = load.get('catch', {})
    config.release = load.get('release', {})
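The fix calls a `set_proxy_if_exists` helper that the record never shows. Based on the inline logic that was removed, it presumably looks something like this reconstruction (not verified against the project):

```python
import os

def set_proxy_if_exists(config):
    """Export http(s)_proxy env vars if a valid proxy IP/port is configured."""
    ip_ok = (config.proxy_ip and config.proxy_ip.count('.') == 3 and
             all(0 <= int(num) < 256
                 for num in config.proxy_ip.rstrip().split('.')))
    if ip_ok and config.proxy_port and 1 < int(config.proxy_port) < 65536:
        proxy = "http://" + config.proxy_ip + ":" + str(config.proxy_port) + "/"
        os.environ['http_proxy'] = proxy
        os.environ['https_proxy'] = proxy
```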
codereview_python_data_10054
        # report to server
        # Review Very Carefully for the following line, forbiden ID changed PR:
        if bot.config.health_record:
-            logger.log('[x] Send anonymous bot health report to server, it can be disabled by config \"health_record\":false in config file', 'red')
-            logger.log('[x] Wait for 2 seconds ', 'red')
            sleep(3)
            self.tracker = Tracker.create('UA-81469507-1', use_post=True)
        # No RAW send function to be added here, to keep everything clean
Remove the [x], we ditched this now :)
        # report to server
        # Review Very Carefully for the following line, forbiden ID changed PR:
        if bot.config.health_record:
+            logger.log('Send anonymous bot health report to server, it can be disabled by config \"health_record\":false in config file', 'red')
+            logger.log('Wait for 2 seconds ', 'red')
            sleep(3)
            self.tracker = Tracker.create('UA-81469507-1', use_post=True)
        # No RAW send function to be added here, to keep everything clean
codereview_python_data_10059
        else:
            hatch = concurrency

-        wrapper = os.path.join(get_full_path(__file__, step_up=2), "resources", "locustio-taurus-wrapper.py")

        if load.duration:
            self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)})
Why have this logic?
        else:
            hatch = concurrency

+        wrapper = os.path.join(RESOURCES_DIR, "locustio-taurus-wrapper.py")

        if load.duration:
            self.env.set({"LOCUST_DURATION": dehumanize_time(load.duration)})
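The `RESOURCES_DIR` constant that replaces the repeated path computation is presumably defined once at module level; judging by the removed line, something like this sketch (the import location is an assumption):

```python
import os
from bzt.utils import get_full_path  # assumed import location

# two directories up from this module, then into "resources"
RESOURCES_DIR = os.path.join(get_full_path(__file__, step_up=2), "resources")
```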
codereview_python_data_10060
        return self._read_next_timestep()

    @property
    def n_frames(self):
        # Second line is traj_key, imcom, n_atoms, n_frames, n_records
        offsets = []

        with open(self.filename, 'r') as f:
            f.readline()
-            n_frames = int(f.readline().split()[3])
            position = f.tell()

            line = f.readline()
            while line.startswith('timestep'):
The reason for `self._n_frames` was to cache the number of frames so it's only calculated once. This needs to be added back in.
        return self._read_next_timestep()

    @property
+    @cached('n_frames')
    def n_frames(self):
        # Second line is traj_key, imcom, n_atoms, n_frames, n_records
        offsets = []

        with open(self.filename, 'r') as f:
            f.readline()
            position = f.tell()

            line = f.readline()
            while line.startswith('timestep'):
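MDAnalysis's `@cached` decorator memoizes the property's value in the instance's `_cache` dict, so the expensive file scan happens only once. A simplified stand-alone version of that pattern (not the library's actual implementation):

```python
import functools

def cached(key):
    """Cache a method's return value in self._cache under `key`."""
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kwargs):
            try:
                return self._cache[key]
            except KeyError:
                self._cache[key] = func(self, *args, **kwargs)
                return self._cache[key]
        return wrapper
    return decorator

class Reader:
    def __init__(self):
        self._cache = {}

    @property
    @cached('n_frames')
    def n_frames(self):
        print("expensive scan of the trajectory file...")  # runs only once
        return 42

r = Reader()
r.n_frames  # prints the message and caches 42
r.n_frames  # served from self._cache, no rescan
```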
codereview_python_data_10063
""" default_domain = self.scenario.get("default-domain", None) default_port = self.scenario.get("default-port", None) - retrieve_resources = self.scenario.get("retrieve-resources", None) concurrent_pull_size = self.scenario.get("concurrent-pull-size", 4) timeout = self.scenario.get("timeout", None) Why this "if" lost? Any of parameters might be omitted... """ default_domain = self.scenario.get("default-domain", None) default_port = self.scenario.get("default-port", None) + retrieve_resources = self.scenario.get("retrieve-resources", True) concurrent_pull_size = self.scenario.get("concurrent-pull-size", 4) timeout = self.scenario.get("timeout", None)
codereview_python_data_10066
            'startpage': QUrl(config.get('general', 'startpage')[0]),
            'default-page': config.get('general', 'default-page'),
        }
-        first_tab_url = self.widget(0).url()
-        last_close_url_used = first_tab_url == urls[last_close]
        if only_one_tab_open and no_history and last_close_url_used:
            self.removeTab(0)
I wonder if `self.widget(0).page().mainFrame().requestedUrl()` would work better for redirects etc. here - can you try?
            'startpage': QUrl(config.get('general', 'startpage')[0]),
            'default-page': config.get('general', 'default-page'),
        }
+        first_tab_url = self.widget(0).page().mainFrame().requestedUrl()
+        last_close_urlstr = urls[last_close].toString().rstrip('/')
+        first_tab_urlstr = first_tab_url.toString().rstrip('/')
+        last_close_url_used = first_tab_urlstr == last_close_urlstr
        if only_one_tab_open and no_history and last_close_url_used:
            self.removeTab(0)
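Note why the fixed version compares strings rather than `QUrl` objects: stripping the trailing slash makes `http://a.com` and `http://a.com/` compare equal. The normalization in isolation (plain strings, no Qt required):

```python
def same_page(url_a, url_b):
    # treat "http://a.com" and "http://a.com/" as the same page
    return url_a.rstrip('/') == url_b.rstrip('/')

assert same_page("http://a.com/", "http://a.com")
assert not same_page("http://a.com/x", "http://a.com")
```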
codereview_python_data_10067
from kombu.utils.encoding import bytes_to_str, str_to_bytes

-from celery.five import bytes_if_py2, items, string_t

from .encoding import safe_repr

try:
Perhaps out of scope for this PR, but I wonder if we still need `bytes_if_py2` here.
from kombu.utils.encoding import bytes_to_str, str_to_bytes

+from celery.five import (bytes_if_py2, items, python_2_unicode_compatible,
+                         reraise, string_t)

from .encoding import safe_repr

try:
codereview_python_data_10069
def test___repr___empty():
-    modin_s = pd.Series()
-    pandas_s = pandas.Series()
-    assert repr(modin_s) == repr(pandas_s)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
```suggestion
def test___repr___empty():
    modin_s, pandas_s = pd.Series(), pandas.Series()
    assert repr(modin_s) == repr(pandas_s)
```
def test___repr___empty():
+    modin_series, pandas_series = pd.Series(), pandas.Series()
+    assert repr(modin_series) == repr(pandas_series)


@pytest.mark.parametrize("data", test_data_values, ids=test_data_keys)
codereview_python_data_10070
    applied.
    """,
    deprecation_message="""
-This setting is deprecated and will be removed in a future version. To get
-the future behaviour set ``timeout=hypothesis.unlimited`` instead (which will
-remain valid for a further deprecation period after this setting has gone
-away).
""",
    future_default=unlimited,
    validator=_validate_timeout
This is now kind of an unhelpful message, because it gets emitted when people use the timeout setting, but that is usually going to be one of several settings being used in creation and it just says "this setting" without disambiguating.
    applied.
    """,
    deprecation_message="""
+The timeout setting is deprecated and will be removed in a future version of
+Hypothesis. To get the future behaviour set ``timeout=hypothesis.unlimited``
+instead (which will remain valid for a further deprecation period after this
+setting has gone away).
""",
    future_default=unlimited,
    validator=_validate_timeout
codereview_python_data_10079
    def _current_title(self):
        """Convenience method to get the current title."""
-        title = self._current_widget().title()
-        return title if title else "(null)"

    def _current_widget(self):
        """Get the currently active widget from a command."""
I'm not sure if this should be handled here - maybe rather just pass the empty string to `bookmark_add`, and handle that situation there? Also, the user interface would actually display `(null)` now, and that'd be saved to the file - why not just not save anything at all for the title, i.e. only the URL - and handle that case when reading?
    def _current_title(self):
        """Convenience method to get the current title."""
+        return self._current_widget().title()

    def _current_widget(self):
        """Get the currently active widget from a command."""
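Following the reviewer's suggestion, the empty-title fallback would live in `bookmark_add` instead; a hedged sketch of that idea (the storage format and helper are hypothetical, not the project's code):

```python
bookmarks = []

def bookmark_add(url, title=""):
    # store only the URL when there is no title,
    # instead of persisting a "(null)" placeholder
    bookmarks.append(url if not title else "{} {}".format(url, title))

bookmark_add("https://example.com")             # -> "https://example.com"
bookmark_add("https://example.com", "Example")  # -> "https://example.com Example"
```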
codereview_python_data_10087
The :class:`GSDParser` generates a topology from files for the HOOMD_ code.

-TODO: write info

.. _HOOMD: http://codeblue.umich.edu/hoomd-blue/index.html
-.. _HOOMD XML: http://codeblue.umich.edu/hoomd-blue/doc/page_xml_file_format.html

Classes
-------
This class needs tests (separate from the reader tests). Check that things like atomtypes, masses etc. are all being read correctly.
The :class:`GSDParser` generates a topology from files for the HOOMD_ code.

+Parser for the HOOMD-blue GSD topology/trajectory format. The GSD file stores
+information on both the topology and the trajectory in the same file, and allows
+for varying atom numbers/identities and topologies during the course of the
+simulation. At the moment MDAnalysis can deal only with the case in which there
+is no variation.

.. _HOOMD: http://codeblue.umich.edu/hoomd-blue/index.html
+.. _HOOMD GSD: https://bitbucket.org/glotzer/gsd

Classes
-------
codereview_python_data_10092
        if fuzz_target:
            engine_impl = engine.get(fuzz_target.engine)
            if not engine_impl:
-                raise RuntimeError(engine_impl)

            self._is_black_box = False
            self._engine_impl = engine_impl
Why pass engine_impl as the argument when it is None? Maybe a message is better, e.g. 'Engine not found' or something.
        if fuzz_target:
            engine_impl = engine.get(fuzz_target.engine)
            if not engine_impl:
+                raise RuntimeError('Could not find engine ' + engine_impl.name)

            self._is_black_box = False
            self._engine_impl = engine_impl
codereview_python_data_10097
    if directory:
        # Path of the sample file that we will read to get the remaining
        # columns.
-        pd = ParquetDataset(path)
        column_names = pd.schema.names
    else:
        pf = ParquetFile(path)
Do we need to look at all the files? We should be able to read the first file that we encounter for the remaining column names.
    if directory:
        # Path of the sample file that we will read to get the remaining
        # columns.
+        pd = ParquetDataset(file_path)
        column_names = pd.schema.names
    else:
        pf = ParquetFile(path)
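The reviewer's alternative: one file's footer already carries the schema, so there is no need to open every file via `ParquetDataset`. A sketch with pyarrow (assuming a recent pyarrow where `ParquetFile.schema_arrow` exists; the directory layout is hypothetical):

```python
import os
from pyarrow.parquet import ParquetFile

def column_names_from_directory(directory):
    # read the footer of the first parquet file we encounter
    first = next(name for name in sorted(os.listdir(directory))
                 if name.endswith(".parquet"))
    return ParquetFile(os.path.join(directory, first)).schema_arrow.names
```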
codereview_python_data_10099
    return np.zeros(6, dtype=np.float32)


-def triclinic_vectors(dimensions):
    """Convert ``[lx, ly, lz, alpha, beta, gamma]`` to a triclinic matrix
    representation.
If the boxes are always convex, I wonder how this performance compares with the volume calculation available from Qhull via ConvexHull in SciPy.
    return np.zeros(6, dtype=np.float32)


+def triclinic_vectors(dimensions, dtype=np.float32):
    """Convert ``[lx, ly, lz, alpha, beta, gamma]`` to a triclinic matrix
    representation.
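For reference, the conversion that `triclinic_vectors` performs is the standard crystallographic construction: the first box vector lies along x and the second in the xy-plane. A NumPy sketch of the textbook formula (not MDAnalysis's exact code):

```python
import numpy as np

def triclinic_matrix(lx, ly, lz, alpha, beta, gamma, dtype=np.float32):
    """Rows are the three box vectors; angles in degrees."""
    a, b, g = np.deg2rad([alpha, beta, gamma])
    cx = lz * np.cos(b)
    cy = lz * (np.cos(a) - np.cos(b) * np.cos(g)) / np.sin(g)
    cz = np.sqrt(lz ** 2 - cx ** 2 - cy ** 2)
    return np.array([[lx, 0.0, 0.0],
                     [ly * np.cos(g), ly * np.sin(g), 0.0],
                     [cx, cy, cz]], dtype=dtype)

# a cubic box degenerates to a diagonal matrix
print(triclinic_matrix(10, 10, 10, 90, 90, 90))
```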
codereview_python_data_10107
__all__ = ['gspmm']


def reshape_lhs_rhs(lhs_data, rhs_data):
-    r""" Reshape the dimension of lhs and rhs data

    Parameters
    ----------
This reshape function seems the same as the one above. Can we merge them?
__all__ = ['gspmm']


def reshape_lhs_rhs(lhs_data, rhs_data):
+    r""" Reshape the dimension of lhs and rhs data to avoid broadcasting
+    issues with different number of dimensions.

    Parameters
    ----------
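The broadcasting issue the amended docstring mentions comes up when lhs and rhs carry different numbers of feature dimensions; padding the shorter operand with size-1 axes after the leading (node/edge) dimension lets elementwise ops broadcast. A generic NumPy illustration of the idea (not DGL's actual implementation):

```python
import numpy as np

def pad_for_broadcast(lhs_data, rhs_data):
    # insert singleton axes after the leading dim so that
    # operands with different ndim broadcast elementwise
    max_ndim = max(lhs_data.ndim, rhs_data.ndim)

    def pad(x):
        return x.reshape(x.shape[:1] + (1,) * (max_ndim - x.ndim) + x.shape[1:])

    return pad(lhs_data), pad(rhs_data)

a = np.ones((5, 3, 4))   # e.g. per-node features
b = np.ones((5, 4))      # fewer feature dimensions
a2, b2 = pad_for_broadcast(a, b)
print((a2 * b2).shape)   # (5, 3, 4)
```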