codereview_python_data_11399
def make_lexicon():
    from ..Plex import \
        Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
-       TEXT, IGNORE, Method, State, Lexicon,Range

    letter = Any("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
    nonzero_digit = Any("123456789")

Space after comma.

def make_lexicon():
    from ..Plex import \
        Str, Any, AnyBut, AnyChar, Rep, Rep1, Opt, Bol, Eol, Eof, \
+       TEXT, IGNORE, Method, State, Lexicon, Range

    letter = Any("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz_")
    nonzero_digit = Any("123456789")
codereview_python_data_11400
    if type(value) in [int, float]:
        if value == 0:
            return False
-       return True
    return bool(value)

Maybe this should not accept all non-zero numeric values as `True`? maybe just `1` to go along with `0`? I personally lean towards no numbers here, just `True`/`False` or `"true"`/`"false"`. While good error responses with found/expected types are definitely great, I fear that too much lenience and auto-conversion may ultimately be more confusing - things may seem to work but the results are totally wrong, or return an error that doesn't make sense, and it's because there was a mixup in a different argument which was auto-converted.

    if type(value) in [int, float]:
        if value == 0:
            return False
+       if value == 1:
+           return True
+       raise ValueError('Invalid boolean value: {value}'.format(value=repr(value)))
    return bool(value)
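A minimal sketch of the even stricter coercion the reviewer leans towards (only `True`/`False` and the strings `"true"`/`"false"`; numbers are rejected entirely); the function name is hypothetical:

```python
def parse_bool(value):
    """Coerce only unambiguous boolean representations; reject everything else."""
    if isinstance(value, bool):
        return value
    if isinstance(value, str) and value.lower() in ('true', 'false'):
        return value.lower() == 'true'
    raise ValueError('Invalid boolean value: %r' % (value,))
```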
codereview_python_data_11402
    Index tensor
    """
    def __init__(self, storage, scheme=None, index=None, device=None):
-       if isinstance(storage, tuple):
-           # handle DistTensor and node/edge ids in subgraphs
-           self.storage = storage[0]
-           self.index = storage[1]
-       else:
-           self.storage = storage
-           self.index = index
        self.scheme = scheme if scheme else infer_scheme(self.storage)
        self.device = device

Try to avoid changing the `__init__` function as I suggested earlier.

    Index tensor
    """
    def __init__(self, storage, scheme=None, index=None, device=None):
+       self.storage = storage
+       self.index = index
        self.scheme = scheme if scheme else infer_scheme(self.storage)
        self.device = device
codereview_python_data_11407
    if 'ESDestinationDescription' in dest:
        es_dest = dest['ESDestinationDescription']
        es_index = es_dest['IndexName']
-       try:
-           es_type = es_dest['TypeName']
-       except KeyError:
-           es_type = None
        es = connect_elasticsearch()
        for record in records:
            obj_id = uuid.uuid4()

nitpick: Can we use this instead?
```
es_type = es_dest.get('TypeName')
```

    if 'ESDestinationDescription' in dest:
        es_dest = dest['ESDestinationDescription']
        es_index = es_dest['IndexName']
+       es_type = es_dest.get('TypeName')
        es = connect_elasticsearch()
        for record in records:
            obj_id = uuid.uuid4()
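For reference, `dict.get` returns `None` (or a supplied default) instead of raising `KeyError`, so the two forms in this diff are equivalent; a quick self-contained check:

```python
es_dest = {'IndexName': 'logs'}  # 'TypeName' deliberately absent

# try/except form
try:
    es_type_a = es_dest['TypeName']
except KeyError:
    es_type_a = None

# dict.get form: one line, same result
es_type_b = es_dest.get('TypeName')

assert es_type_a is None and es_type_b is None
```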
codereview_python_data_11414
    testcase.put()

    fuzzing_strategies = (
-       libfuzzer_stats.LIBFUZZER_FUZZING_STRATEGIES.search(
-           crash.fuzzing_log if crash.fuzzing_log else ''))
    if fuzzing_strategies:
        assert len(fuzzing_strategies.groups()) == 1

Instead of doing this here, could you just initialize fuzzing_log to ''?

    testcase.put()

    fuzzing_strategies = (
+       libfuzzer_stats.LIBFUZZER_FUZZING_STRATEGIES.search(crash.fuzzing_log))
    if fuzzing_strategies:
        assert len(fuzzing_strategies.groups()) == 1
codereview_python_data_11423
        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results

-   def update_hyperparameters(self, eps=1e-15):
        """Update hyperparameters like IoU thresholds for assigner and beta for
        SmoothL1 loss based on the training statistics.

Use EPS=1e-15 as that in atss_head or FCOS head.

        bbox_results.update(loss_bbox=loss_bbox)
        return bbox_results

+   def update_hyperparameters(self):
        """Update hyperparameters like IoU thresholds for assigner and beta for
        SmoothL1 loss based on the training statistics.
codereview_python_data_11435
class StackWidget(urwid.Frame):
    def __init__(self, window, widget, title, focus):
-       self.f = focus
        self.window = window
        if title:

This needs a more self-explaining name. Maybe `.is_focused`?

class StackWidget(urwid.Frame):
    def __init__(self, window, widget, title, focus):
+       self.is_focus = focus
        self.window = window
        if title:
codereview_python_data_11439
from torch.nn import BatchNorm2d, ReLU

from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,
-                        unmap)
from ..builder import HEADS
from .anchor_head import AnchorHead

what if it is not sigmoid?

from torch.nn import BatchNorm2d, ReLU

from mmdet.core import (anchor_inside_flags, images_to_levels, multi_apply,
+                        reduce_mean, unmap)
from ..builder import HEADS
from .anchor_head import AnchorHead
codereview_python_data_11441
        else:
            context['summary'] = _("Products matching '%(query)s'") % {'query': q}
            context['search_term'] = q
-       context['alert_form'] = ProductAlertForm(initial={
-           'user':self.request.user,
-       })
        return context

This raises an exception for me because `ProductAlertForm` requires an additional argument `product`. The solution is a bit tricky, I think, because we have to create a form for each product in the list. My suggestion would be to use a template tag that creates the product form on the fly. It could be something like this: `{% get_product_alert_form product as product_alert_form %}` This assumes that the assignment tag has the context passed through to get access to the `user` object. Alternatively, this could be passed in as well. What do you think? Is there a more sensible solution for this?

        else:
            context['summary'] = _("Products matching '%(query)s'") % {'query': q}
            context['search_term'] = q
        return context
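A rough sketch of the assignment tag the reviewer describes, assuming Django's `simple_tag(takes_context=True)` API (which supports `{% ... as var %}` assignment); `ProductAlertForm` is stubbed here because its real signature lives in the project:

```python
from django import template

register = template.Library()


class ProductAlertForm:
    """Stand-in for the project's real form, which takes user and product."""
    def __init__(self, user=None, product=None):
        self.user = user
        self.product = product


@register.simple_tag(takes_context=True)
def get_product_alert_form(context, product):
    # Build a per-product form on the fly, pulling the user from the
    # template context as the reviewer suggests.
    return ProductAlertForm(user=context.get('user'), product=product)
```

In a template this would then be used as `{% get_product_alert_form product as product_alert_form %}`.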
codereview_python_data_11449
if distro_name == "debian": if "sid" in distro_version or Version(distro_version) > Version("7"): - return DebianOS8Util() else: - return DebianOSUtil() if distro_name == "redhat" \ or distro_name == "centos" \ Given that we're now supporting up through Debian 10, I suspect this logic needs to be re-examined. You may want to rename `DebianOSUtil()` to `DebianOSOldUtil()` and `DebianOS8Util()` to `DebianOSModernUtil()` or something similar, just to make clear what's going on here. if distro_name == "debian": if "sid" in distro_version or Version(distro_version) > Version("7"): + return DebianOSModernUtil() else: + return DebianOSBaseUtil() if distro_name == "redhat" \ or distro_name == "centos" \
codereview_python_data_11458
        self.angles = np.rad2deg(np.array(self.angles))

    def plot(self, ax=None, ref=False, **kwargs):
-       """Plots data into standard ramachandran plot.

        Each time step in :attr:`Ramachandran.angles` is plotted onto
        the same graph.

```suggestion
        """Plots data into standard Ramachandran plot.
```
(this was my mistake!)

        self.angles = np.rad2deg(np.array(self.angles))

    def plot(self, ax=None, ref=False, **kwargs):
+       """Plots data into standard Ramachandran plot.

        Each time step in :attr:`Ramachandran.angles` is plotted onto
        the same graph.
codereview_python_data_11466
    return decorator


def run_distributed(num_devices):
    def decorator(f):
        if inspect.isclass(f):

Is there a way we can make the distribution strategy configurable? This might be useful for certain types of bugs

    return decorator


+# TODO: Add support for other distribution strategies
def run_distributed(num_devices):
    def decorator(f):
        if inspect.isclass(f):
codereview_python_data_11470
            order by notice_date asc
            """, t)
        sids = [r[0] for r in c.fetchall()]
-       return [self.futures_contract_for_id(sid) for sid in sids]

    @property
    def sids(self):

I didn't know about the :name syntax. :+1:

            order by notice_date asc
            """, t)
        sids = [r[0] for r in c.fetchall()]
+       return [self._retrieve_futures_contract(sid) for sid in sids]

    @property
    def sids(self):
codereview_python_data_11473
                    self.assertEqual(str(e), message)

            if records[0].seq.alphabet.letters is not None:
                self.assertNotEqual(format, t_format,
-                                   "Should be able to re-write in the original format!")
                # Carry on to the next format:
                continue

I find it rather misleading that you make a list of expected ``SeqRecord`` objects but with truncated sequences. I would find it clearer to have a list of expected identifiers (strings in full) and truncated sequences - to match the list of expected sequence lengths.

                    self.assertEqual(str(e), message)

            if records[0].seq.alphabet.letters is not None:
                self.assertNotEqual(format, t_format,
+                                   "Should be able to re-write "
+                                   "in the original format!")
                # Carry on to the next format:
                continue
codereview_python_data_11476
        if (return_type.is_pyobject
                and (self.exception_value or self.exception_check)
                and self.exception_check != '+'):
-           # Exception clause is silently ignored for functions returning Python object.
-           self.exception_check = False
        else:
            if self.exception_value is None and self.exception_check and self.exception_check != '+':
                # Use an explicit exception return value to speed up exception checks.

Maybe we should only allow `except *` for functions returning PyObjects (which hopefully matches what gets added with the `cfunc` decorator). Thus _other_ exception statements (e.g. `except -1`) would still be an error?

        if (return_type.is_pyobject
                and (self.exception_value or self.exception_check)
                and self.exception_check != '+'):
+           if self.exception_value is None and self.exception_check is True:
+               # Functions in pure python mode defaults to always check return value for exception
+               # (equivalent to except * declaration in Cython language). In this case exception clause
+               # is silently ignored for functions returning Python object.
+               self.exception_check = False
+           else:
+               error(self.pos, "Exception clause not allowed for function returning Python object")
        else:
            if self.exception_value is None and self.exception_check and self.exception_check != '+':
                # Use an explicit exception return value to speed up exception checks.
codereview_python_data_11477
"scripts", typing.Sequence[str], [], """ Execute a script. The script name may include wild card. If you include wild card, - don't forget to enclose the script name in single or double quotes. """ ) Please add a simple example here. I assume something like: `some/folder/*.py` "scripts", typing.Sequence[str], [], """ Execute a script. The script name may include wild card. If you include wild card, + don't forget to enclose the script name in single or double quotes. + Example: mitmproxy -s "some/folder/*.py" """ )
codereview_python_data_11484
    def __init__(self, engine, scenario=None):
        super(Scenario, self).__init__()
        self.engine = engine
-       if isinstance(scenario, string_types):
-           scenario = {Scenario.SCRIPT: scenario}
        self.data = scenario

    def get(self, key, default=defaultdict):

This is bad. It does not play well with concept of passing existing scenario and modifications to it. This case should not exist.

    def __init__(self, engine, scenario=None):
        super(Scenario, self).__init__()
        self.engine = engine
        self.data = scenario

    def get(self, key, default=defaultdict):
codereview_python_data_11487
yield stream["quality"], FilmOnHLS(self.session, vod_id=vod_id, quality=stream["quality"]) else: - if not channel or channel == "sat-1-schweiz": channel = http.get(self.url, schema=self._channel_id_schema) data = self.api.channel(channel) for stream in data["streams"]: I'd create a list of channels that should be handled differently and then check `channel in self._special_case_channels`. In case there are other in the future :) yield stream["quality"], FilmOnHLS(self.session, vod_id=vod_id, quality=stream["quality"]) else: + if not channel or channel in self._special_case_channels: channel = http.get(self.url, schema=self._channel_id_schema) data = self.api.channel(channel) for stream in data["streams"]:
codereview_python_data_11498
    # handle edata
    if share_edata:
-       # for each ntype
        for etype in canonical_etypes:
            # for each data field
            for k in g.edges[etype].data:

Should be "# for each etype"

    # handle edata
    if share_edata:
+       # for each etype
        for etype in canonical_etypes:
            # for each data field
            for k in g.edges[etype].data:
codereview_python_data_11500
    else:
        monkeypatch.delattr(sys, 'frozen', raising=False)

-   substitutions['uptime'] = "1:23:45"
-   substitutions['autoconfig_loaded'] = (
-       "yes" if autoconfig_loaded else "no")
-
    template = textwrap.dedent("""
        qutebrowser vVERSION{git_commit}
        Backend: {backend}

I'm not sure why `'uptime'` gets added here (instead of adding it in the `substitutions = { ... }` block above). Can you please move both to the place above?

    else:
        monkeypatch.delattr(sys, 'frozen', raising=False)

    template = textwrap.dedent("""
        qutebrowser vVERSION{git_commit}
        Backend: {backend}
codereview_python_data_11504
    assert xs == []


-def test_non_sequence_types_are_deprecated():
-   @given(permutations({1, 2, 3, 4}))
-   def inner(_permutation):
-       pass
-   with validate_deprecation():
-       inner()

:heart::heart::heart: that you wrote a test! It's a good one too, but we can make it more powerful:
- Decorate it with `@checks_deprecated_behaviour` (from the same utils module), so that we test both that the deprecation fires and that the behavior is otherwise correct.
- *Within* that, slip in a `@given(data=data(), xs=sets(integers()))`. We can then `p = data.draw(permutations(xs))` and `assert xs == set(p)` to test that the result is correct.

(We're using `data()` here to avoid nesting `@given`, which would give quadratic and thus very slow tests)

    assert xs == []


+@checks_deprecated_behaviour
+@given(data=data(), xs=sets(integers()))
+def test_non_sequence_types_are_deprecated(data, xs):
+   p = data.draw(permutations(xs))
+   assert xs == set(p)
codereview_python_data_11509
        self.clear_messages()

        # Read the node states in the degree-bucketing order.
-       null_v = utils.toindex(
-           F.pack([v_bkt.totensor() for v_bkt in null_v_buckets])
-           if len(null_v_buckets) > 0 else []
-       )
        reordered_v = utils.toindex(
            F.pack([v_bkt.totensor() for v_bkt in non_null_v_buckets])
            if len(non_null_v_buckets) > 0 else []

Same above. Only one null_v bucket.

        self.clear_messages()

        # Read the node states in the degree-bucketing order.
+       null_v = utils.toindex(null_v_bucket or [])
        reordered_v = utils.toindex(
            F.pack([v_bkt.totensor() for v_bkt in non_null_v_buckets])
            if len(non_null_v_buckets) > 0 else []
codereview_python_data_11512
        scoped_session, data_access = model_manager.get(self.model_name)
        with scoped_session as session:
            bucket_acls = []
-           policies = [policy for policy in data_access.scanner_iter(session, 'gcs_policy')]
-           for gcs_policy in policies:
                bucket = gcs_policy.parent
                project_id = bucket.parent.name
                acls = json.loads(gcs_policy.data)

nit: can you please make the naming consistent? change `policies` to `gcs_policies`

        scoped_session, data_access = model_manager.get(self.model_name)
        with scoped_session as session:
            bucket_acls = []
+           gcs_policies = [policy for policy in data_access.scanner_iter(session, 'gcs_policy')]
+           for gcs_policy in gcs_policies:
                bucket = gcs_policy.parent
                project_id = bucket.parent.name
                acls = json.loads(gcs_policy.data)
codereview_python_data_11516
from google.cloud.forseti.services.scanner import scanner_pb2_grpc
from google.cloud.forseti.services.server_config import server_pb2
from google.cloud.forseti.services.server_config import server_pb2_grpc
-from google.cloud.forseti.services.utils import oneof, opencensus_enabled


# pylint: disable=too-many-instance-attributes

nit: can you break this up into 2 separate import statements?
```
from google.cloud.forseti.services.utils import oneof
from google.cloud.forseti.services.utils import opencensus_enabled
```

from google.cloud.forseti.services.scanner import scanner_pb2_grpc
from google.cloud.forseti.services.server_config import server_pb2
from google.cloud.forseti.services.server_config import server_pb2_grpc
+from google.cloud.forseti.services.utils import oneof
+from google.cloud.forseti.services.utils import is_opencensus_enabled


# pylint: disable=too-many-instance-attributes
codereview_python_data_11522
    def __init__(
        self, full_name=None, group_identifier=None, identifier=None,
        path_separator='/', user_directory=None, username=None):
-       """Initializes an user account artifact.

        Args:
            full_name (Optional[str]): name describing the user e.g. full name.

"...a user account..."

    def __init__(
        self, full_name=None, group_identifier=None, identifier=None,
        path_separator='/', user_directory=None, username=None):
+       """Initializes a user account artifact.

        Args:
            full_name (Optional[str]): name describing the user e.g. full name.
codereview_python_data_11525
        self.get_client().startUntrustedTransaction(True, inputIndex, chipInputs, redeemScripts[inputIndex])
        if changePath:
-           outputData = self.get_client().finalizeInput(output, format_satoshis_plain(outputAmount), format_satoshis_plain(tx.get_fee()), changePath, bfh(rawTx))
        else:
            outputData = self.get_client().finalizeInputFull(txOutput)

`output` is `str` here but `btchip-python` expects `bytes`. You can import `to_bytes` from `electrum.util` to convert it. Not sure if it's better to convert here or where its value gets set initially.

        self.get_client().startUntrustedTransaction(True, inputIndex, chipInputs, redeemScripts[inputIndex])
        if changePath:
+           outputData = self.get_client().finalizeInput(to_bytes(output), format_satoshis_plain(outputAmount), format_satoshis_plain(tx.get_fee()), changePath, bfh(rawTx))
        else:
            outputData = self.get_client().finalizeInputFull(txOutput)
codereview_python_data_11530
        The correctness of the gradients is assumed, since the forward
        propagation is tested to be correct and we only use built-in tf
        ops. However, we perform a simple test to make sure that
-       backprop can actually run. We treat the flows as a tf.Variable
-       and optimize them to minimize the difference between the
-       interpolated image and the input image.
        """
        batch_size, height, width, num_channels = [4, 5, 6, 7]
        image_shape = [batch_size, height, width, num_channels]

```suggestion
        The correctness of the gradients is assumed, since the forward
        propagation is tested to be correct and we only use built-in tf
        ops. However, we perform a simple test to make sure that
        backprop can actually run.
```

        The correctness of the gradients is assumed, since the forward
        propagation is tested to be correct and we only use built-in tf
        ops. However, we perform a simple test to make sure that
+       backprop can actually run.
        """
        batch_size, height, width, num_channels = [4, 5, 6, 7]
        image_shape = [batch_size, height, width, num_channels]
codereview_python_data_11534
    mnemonic = ' '.join(mnemonic.split())
    try:
-       from Crypto.Protocol import PBKDF2

        def pseudorandom(self, key, msg):
            """Pseudorandom function for pbkdf2"""

Shouldn't this be `from Crypto.Protocol.KDF import PBKDF2`?

    mnemonic = ' '.join(mnemonic.split())
    try:
+       from Crypto.Protocol.KDF import PBKDF2

        def pseudorandom(self, key, msg):
            """Pseudorandom function for pbkdf2"""
codereview_python_data_11536
    @classmethod
    def can_handle_url(cls, url):
-       return Vimeo._url_re.match(url)

    def _get_streams(self):
        if "player.vimeo.com" in self.url:

```py
return cls._url_re.match(url)
```

    @classmethod
    def can_handle_url(cls, url):
+       return cls._url_re.match(url)

    def _get_streams(self):
        if "player.vimeo.com" in self.url:
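Why `cls` beats a hard-coded class name in a classmethod: a subclass that overrides the attribute is picked up automatically. A minimal illustration with hypothetical plugin classes:

```python
import re

class BasePlugin:
    _url_re = re.compile(r"https://example\.com/.*")

    @classmethod
    def can_handle_url(cls, url):
        # resolves against the class the method was called on, not BasePlugin
        return cls._url_re.match(url)

class SpecialPlugin(BasePlugin):
    _url_re = re.compile(r"https://special\.example\.com/.*")

assert SpecialPlugin.can_handle_url("https://special.example.com/stream")
assert not SpecialPlugin.can_handle_url("https://example.com/stream")
```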
codereview_python_data_11537
"mxnet-cu{cuda_v}"), CudaPackage("tensorflow-gpu", { "90" : [PckgVer("1.12.0", python_max_ver="3.7")], - "100" : [PckgVer("1.15.3", python_max_ver="3.7"), PckgVer("2.2.0", python_max_ver="3.7"), \ - "2.2.0"] }), CudaHttpPackage("torch", { "90" : ["http://download.pytorch.org/whl/cu{cuda_v}/torch-1.1.0-{platform}.whl"], "100" : ["http://download.pytorch.org/whl/cu{cuda_v}/torch-1.4.0+cu{cuda_v}-{platform}.whl"] }), ```suggestion "100" : [PckgVer("1.15.3", python_max_ver="3.7"), PckgVer("2.1.0", python_max_ver="3.7"), \ ``` We already have 2.2 supported. Check the line bellow. "mxnet-cu{cuda_v}"), CudaPackage("tensorflow-gpu", { "90" : [PckgVer("1.12.0", python_max_ver="3.7")], + "100" : [ + PckgVer("1.15.2", python_max_ver="3.7"), + PckgVer("2.1.1", python_max_ver="3.7"), + "2.2.0"] + }), CudaHttpPackage("torch", { "90" : ["http://download.pytorch.org/whl/cu{cuda_v}/torch-1.1.0-{platform}.whl"], "100" : ["http://download.pytorch.org/whl/cu{cuda_v}/torch-1.4.0+cu{cuda_v}-{platform}.whl"] }),
codereview_python_data_11539
        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
-evaluation = dict(interval=1, metric=['bbox', 'segm'])
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')

Better to move it to L221, since evaluation config can be related to the dataset.

        # dict(type='TensorboardLoggerHook')
    ])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
codereview_python_data_11541
    An equivalence theorem relating the evolution of the g(n, m, p)
    and g(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156–176.
    """
-   G=nx.random_graph(n, m, p, seed=seed)
    return nx.projected_graph(G, range(n))


def k_random_intersection_graph(n,m,k):

This needs to be bipartite.random_graph

    An equivalence theorem relating the evolution of the g(n, m, p)
    and g(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156–176.
    """
+   G=bipartite.random_graph(n, m, p, seed=seed)
    return nx.projected_graph(G, range(n))


def k_random_intersection_graph(n,m,k):
codereview_python_data_11545
        eval_kwargs.update(dict(metric=args.eval, **kwargs))
        metric = dataset.evaluate(outputs, **eval_kwargs)
        print(metric)
-       metric_dict = {args.config: metric}
        if args.work_dir is not None and rank == 0:
            mmcv.dump(metric_dict, json_file)

The structure is a little bit weird, the following structure might be better.
```JSON
dict(config=args.config, metric=metric)
```

        eval_kwargs.update(dict(metric=args.eval, **kwargs))
        metric = dataset.evaluate(outputs, **eval_kwargs)
        print(metric)
+       metric_dict = dict(config=args.config, metric=metric)
        if args.work_dir is not None and rank == 0:
            mmcv.dump(metric_dict, json_file)
codereview_python_data_11546
    for pos, line in enumerate(grofile, start=-2):
        # 2 header lines, 1 box line at end
        if pos == n_atoms:
-           unitcell = list(map(float, line.split()))
            continue
        if pos < 0:
            continue

I thought we benchmarked those to use `np.float32`?

    for pos, line in enumerate(grofile, start=-2):
        # 2 header lines, 1 box line at end
        if pos == n_atoms:
+           unitcell = np.float32(line.split())
            continue
        if pos < 0:
            continue
codereview_python_data_11548
import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.restutil as restutil
import azurelinuxagent.common.utils.textutil as textutil
-from azurelinuxagent.common.utils.textutil import parse_doc, findall, find, findtext, \
-    getattrib, gettext, remove_bom, get_bytes_from_pem
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common.utils.cryptutil import CryptUtil
from azurelinuxagent.common.protocol.restapi import *

@SRIKKANTH We've tended to import that package name and then reference methods within the package by prepending the name. Could we adopt that pattern here as well (e.g., `textutil.parse_doc` vs. `parse_doc`).

import azurelinuxagent.common.logger as logger
import azurelinuxagent.common.utils.restutil as restutil
import azurelinuxagent.common.utils.textutil as textutil
import azurelinuxagent.common.utils.fileutil as fileutil
from azurelinuxagent.common.utils.cryptutil import CryptUtil
from azurelinuxagent.common.protocol.restapi import *
codereview_python_data_11550
        )
        self.add_option(
            "cert_passphrase", Optional[str], None,
-           "Passphrase for decyrpting the private key provided in the --cert setting."
        )
        self.add_option(
            "ciphers_client", Optional[str], None,

Nit: We always call them options, not settings.
```suggestion
            "Passphrase for decrypting the private key provided in the --cert option."
```

        )
        self.add_option(
            "cert_passphrase", Optional[str], None,
+           "Passphrase for decrypting the private key provided in the --cert option."
        )
        self.add_option(
            "ciphers_client", Optional[str], None,
codereview_python_data_11551
        default=False, description='Use multiprocessing to do scheduling in parallel.')
    parallel_scheduling_processes = parameter.IntParameter(
-       default=None, description='The number of processes to use for scheduling in parallel.'
-                                 ' The default is the number of available CPUs')
    assistant = parameter.BoolParameter(
        default=False, description='Run any task from the scheduler.')

I think this will cause a warning, can you set the default to zero (`0`)?

        default=False, description='Use multiprocessing to do scheduling in parallel.')
    parallel_scheduling_processes = parameter.IntParameter(
+       default=0, description='The number of processes to use for scheduling in parallel.'
+                              ' By default the number of available CPUs will be used')
    assistant = parameter.BoolParameter(
        default=False, description='Run any task from the scheduler.')
codereview_python_data_11553
    k : int
        The k in "top-k".
    descending : bool
-       Controls whether to return the largest or smallest elements.
    idx : int
-       The key index we sort features along.

    Returns
    -------

Should we also return the index of the top-k features?

    k : int
        The k in "top-k".
    descending : bool
+       Controls whether to return the largest or smallest elements, defaults to True.
    idx : int
+       The key index we sort features along, defaults to -1.

    Returns
    -------
codereview_python_data_11555
        yield lst[i:i + n]


-def fetch_tracks_listened_to(lb_conn, mb_conn, ts):
    """ Actually fetch the top discoveries for the given year and set of users """

    with lb_conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as lb_curs:

Let's also add the text from the PR to the docstrings of the respective reports.

        yield lst[i:i + n]


+def fetch_tracks_listened_to(lb_conn, mb_conn, start_ts, end_ts):
    """ Actually fetch the top discoveries for the given year and set of users """

    with lb_conn.cursor(cursor_factory=psycopg2.extras.DictCursor) as lb_curs:
codereview_python_data_11557
@pytest.mark.parametrize('task', tasks)
-@pytest.mark.parametrize('output', ['array', 'dataframe'])
@pytest.mark.parametrize('eval_sizes', [[0.5, 1, 1.5], [0]])
@pytest.mark.parametrize('eval_names_prefix', ['specified', None])
def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, client):
    # Use larger trainset to prevent premature stopping due to zero loss, causing num_trees() < n_estimators.
    # Use small chunk_size to avoid single-worker allocation of eval data partitions.
    n_samples = 1000

why are you omitting sparse matrices? Did you run into some issues with them?

@pytest.mark.parametrize('task', tasks)
+@pytest.mark.parametrize('output', data_output)
@pytest.mark.parametrize('eval_sizes', [[0.5, 1, 1.5], [0]])
@pytest.mark.parametrize('eval_names_prefix', ['specified', None])
def test_eval_set_no_early_stopping(task, output, eval_sizes, eval_names_prefix, client):
+   if task == 'ranking' and output == 'scipy_csr_matrix':
+       pytest.skip('LGBMRanker is not currently tested on sparse matrices')
+
    # Use larger trainset to prevent premature stopping due to zero loss, causing num_trees() < n_estimators.
    # Use small chunk_size to avoid single-worker allocation of eval data partitions.
    n_samples = 1000
codereview_python_data_11559
    ('/version', VersionResource),
    ('/ping', PingResource),
    ('/import', DataImportResource),
-   ('/erc20details/', ERC20TokenInfo),
]

logger = logging.getLogger(__name__)

as mentioned above change location

    ('/version', VersionResource),
    ('/ping', PingResource),
    ('/import', DataImportResource),
]

logger = logging.getLogger(__name__)
codereview_python_data_11580
        if line[0] == '>':
            title = line[1:].rstrip()
            break

    # Main logic
    # Note, remove trailing whitespace, and any internal spaces

I think this if statement should not be conditional - consider empty sequences (zero length), e.g.

```
>empty

```

with a blank line afterwards, and:

```
>empty
```

with no line afterwards. These are both valid but corner case FASTA files.

        if line[0] == '>':
            title = line[1:].rstrip()
            break
+   else:  # no break encountered
+       return  # Premature end of file, or just empty?

    # Main logic
    # Note, remove trailing whitespace, and any internal spaces
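A self-contained sketch of the for/else pattern the fix uses, applied to scanning for a FASTA title line (the helper name and the handle contents are made up, not Biopython's actual parser):

```python
from io import StringIO

def read_title(handle):
    """Return the first FASTA title, or None if the file ends first."""
    title = None
    for line in handle:
        if line.startswith('>'):
            title = line[1:].rstrip()
            break
    else:  # no break: the handle was empty or held no title line
        return None
    return title

assert read_title(StringIO(">seq1\nACGT\n")) == "seq1"
assert read_title(StringIO("")) is None  # empty file handled gracefully
```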
codereview_python_data_11581
-def verse(n):
    pass

This is a too ambiguous name, could it be changed to something more meaningful?

+def verse(day_number):
    pass
codereview_python_data_11583
        'project_locale': project_locale,
        'resource_count': resource_count,
        'tags': (
-           len(
-               TagsTool(projects=[project], locales=[locale], priority=True)
-           ) or False
            if project.tags_enabled
            else None
        )

`or False` is required?

        'project_locale': project_locale,
        'resource_count': resource_count,
        'tags': (
+           len(TagsTool(projects=[project], locales=[locale], priority=True))
            if project.tags_enabled
            else None
        )
codereview_python_data_11593
        return answer

    def _update_left_right_taxon_values(self, left_value):
-       """ update the left and right values in the table """
        if not left_value:
            return

Remove leading space on docstring?

        return answer

    def _update_left_right_taxon_values(self, left_value):
+       """update the left and right values in the table """
        if not left_value:
            return
codereview_python_data_11594
""" def replace_text_elements(node): - """Recursively traverse the AST and perform find and replace on text values only""" if type(node) == ast.TextElement: node.value = node.value.replace(find, replace) return node This is not recursive, it doesn't call itself. It does traverse a tree, it seems, but that's not done by this function. :) """ def replace_text_elements(node): + """Perform find and replace on text values only""" if type(node) == ast.TextElement: node.value = node.value.replace(find, replace) return node
codereview_python_data_11603
        return key if key[-1:] == '/' or key == '' else key + '/'

    @staticmethod
-   def _check_deprecated_argument(arguments):
        """
        If `encrypt_key` is part of the arguments raise an exception

-       :param arguments: Arguments dictionary
        :return: None
        """
-       if 'encrypt_key' in arguments:
            raise DeprecatedBotoClientException(
                'encrypt_key deprecated in boto3. Please refer to boto3 documentation for encryption details.')

Wouldn't it be best to make this `_check_deprecated_argument(**kwargs)`? to keep with Python practice

        return key if key[-1:] == '/' or key == '' else key + '/'

    @staticmethod
+   def _check_deprecated_argument(**kwargs):
        """
        If `encrypt_key` is part of the arguments raise an exception

        :return: None
        """
+       if 'encrypt_key' in kwargs:
            raise DeprecatedBotoClientException(
                'encrypt_key deprecated in boto3. Please refer to boto3 documentation for encryption details.')
codereview_python_data_11606
    'LOCATION_VIOLATION': 'location_violations',
    'LOG_SINK_VIOLATION': 'log_sink_violations',
    'RETENTION_VIOLATION': 'retention_violations',
-   'ROLE_VIOLATION': 'custom_role_violations',
    'SERVICE_ACCOUNT_KEY_VIOLATION': (
        'service_account_key_violations'),
    'EXTERNAL_PROJECT_ACCESS_VIOLATION': 'external_project_access_violations'

Change this to either `'CUSTOM_ROLE_VIOLATION': 'custom_role_violations',` or `'ROLE_VIOLATION': 'role_violations',` So that it matches the pattern of other violations.

    'LOCATION_VIOLATION': 'location_violations',
    'LOG_SINK_VIOLATION': 'log_sink_violations',
    'RETENTION_VIOLATION': 'retention_violations',
+   'CUSTOM_ROLE_VIOLATION': 'custom_role_violations',
    'SERVICE_ACCOUNT_KEY_VIOLATION': (
        'service_account_key_violations'),
    'EXTERNAL_PROJECT_ACCESS_VIOLATION': 'external_project_access_violations'
codereview_python_data_11610
        super().__init__()

    def __getitem__(self, key):
-       """Return the sequence (as a bytes object) for the requested region."""
        length = self.length
        if isinstance(key, slice):
            start, end, step = key.indices(length)

"""Return the sequence contents (as a bytes object) for the requested region."""

        super().__init__()

    def __getitem__(self, key):
+       """Return the sequence contents (as a bytes object) for the requested region."""
        length = self.length
        if isinstance(key, slice):
            start, end, step = key.indices(length)
codereview_python_data_11613
    return 'VerificationCode {code}'.format(code=verification_code)


-def _http_request(url, body=None, method='POST', force_reauthorization=False):
    """Make a POST request to the specified URL."""
    authorization = _get_authorization(force_reauthorization)
    headers = {

nit: I don't love function names that aren't verbs, but I guess there's nothing better here.

    return 'VerificationCode {code}'.format(code=verification_code)


+def _http_request(url,
+                  body=None,
+                  method=_POST_METHOD,
+                  force_reauthorization=False):
    """Make a POST request to the specified URL."""
    authorization = _get_authorization(force_reauthorization)
    headers = {
codereview_python_data_11614
        Returns an iloc object providing a convenient interface to
        slice and index into the Dataset using row and column indices.
        Allow selection by integer index, slice and list of integer
-       indices and boolean arrays, e.g.:

        Examples:

Bit redundant after 'e.g:' (which is sufficient)

        Returns an iloc object providing a convenient interface to
        slice and index into the Dataset using row and column indices.
        Allow selection by integer index, slice and list of integer
+       indices and boolean arrays.

        Examples:
codereview_python_data_11621
        state ^= lfsr_mask
        return struct.pack("<L", state)[:-1]

-   # def do_build(self):
-   #     #make sure post build is called
-   #     self.raw_packet_cache = None
-   #     super(Packet, self).do_build()
-
    def post_build(self, p, pay):
        # Switch payload and CRC
        crc = p[-3:]

What is the point of this ?

        state ^= lfsr_mask
        return struct.pack("<L", state)[:-1]

    def post_build(self, p, pay):
        # Switch payload and CRC
        crc = p[-3:]
codereview_python_data_11647
        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
-       self.assertEqual(rs['FailedEntries'], [])\

        # clean up
        self._clean_up()

nit: unnecessary `\` at the end of the line

        self.assertIn('FailedEntryCount', rs)
        self.assertIn('FailedEntries', rs)
        self.assertEqual(rs['FailedEntryCount'], 0)
+       self.assertEqual(rs['FailedEntries'], [])

        # clean up
        self._clean_up()
codereview_python_data_11651
-#-*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org

Personally, I would prefer to see this separated into two tests, one covering each `ValueError`:
- a non valid box dimension
- shape coercion failure

Test each raisable `ValueError` using the `matches=` parameter of `pytest.raises(ValueError)`. I do appreciate that you have covered both cases in your tests though :). However if @lilyminium or @tylerjereddy think this is unnecessary I will defer to them.

+# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 fileencoding=utf-8
#
# MDAnalysis --- https://www.mdanalysis.org
codereview_python_data_11654
    def remove(self, correlation_id):
        try:
-           return self._futures.pop(correlation_id)
        except KeyError:
            raise FutureCollectionKeyError(
                "no such correlation id: {}".format(correlation_id))

This looks like a separate change? Should this be in the next commit?

    def remove(self, correlation_id):
        try:
+           del self._futures[correlation_id]
        except KeyError:
            raise FutureCollectionKeyError(
                "no such correlation id: {}".format(correlation_id))
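For context on the behavioural difference this diff introduces: `dict.pop(key)` removes the entry and returns its value, `del d[key]` removes it and evaluates to nothing, and both raise `KeyError` for a missing key. A quick check:

```python
d = {'abc123': 'future-object'}
value = d.pop('abc123')      # returns 'future-object' and removes the key
assert value == 'future-object' and 'abc123' not in d

d = {'abc123': 'future-object'}
del d['abc123']              # removes the key; the statement yields no value
assert 'abc123' not in d
```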
codereview_python_data_11663
    # create daily and cumulative stats dataframe
    daily_perfs = []
    cum_perfs = []
    for perf in perfs:
        if 'daily_perf' in perf:
-           perf['daily_perf'].update(perf['recorded_vars'])
            daily_perfs.append(perf['daily_perf'])
        else:
            cum_perfs.append(perf)

Tending to think that we should do the merge/update inside tradesimulation. i.e. instead of adding a recorded_vars key, we just merge into the daily_perf at the tradesimulation level. So as to have less moving parts.

    # create daily and cumulative stats dataframe
    daily_perfs = []
    cum_perfs = []
+   # TODO: the loop here could overwrite expected properties
+   # of daily_perf. Could potentially raise or log a
+   # warning.
    for perf in perfs:
        if 'daily_perf' in perf:
+
+           perf['daily_perf'].update(
+               perf['daily_perf'].pop('recorded_vars')
+           )
            daily_perfs.append(perf['daily_perf'])
        else:
            cum_perfs.append(perf)
codereview_python_data_11665
-import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
-from mmcv.cnn import build_conv_layer, build_norm_layer
from torch.nn.modules.utils import _pair

from mmdet.models.backbones.resnet import Bottleneck, ResNet

Will `multi_apply` make it easier?

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
+from mmcv.cnn import build_conv_layer, build_norm_layer, kaiming_init
from torch.nn.modules.utils import _pair

from mmdet.models.backbones.resnet import Bottleneck, ResNet
codereview_python_data_11676
class TestHydrogenBondAnalysis(object):

    @staticmethod
-   @pytest.fixture()
    def universe():
        return MDAnalysis.Universe(PDB_helix)

    @staticmethod
-   @pytest.fixture()
    def values(universe):
        return {
            'num_bb_hbonds': universe.atoms.n_residues - universe.select_atoms('resname PRO').n_residues - 4,

Shouldn't a method like this be replaced with a fixture? This would then make it easier to cache this result (if possible) by increasing the scope

class TestHydrogenBondAnalysis(object):

    @staticmethod
+   @pytest.fixture(scope='class')
    def universe():
        return MDAnalysis.Universe(PDB_helix)

    @staticmethod
+   @pytest.fixture(scope='class')
    def values(universe):
        return {
            'num_bb_hbonds': universe.atoms.n_residues - universe.select_atoms('resname PRO').n_residues - 4,
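What `scope='class'` buys, in isolation: a class-scoped fixture is constructed once per test class instead of once per test, so expensive setup (like loading a Universe) is cached across the class's tests. A minimal, self-contained sketch (the counter exists only to demonstrate caching):

```python
import pytest

CALLS = []

class TestScopedFixture:
    @pytest.fixture(scope='class')
    def expensive_resource(self):
        CALLS.append(1)          # record each construction
        return {'data': 42}

    def test_first(self, expensive_resource):
        assert expensive_resource['data'] == 42

    def test_second(self, expensive_resource):
        assert len(CALLS) == 1   # still one construction: the fixture was cached
```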
codereview_python_data_11680
        **kwargs,
    ):
        if groupby_args.get("level", None) is None and (
-           not (isinstance(by, (type(query_compiler)) or hashable(by)))
            or isinstance(by, pandas.Grouper)
        ):
            by = try_cast_to_pandas(by, squeeze=True)

probably a typo here:
```suggestion
        if groupby_args.get("level", None) is not None and (
```
otherwise, we're defaulting to pandas at any time when `by` is specified

        **kwargs,
    ):
        if groupby_args.get("level", None) is None and (
+           not (isinstance(by, (type(query_compiler))) or hashable(by))
            or isinstance(by, pandas.Grouper)
        ):
            by = try_cast_to_pandas(by, squeeze=True)
codereview_python_data_11684
     SettingValue(typ.Bool(), 'false'),
     "Whether to open windows instead of tabs."),

-   ('refresh-on-select',
-    SettingValue(typ.Bool(), 'false'),
-    "Whether the tabs should be loaded when doing session-load or after, when selecting the tab"),

    ('title-format',
     SettingValue(typ.FormatString(

This feature is commonly called lazy loading, so I'd say we should call the option `lazy-load` instead?

     SettingValue(typ.Bool(), 'false'),
     "Whether to open windows instead of tabs."),

+   ('lazy-load',
+    SettingValue(typ.Bool(), 'false'),
+    "Lazy loading the tabs from a session"),

    ('title-format',
     SettingValue(typ.FormatString(
codereview_python_data_11687
        self.subdomain = subdomain
        try:
            response = self.call(path, format=format, schema=schema, **extra_params)
-       except PluginError:
            self.subdomain = subdomain_buffer
-           raise
        self.subdomain = subdomain_buffer
        return response

```suggestion
        try:
            return self.call(path, format=format, schema=schema, **extra_params)
        finally:
            self.subdomain = subdomain_buffer
```

        self.subdomain = subdomain
        try:
            response = self.call(path, format=format, schema=schema, **extra_params)
+       finally:
            self.subdomain = subdomain_buffer
        self.subdomain = subdomain_buffer
        return response
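The general shape of the `finally` pattern suggested above: cleanup runs on both the normal and the exception path, so the temporary state is restored exactly once with no duplicated restore line. A self-contained sketch with a made-up client class:

```python
class Client:
    def __init__(self):
        self.subdomain = 'www'

    def call_with_subdomain(self, subdomain, func):
        saved = self.subdomain
        self.subdomain = subdomain
        try:
            return func(self)       # may raise; cleanup still happens
        finally:
            self.subdomain = saved  # restored on success *and* on error

client = Client()
try:
    client.call_with_subdomain('api', lambda c: 1 / 0)
except ZeroDivisionError:
    pass
assert client.subdomain == 'www'  # state restored despite the exception
```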
codereview_python_data_11690
import logging

from pymongo import MongoClient
-from pymongo.errors import ConnectionError

import bigchaindb
from bigchaindb.backend.connection import Connection

You are defining a property that has the same name as the `self.db` variable in `__init__`. Doesn't this create problems?

import logging

from pymongo import MongoClient
+from pymongo.errors import ConnectionFailure

import bigchaindb
from bigchaindb.backend.connection import Connection
codereview_python_data_11697
    deployment_tpl_output_path = (
        constants.DEPLOYMENT_TEMPLATE_OUTPUT_PATH.format(bucket_name))

-   print('Copying the Forseti {} deployment template to:\n {}'
          .format(self.config.installation_type,
                  deployment_tpl_output_path))

You can use the `\t` character to actually tab.

    deployment_tpl_output_path = (
        constants.DEPLOYMENT_TEMPLATE_OUTPUT_PATH.format(bucket_name))

+   print('Copying the Forseti {} deployment template to:\n\t{}'
          .format(self.config.installation_type,
                  deployment_tpl_output_path))
codereview_python_data_11703
-'''A superclass for reading [f]ixed-column type [f]lat-[f]ile records.'''

__docformat__ = "restructuredtext en"

We tend to use triple double quotes in Biopython, rather than triple single quotes.

+"""A superclass for reading [f]ixed-column type [f]lat-[f]ile records."""

__docformat__ = "restructuredtext en"
codereview_python_data_11706
class APITimelineEvent(pydantic.BaseModel):
-   event_type: UserTimelineEventType
-   user_name: str
-   created: int
-   metadata: APIEventMetadata
-
-
-class APIRecommendationNotificationTimelineEvent(pydantic.BaseModel):
-   # since `id` is not sent in APITimelineEvent
-   # this field is needed for deletion of Recommendation and Notification Events
-   id: int
    event_type: UserTimelineEventType
    user_name: str
    created: int

You can instead add the `id` field to APITimelineEvent. If all entities are not going to have it, you can make it as `id: Optional[int]`.

class APITimelineEvent(pydantic.BaseModel):
+   id: Optional[int]
    event_type: UserTimelineEventType
    user_name: str
    created: int
codereview_python_data_11711
    @pytest.mark.parametrize('modifiers, text', [
        (Qt.NoModifier, '2'),
-       (Qt.KeypadModifier, '2'),
    ])
    def test_number_press_keypad(self, fake_keyevent, keyparser,
                                 config_stub, modifiers, text):
-       """Make sure a <Num+2> binding yields the 2 binding."""
        config_stub.val.bindings.commands = {'normal': {
            '2': 'message-info 2',
-           '<Num+2>': 'message-info 2'}}
        keyparser._read_config('normal')
        keyparser.handle(fake_keyevent(Qt.Key_2, modifiers))
        command = 'message-info {}'.format(text)

Those changes seem incorrect - a `num-2` binding should still take precedence over a `2` binding (as `is_special` is only used for hints and marks).

    @pytest.mark.parametrize('modifiers, text', [
        (Qt.NoModifier, '2'),
+       (Qt.KeypadModifier, 'num-2'),
    ])
    def test_number_press_keypad(self, fake_keyevent, keyparser,
                                 config_stub, modifiers, text):
+       """Make sure a <Num+2> binding overrides the 2 binding."""
        config_stub.val.bindings.commands = {'normal': {
            '2': 'message-info 2',
+           '<Num+2>': 'message-info num-2'}}
        keyparser._read_config('normal')
        keyparser.handle(fake_keyevent(Qt.Key_2, modifiers))
        command = 'message-info {}'.format(text)
codereview_python_data_11712
    'root': {
        'level': logging.DEBUG,
        'handlers': ['console', 'file', 'errors'],
-       'port': 9020
    },
}

Should we use DEFAULT_SOCKET_LOGGING_PORT instead?

    'root': {
        'level': logging.DEBUG,
        'handlers': ['console', 'file', 'errors'],
+       'port': DEFAULT_SOCKET_LOGGING_PORT
    },
}
codereview_python_data_11713
        data = client.fetch_organization(resource_key)
        return FACTORIES['organization'].create_new(data, root=True)
    except api_errors.ApiExecutionError as e:
        data = {'name': resource_key}
        resource = FACTORIES['organization'].create_new(data, root=True)
        resource.add_warning(e)

Can we please add a warning log line here, so that this will show up in stackdriver instead of just the database?

        data = client.fetch_organization(resource_key)
        return FACTORIES['organization'].create_new(data, root=True)
    except api_errors.ApiExecutionError as e:
+       LOGGER.warn('Unable to fetch Organization %s: %s', resource_key, e)
        data = {'name': resource_key}
        resource = FACTORIES['organization'].create_new(data, root=True)
        resource.add_warning(e)
codereview_python_data_11717
import unittest

-from difference_of_squares import difference_of_squares, \
-    square_of_sum, sum_of_squares


# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0

It is preferable to use parentheses instead of backslashes for multiline imports. Example:
```Python
from difference_of_squares import (
    difference_of_squares,
    square_of_sum,
    sum_of_squares,
)
```

import unittest

+from difference_of_squares import (
+    difference_of_squares,
+    square_of_sum,
+    sum_of_squares
+)


# Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0
codereview_python_data_11718
    def sign_txin(self, txin_index, privkey_bytes, *, bip143_shared_txdigest_fields=None) -> str:
        txin = self.inputs()[txin_index]
        txin.validate_data(for_signing=True)
-       sighash = txin.sighash if txin.sighash is not None else Sighash.SIGHASH_ALL
-       sighash_type = '{0:02x}'.format(sighash)
        pre_hash = sha256d(bfh(self.serialize_preimage(txin_index,
                                                       bip143_shared_txdigest_fields=bip143_shared_txdigest_fields)))
        privkey = ecc.ECPrivkey(privkey_bytes)

Maybe it's just personal preference, but I find this more clear:
```suggestion
        sighash_type = sighash.to_bytes(length=1, byteorder="big").hex()
```

    def sign_txin(self, txin_index, privkey_bytes, *, bip143_shared_txdigest_fields=None) -> str:
        txin = self.inputs()[txin_index]
        txin.validate_data(for_signing=True)
+       sighash = txin.sighash if txin.sighash is not None else Sighash.ALL
+       sighash_type = sighash.to_bytes(length=1, byteorder="big").hex()
        pre_hash = sha256d(bfh(self.serialize_preimage(txin_index,
                                                       bip143_shared_txdigest_fields=bip143_shared_txdigest_fields)))
        privkey = ecc.ECPrivkey(privkey_bytes)
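Both spellings build the same two-hex-digit string for values that fit in one byte; a quick check (the value 1 stands in for a sighash flag):

```python
sighash = 1  # stand-in for a sighash flag such as SIGHASH_ALL
assert '{0:02x}'.format(sighash) == sighash.to_bytes(length=1, byteorder='big').hex() == '01'
# One behavioural difference: to_bytes raises OverflowError for values that
# do not fit in one byte, while format() would silently produce more digits.
```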
codereview_python_data_11729
"""Plotting library.""" from copy import deepcopy from io import BytesIO -from typing import Any, List, Dict, Tuple, Optional, Union import numpy as np ```suggestion from typing import Any, Dict, List, Optional, Tuple, Union ``` can you please alphabetize this list? I think that makes it a bit easier to keep track of what is being imported. """Plotting library.""" from copy import deepcopy from io import BytesIO +from typing import Any, Dict, List, Optional, Tuple, Union import numpy as np
codereview_python_data_11731
        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
-       thresh (float, optional): If not None, threshold for pred with scores
-           under this value being incorrect. Default to None.

    Returns:
        float | tuple[float]: If the input ``topk`` is a single integer,

This sentence is a little confusing. Is it better to say this? `If not None, predictions with scores under this threshold are to be considered incorrect`

        topk (int | tuple[int], optional): If the predictions in ``topk``
            matches the target, the predictions will be regarded as
            correct ones. Defaults to 1.
+       thresh (float, optional): If not None, predictions with scores under
+           this threshold are considered incorrect. Default to None.

    Returns:
        float | tuple[float]: If the input ``topk`` is a single integer,
codereview_python_data_11735
        assert msg in all_msgs

    def test_read_line_immediately(self, qtbot, ipc_server, caplog):
-       data = '{{"args": ["foo"], "target_arg": "tab", ' \
-              '"protocol_version": {}}}\n'.format(ipc.PROTOCOL_VERSION)
        socket = FakeSocket(data=data.encode('utf-8'))
        ipc_server._server = FakeServer(socket)

Please add parens and drop the `\`, i.e.:
```python
data = ('{{"args": ["foo"], "target_arg": "tab", '
        '"protocol_version": {}}}\n'.format(ipc.PROTOCOL_VERSION))
```

        assert msg in all_msgs

    def test_read_line_immediately(self, qtbot, ipc_server, caplog):
+       data = ('{{"args": ["foo"], "target_arg": "tab", '
+               '"protocol_version": {}}}\n'.format(ipc.PROTOCOL_VERSION))
        socket = FakeSocket(data=data.encode('utf-8'))
        ipc_server._server = FakeServer(socket)
codereview_python_data_11743
import re
import socket

from scapy.packet import Packet, Packet_metaclass, bind_layers
from scapy.fields import (Field, BitField, BitEnumField, XBitField, ByteField,
                          ByteEnumField, ShortField, ShortEnumField, IntField,

We don't need the RFCs inside the code... Remove them and simply link them (it takes too much space) ?

import re
import socket

+from scapy import pton_ntop
from scapy.packet import Packet, Packet_metaclass, bind_layers
from scapy.fields import (Field, BitField, BitEnumField, XBitField, ByteField,
                          ByteEnumField, ShortField, ShortEnumField, IntField,
codereview_python_data_11744
        If JMeter is still running - let's stop it.
        """
-       if self.distributed_servers and not self.settings.get("gui", False):
-           self.remote_shutdown()
-           time.sleep(10)
        # TODO: print JMeter's stdout/stderr on empty JTL
        shutdown_process(self.process, self.log)

We're using 3-step shutdown even for non-distributed tests

        If JMeter is still running - let's stop it.
        """
+       self.remote_shutdown()
+       time.sleep(10)
        # TODO: print JMeter's stdout/stderr on empty JTL
        shutdown_process(self.process, self.log)
codereview_python_data_11746
logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage will not be tracked. " "Error: {1}".format(cgroup.path, ustr(exception))) - @staticmethod - def get_extension_cgroup_name(extension_name): - # Since '-' is used as a separator in systemd unit names, we replace it with '_' to prevent side-effects. - return "azure-vmextensions-" + extension_name.replace('-', '_') - @staticmethod def get_processes_in_cgroup(cgroup_path): with open(os.path.join(cgroup_path, "cgroup.procs"), "r") as cgroup_procs: let's use CGroupConfigurator._VMEXTENSIONS_SLICE instead logger.warn("Cannot add cgroup '{0}' to tracking list; resource usage will not be tracked. " "Error: {1}".format(cgroup.path, ustr(exception))) @staticmethod def get_processes_in_cgroup(cgroup_path): with open(os.path.join(cgroup_path, "cgroup.procs"), "r") as cgroup_procs:
codereview_python_data_11755
    try:
        r = s.get(url, stream=True, verify=_verify, timeout=timeout)

        # check status code before attempting to read body
-       if r.status_code >= 400:
            raise Exception("Failed to download %s, response code %s" % (url, r.status_code))

        total = 0

nit: could we use `if not r.ok` here?

    try:
        r = s.get(url, stream=True, verify=_verify, timeout=timeout)

        # check status code before attempting to read body
+       if not r.ok:
            raise Exception("Failed to download %s, response code %s" % (url, r.status_code))

        total = 0
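For reference, `requests.Response.ok` is `True` exactly when the status code is below 400, so `not r.ok` matches the `status_code >= 400` check. A small demonstration without any network call (building a bare `Response` by hand is slightly unusual but works for this purpose):

```python
import requests

r = requests.Response()
r.status_code = 404
assert not r.ok and r.status_code >= 400  # client/server errors are not ok

r.status_code = 200
assert r.ok and r.status_code < 400       # success and redirects are ok
```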
codereview_python_data_11757
        self.module_name = EncodedString(self.module_name)
        self.context = context
        self.module_cname = Naming.module_cname
-       self.modulestate_cname = Naming.modulestate_cname
-       self.modulestateglobal_cname = Naming.modulestateglobal_cname
        self.module_dict_cname = Naming.moddict_cname
        self.method_table_cname = Naming.methtable_cname
        self.doc = ""

Hmmm Is `self.modulestate_cname` ever something else than `Naming.modulestate_cname`? Because if not, then this is a useless attribute.

        self.module_name = EncodedString(self.module_name)
        self.context = context
        self.module_cname = Naming.module_cname
        self.module_dict_cname = Naming.moddict_cname
        self.method_table_cname = Naming.methtable_cname
        self.doc = ""
codereview_python_data_11764
-from random import random
-
-<<<<<<< HEAD
-=======
-from bzt.utils import to_json
-from tests import BZTestCase, r
-
->>>>>>> cba72f5388bd3647b636c773e6893428e37da306
from bzt.modules.aggregator import ConsolidatingAggregator, DataPoint, KPISet, AggregatorListener
from tests import BZTestCase, r
from tests.mocks import MockReader

This is unintentional, obviously

from bzt.modules.aggregator import ConsolidatingAggregator, DataPoint, KPISet, AggregatorListener
+from bzt.utils import to_json
from tests import BZTestCase, r
from tests.mocks import MockReader
codereview_python_data_11765
    # Golang stacktraces.
    golang_panic_match = GOLANG_PANIC_REGEX.match(line)
    if golang_panic_match:
-       reason = golang_panic_match.group(1)
-       state.crash_type = _reduce_string(reason, GOLANG_PANIC_MAX_LEN)
        continue

    # Sanitizer SEGV crashes.

We auto-reduce all long strings at end, no need of this explicitly. Also, this will be different from all other parsing logic, so should match the style of truncating from right.

    # Golang stacktraces.
    golang_panic_match = GOLANG_PANIC_REGEX.match(line)
    if golang_panic_match:
+       state.crash_type = golang_panic_match.group(2)
        continue

    # Sanitizer SEGV crashes.
codereview_python_data_11770
    unpickle = SafeUnpickler
else:
-   if sys.version_info[0] >= 3:
-       builtins_mod = 'builtins'
-   else:
-       builtins_mod = '__builtin__'
-
    class SafeUnpickler(pickle.Unpickler):
        PICKLE_SAFE = {
            'copy_reg': set(['_reconstructor']),
-           builtins_mod: set(['object', 'list', 'set']),
            'collections': set(['deque']),
            'graphite.render.datalib': set(['TimeSeries']),
            'graphite.intervals': set(['Interval', 'IntervalSet']),

This whole branch can be eliminated, every supported python has cPickle

    unpickle = SafeUnpickler
else:
    class SafeUnpickler(pickle.Unpickler):
        PICKLE_SAFE = {
            'copy_reg': set(['_reconstructor']),
+           'builtins': set(['object', 'list', 'set']),
            'collections': set(['deque']),
            'graphite.render.datalib': set(['TimeSeries']),
            'graphite.intervals': set(['Interval', 'IntervalSet']),
codereview_python_data_11771
    if not mapping_uuid:
        return jsonify({})
    function_name = data.get('FunctionName') or ''
-   enabled = data.get('Enabled')
-   if enabled is None:
-       enabled = True
    batch_size = data.get('BatchSize') or 100
    mapping = update_event_source(mapping_uuid, function_name, enabled, batch_size)
    return jsonify(mapping)

Lines 967-969 can be simplified to:
```
enabled = data.get('Enabled', True)
```

    if not mapping_uuid:
        return jsonify({})
    function_name = data.get('FunctionName') or ''
+   enabled = data.get('Enabled', True)
    batch_size = data.get('BatchSize') or 100
    mapping = update_event_source(mapping_uuid, function_name, enabled, batch_size)
    return jsonify(mapping)
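One subtlety of this simplification worth knowing: `dict.get(key, default)` only falls back when the key is absent, whereas the original two-step form also replaced an explicit `None` value. A quick demonstration:

```python
data = {'Enabled': None}

# original form: explicit None is replaced by True
enabled = data.get('Enabled')
if enabled is None:
    enabled = True
assert enabled is True

# simplified form: the key exists, so the default is not used
assert data.get('Enabled', True) is None
```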
codereview_python_data_11774
""" Process the nonmandatory option `:doc:`, and extract fields `self.doc_file` and `self.doc_var`. If the option is not - provided, the fields are set to None. """ if "doc" not in self.options: self.doc_file = None backticks around `None`? """ Process the nonmandatory option `:doc:`, and extract fields `self.doc_file` and `self.doc_var`. If the option is not + provided, the fields are set to `None`. """ if "doc" not in self.options: self.doc_file = None
codereview_python_data_11776
""" def __init__(self, **kwargs): super().__init__() for name, value in kwargs.items(): setattr(self, name, value) This is the magic bit - short but elegant. Do you think it is self explanatory enough? """ def __init__(self, **kwargs): + """Initialize a new PairwiseAligner with the keyword arguments as attributes. + + This function subclasses `_aligners.PairwiseAligner` and loops over all + the keyword arguments that are given in the constructor to set them + as attributes on the object. This will call the `__setattr__` method to + do that. + """ super().__init__() for name, value in kwargs.items(): setattr(self, name, value)
codereview_python_data_11777
        max_incoming_connections=20,
        max_future_callback_workers=10)

-   consensus_registry = ConsensusRegistry()
-
-   consensus_notifier = ConsensusNotifier(consensus_service,
-                                          consensus_registry)

    # -- Setup P2P Networking -- #
    gossip = Gossip(

Can we make this a separate class and give the operations on it descriptive names? For example, `ConsensusRegistry` with methods `register_engine()`, `unregister_engine()`, `get_connection_id_by_engine_info()`, and `get_engine_info_by_connection_id()`.

        max_incoming_connections=20,
        max_future_callback_workers=10)

+   consensus_notifier = ConsensusNotifier(consensus_service)

    # -- Setup P2P Networking -- #
    gossip = Gossip(
codereview_python_data_11780
            annotations_file = annotations_file,
            ratio=True, ltrb=True, random_shuffle=True)
-       self.decode = ops.HostDecoder(device = "cpu", output_type = types.RGB)

        # Augumentation techniques
        self.crop = ops.RandomBBoxCrop(

We can use nvJpeg here

            annotations_file = annotations_file,
            ratio=True, ltrb=True, random_shuffle=True)
+       self.decode = ops.nvJPEGDecoder(device="mixed", output_type=types.RGB)

        # Augumentation techniques
        self.crop = ops.RandomBBoxCrop(
codereview_python_data_11781
    assert len(Foo.initialize_rules()) == 2


-def test_steps_printed_despite_BaseException(capsys):
    # Test for https://github.com/HypothesisWorks/hypothesis/issues/1372
    class RaisesProblem(RuleBasedStateMachine):

I think given that we've established we don't want this to happen for all base exceptions we should probably have a test that asserts that this *doesn't* happen for e.g. `KeyboardInterrupt` too.

    assert len(Foo.initialize_rules()) == 2


+def test_steps_printed_despite_pytest_fail(capsys):
    # Test for https://github.com/HypothesisWorks/hypothesis/issues/1372
    class RaisesProblem(RuleBasedStateMachine):
codereview_python_data_11786
        else:
            raise ValueError('Server info not found in log_file',log_file)
    except IOError:
-       pytest.fail("Unable to open log file %s\nYou may need to open a sage worksheet"%log_file)
    return host, int(port)

secret_token = None

this and the previous error message are maybe a little bit too vague? Isn't the problem in both cases that the sage server isn't running? I would rephrase that by something like: "there is probably no sage server running. you either have to open a sage worksheet or run `smc-sage-server start`" at least, that's what I think needs to be done.

        else:
            raise ValueError('Server info not found in log_file',log_file)
    except IOError:
+       pytest.fail("Unable to open log file %s\nThere is probably no sage server running. You either have to open a sage worksheet or run smc-sage-server start"%log_file)
    return host, int(port)

secret_token = None
codereview_python_data_11790
return str(self) == str(other) def __ne__(self, other): - """Compare sequences with a not-equal operand, see __eq__ documentation.""" # Seem to require this method for Python 2 but not needed on Python 3? return not (self == other) def __lt__(self, other): - """Compare sequences with a less-than operand, see __eq__ documentation.""" if hasattr(other, "alphabet"): if not Alphabet._check_type_compatible([self.alphabet, other.alphabet]): Is this really clearer (likewise for the other special methods)? return str(self) == str(other) def __ne__(self, other): + """Implement the not-equal operand.""" # Seem to require this method for Python 2 but not needed on Python 3? return not (self == other) def __lt__(self, other): + """Implement the less-than operand.""" if hasattr(other, "alphabet"): if not Alphabet._check_type_compatible([self.alphabet, other.alphabet]):
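A standalone sketch of the same comparison pattern: Python 2 does not derive `__ne__` from `__eq__` (Python 3 does), which is why the class above spells it out; `functools.total_ordering` is an alternative the snippet does not use, shown here only to illustrate how the operators relate.

from functools import total_ordering

@total_ordering
class Tag:
    def __init__(self, value):
        self.value = value

    def __eq__(self, other):
        return str(self.value) == str(other)

    def __lt__(self, other):
        return str(self.value) < str(other)

assert Tag("abc") == "abc"
assert Tag("abc") != "abd"  # __ne__ derived automatically on Python 3
assert Tag("abc") <= "abd"  # supplied by total_ordering from __lt__/__eq__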
codereview_python_data_11792
# have the infrastructure here. if ( not irtyputils.is_object(ir_set.typeref) - or base_ptrref.out_target == actual_ptrref.out_target - or shape_ptrref.out_target == actual_ptrref.out_target ): return val Perhaps check `.id` equality instead? Should be more reliable. # have the infrastructure here. if ( not irtyputils.is_object(ir_set.typeref) + or base_ptrref.out_target.id == actual_ptrref.out_target.id + or shape_ptrref.out_target.id == actual_ptrref.out_target.id ): return val
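A toy illustration of the reviewer's point, with a hypothetical `TypeRef` standing in for the real pointer refs: two distinct wrapper objects can describe the same target, so default (identity-based) equality is too strict, while the stable `.id` is not.

import uuid

class TypeRef:
    def __init__(self, ref_id):
        self.id = ref_id

shared = uuid.uuid4()
a, b = TypeRef(shared), TypeRef(shared)
assert a != b          # default object equality compares identity
assert a.id == b.id    # comparing the stable id is reliable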
codereview_python_data_11794
@PIPELINES.register_module() class RandomShift(object): - def __init__(self, prob=1.5, max_shift=32): self.prob = prob self.max_shift = max_shift May implement a new function to simplify the logic in the for loop. @PIPELINES.register_module() class RandomShift(object): + def __init__(self, prob=0.5, max_shift=32): self.prob = prob self.max_shift = max_shift
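A hedged sketch expanding on both points above: a probability default inside [0, 1] and a small helper holding the per-box arithmetic; the clipping behaviour is illustrative only, not mmdetection's actual implementation.

import random

import numpy as np

def _shift_bboxes(bboxes, dx, dy, width, height):
    # Shift all xyxy boxes by (dx, dy) and clip them back into the image.
    shifted = bboxes + np.array([dx, dy, dx, dy], dtype=bboxes.dtype)
    shifted[:, 0::2] = shifted[:, 0::2].clip(0, width)
    shifted[:, 1::2] = shifted[:, 1::2].clip(0, height)
    return shifted

class RandomShift:
    def __init__(self, prob=0.5, max_shift=32):
        assert 0 <= prob <= 1
        self.prob = prob
        self.max_shift = max_shift

    def __call__(self, bboxes, width, height):
        if random.random() >= self.prob:
            return bboxes
        dx = random.randint(-self.max_shift, self.max_shift)
        dy = random.randint(-self.max_shift, self.max_shift)
        return _shift_bboxes(bboxes, dx, dy, width, height)

boxes = np.array([[10., 10., 50., 50.]])
out = RandomShift(prob=1.0, max_shift=4)(boxes, width=64, height=64)
assert out.shape == boxes.shape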
codereview_python_data_11800
Examples
    --------
-    For example::

    >>> ts.dimensions
    array([ 13., 14., 15., 90., 90., 90.], dtype=float32)
You can remove this line when you have the example heading above.
Examples
    --------
+    The unitcell for a given system can be queried as either three
+    vector lengths followed by their respective angles, or as three
+    triclinic vectors.

    >>> ts.dimensions
    array([ 13., 14., 15., 90., 90., 90.], dtype=float32)
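A plain-NumPy sketch of the relationship the new docstring text describes, for the orthorhombic case shown in the example (all angles at 90 degrees), without depending on MDAnalysis itself.

import numpy as np

dimensions = np.array([13., 14., 15., 90., 90., 90.], dtype=np.float32)
lengths, angles = dimensions[:3], dimensions[3:]

if np.allclose(angles, 90.0):
    # With all angles at 90 degrees the three box vectors are just the
    # axis-aligned edges, so the triclinic matrix is diagonal.
    vectors = np.diag(lengths)

print(vectors)
# [[13.  0.  0.]
#  [ 0. 14.  0.]
#  [ 0.  0. 15.]]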
codereview_python_data_11801
dml_map[typeref] = (dml_cte, dml_rvar) if ( isinstance(ir_stmt, irast.InsertStmt) and ir_stmt.on_conflict and ir_stmt.on_conflict[1] is not None I see why you've done this, but we probably shouldn't. The purpose of `dml_map` is to track all components of a polymorphic mutation (i.e when you `UPDATE SuperType` which compiles into a bunch of `UPDATE` statements for all subtypes). So, I'd add another list into `DMLParts` to track _all_ CTEs produced by a DML statement, and leave the `dml_map` alone. (Perhaps it also needs a better name to reflect its purpose). `else_cte` could be a proper member of `DMLParts` also. dml_map[typeref] = (dml_cte, dml_rvar) + dml_entries = list(dml_map.values()) + + else_cte = None if ( isinstance(ir_stmt, irast.InsertStmt) and ir_stmt.on_conflict and ir_stmt.on_conflict[1] is not None
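A hedged sketch of the `DMLParts` shape the reviewer describes; the field types are placeholders, not the actual EdgeDB compiler definitions.

import dataclasses
import typing

@dataclasses.dataclass
class DMLParts:
    # One (cte, rvar) pair per concrete subtype touched by a polymorphic
    # DML statement, keyed by type reference.
    dml_map: typing.Dict[object, typing.Tuple[object, object]] = (
        dataclasses.field(default_factory=dict))
    # Flat list of *all* CTEs the statement produced, in creation order.
    dml_ctes: typing.List[object] = dataclasses.field(default_factory=list)
    # CTE for the ELSE branch of INSERT ... ON CONFLICT, if any.
    else_cte: typing.Optional[object] = None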
codereview_python_data_11809
# Hardcoded 'Signature': 'EXAMPLEpH+..', 'SigningCertURL': 'https://sns.us-east-1.amazonaws.com/SimpleNotificationService-0000000000000000000000.pem', - 'UnsubscribeURL': ['%s/?Action=Unsubscribe&TopicArn=%s&Token=%s' % (external_url, topic_arn, token)] } if subject is not None: Why is the array needed here - shouldn't this be simply: ``` 'UnsubscribeURL': '%s/?Action=Unsubscribe&TopicArn=%s&Token=%s' % (external_url, topic_arn, token) ``` ? # Hardcoded 'Signature': 'EXAMPLEpH+..', 'SigningCertURL': 'https://sns.us-east-1.amazonaws.com/SimpleNotificationService-0000000000000000000000.pem', + 'UnsubscribeURL': '%s/?Action=Unsubscribe&TopicArn=%s&Token=%s' % (external_url, topic_arn, token) } if subject is not None:
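A small sketch of an alternative construction for such a URL; `urlencode` escapes the token and the ARN, which the plain %-formatting above does not.

from urllib.parse import urlencode

def build_unsubscribe_url(external_url, topic_arn, token):
    query = urlencode({
        'Action': 'Unsubscribe',
        'TopicArn': topic_arn,
        'Token': token,
    })
    return '%s/?%s' % (external_url, query)

url = build_unsubscribe_url('http://localhost:4566',
                            'arn:aws:sns:us-east-1:000000000000:topic', 'abc')
assert 'Action=Unsubscribe' in url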
codereview_python_data_11811
""" Create DataFrame from remote partitions. Parameters ---------- partitions : list Let's add a note on additional arguments. Something like that: you can pass `index`, `columns`, `row_lengths` and `column_widths` to this function in order to avoid triggering extra computations to compute the metadata. """ Create DataFrame from remote partitions. + Notes + ----- + Pass `index`, `columns`, `row_lengths` and `column_widths` to avoid triggering + extra computations of the metadata. + Parameters ---------- partitions : list
codereview_python_data_11816
@classmethod - def check_dense(cls, arrays): - return any(array.shape not in [arrays[0].shape, (1,)] for array in arrays[1:]) @classmethod If I understand this code correctly `check_compressed` might be a better name... **Edit:** How about inverting it and calling it `expanded_format`? @classmethod + def expanded_format(cls, arrays): + return not any(array.shape not in [arrays[0].shape, (1,)] for array in arrays[1:]) @classmethod
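A runnable illustration of the shape rule being renamed: every trailing array must either match the first array's shape or be a length-1 placeholder, which is exactly what the inverted check expresses.

import numpy as np

def expanded_format(arrays):
    return all(a.shape in [arrays[0].shape, (1,)] for a in arrays[1:])

full = np.zeros(5)
assert expanded_format([full, np.ones(5), np.ones(1)])
assert not expanded_format([full, np.ones(3)])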
codereview_python_data_11817
def transitive_closure(G, reflexive=False): - """Returns transitive closure of a directed graph The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that for all v, w in V there is an edge (v, w) in E+ if and only if there "directed" could be removed here. def transitive_closure(G, reflexive=False): + """Returns transitive closure of a graph The transitive closure of G = (V,E) is a graph G+ = (V,E+) such that for all v, w in V there is an edge (v, w) in E+ if and only if there
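A quick usage sketch with networkx showing what the docstring describes; the directed case is used here, but the summary line says "graph" because the function is not limited to digraphs.

import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3)])
TC = nx.transitive_closure(G)
assert (1, 3) in TC.edges  # 3 is reachable from 1, so the closure adds the edge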
codereview_python_data_11823
flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( scale_factors).unsqueeze(1) - dets = [], labels = [] for img_id in range(len(img_metas)): cls_scores = flatten_cls_scores[img_id] score_factor = flatten_objectness[img_id] You need to separate this line into two lines without a comma like this: ``` dets = [] labels = [] ``` flatten_bboxes[..., :4] /= flatten_bboxes.new_tensor( scale_factors).unsqueeze(1) + dets = [] + labels = [] for img_id in range(len(img_metas)): cls_scores = flatten_cls_scores[img_id] score_factor = flatten_objectness[img_id]
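A short demonstration of why the reviewer insists on two lines: Python parses the single-line form as a chained assignment whose second target is the pair `([], labels)`, so it tries to unpack the empty list into two targets and fails at runtime.

try:
    dets = [], labels = []
except ValueError as err:
    print(err)  # not enough values to unpack (expected 2, got 0)

# The intended, unambiguous spelling:
dets = []
labels = []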
codereview_python_data_11828
constants.REPRODUCTION_TIMEOUT_LIMIT)) result = runner.run_single_testcase( input_path, - timeout=constants.REPRODUCTION_TIMEOUT_LIMIT + 5, # processing buffer additional_args=arguments) return engine.ReproduceResult(result.command, result.return_code, I'd give even more than 5 here, maybe 20? constants.REPRODUCTION_TIMEOUT_LIMIT)) result = runner.run_single_testcase( input_path, + timeout=constants.REPRODUCTION_TIMEOUT_LIMIT + + 10, # processing buffer. additional_args=arguments) return engine.ReproduceResult(result.command, result.return_code,
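A small sketch of an alternative that makes the headroom self-documenting instead of a magic number appended inline; the constant names and values are illustrative, not ClusterFuzz's actual definitions.

REPRODUCTION_TIMEOUT_LIMIT = 60  # illustrative value
RESULT_PROCESSING_BUFFER = 10  # headroom for handling output after the run

timeout = REPRODUCTION_TIMEOUT_LIMIT + RESULT_PROCESSING_BUFFER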