id: string (24-28 characters)
content: string (121-2.08k characters)
codereview_python_data_4160
     Group.url: '[src], [href]',
     Group.prevnext: 'a, area, button, link, [role=button]',
     Group.focus: '*:focus',
-    Group.inputs: 'input[type=text], input[type=password], textarea',
 }

Why only `text` and `password` as types, instead of `input:not([type=hidden])` like above in `Group.all`?

     Group.url: '[src], [href]',
     Group.prevnext: 'a, area, button, link, [role=button]',
     Group.focus: '*:focus',
+    Group.inputs: ('input[type=text], input[type=email], input[type=url], '
+                   'input[type=tel], input[type=number], input[type=password], '
+                   'input[type=search], textarea'),
 }
codereview_python_data_4165
     os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'Contents', 'MacOS', lib), os.path.join(dest, lib))

-    # Patch Info.plist to declare URLs support
     plist_path = os.path.join(app_path, 'Contents', 'Info.plist')
     with open(plist_path, "rb") as f:
         plist_data = plistlib.load(f)

`misc/qutebrowser.spec` also does some plist manipulations using the `info_plist` argument. Since PyInstaller can't do nested lists/dicts like we need here, I think it'd be best to move those here as well, so everything is in one place.

     os.symlink(os.path.join(os.pardir, os.pardir, os.pardir, 'Contents', 'MacOS', lib), os.path.join(dest, lib))

+    # Patch Info.plist - pyinstaller's options are too limiting
     plist_path = os.path.join(app_path, 'Contents', 'Info.plist')
     with open(plist_path, "rb") as f:
         plist_data = plistlib.load(f)
codereview_python_data_4168
 LOGGER = logger.get_logger(__name__)

-CAI_RESOURCE_TYPE_MAPPING = {
-    'lien': 'cloudresourcemanager.googleapis.com/Lien',
-    'sink': 'logging.googleapis.com/LogSink'
-}
-

 class CaiDataModel(base_data_model.BaseDataModel):
     """Cloud Asset Inventory (CAI) Data Model."""

Is this repeated from above, from `config_validator_util/cv_data_converter.py`? If so, can we consolidate or centralize it?

 LOGGER = logger.get_logger(__name__)

 class CaiDataModel(base_data_model.BaseDataModel):
     """Cloud Asset Inventory (CAI) Data Model."""
codereview_python_data_4170
 def test_paddle_prepare_first_batch():
     from nvidia.dali.plugin.paddle import DALIGenericIterator as PaddleIterator
     check_prepare_first_batch(PaddleIterator, output_map=["data"],
-                              to_np=lambda x: np.array(x["data"]))
\ No newline at end of file

add line break at the end

 def test_paddle_prepare_first_batch():
     from nvidia.dali.plugin.paddle import DALIGenericIterator as PaddleIterator
     check_prepare_first_batch(PaddleIterator, output_map=["data"],
\ No newline at end of file
+                              to_np=lambda x: np.array(x["data"]))
codereview_python_data_4172
         views.ajax_permissions, name='pontoon.teams.ajax.permissions'),
     url(r'^(?P<locale>[A-Za-z0-9\-\@\.]+)/request/$',
         views.request_item, name='pontoon.teams.request.projects'),
-
-    # AJAX: Request team to be added to Pontoon
-    url(r'^request/team/$',
-        views.request_item,
-        name='pontoon.teams.request.locale'),
 ]

What's the purpose of changing this file?

         views.ajax_permissions, name='pontoon.teams.ajax.permissions'),
+    # AJAX: Request projects to be added to locale
     url(r'^(?P<locale>[A-Za-z0-9\-\@\.]+)/request/$',
         views.request_item, name='pontoon.teams.request.projects'),
 ]
codereview_python_data_4173
         logits_in_channel = (
             self.conv_out_channels
             if self.upsample_method == 'deconv' else upsample_in_channels)
-        if self.predictor_cfg:
-            self.conv_logits = build_conv_layer(self.predictor_cfg,
-                                                logits_in_channel,
-                                                out_channels, 1)
-        else:
-            self.conv_logits = Conv2d(logits_in_channel, out_channels, 1)
         self.relu = nn.ReLU(inplace=True)
         self.debug_imgs = None

Maybe we should use default `predictor_cfg=dict(type='conv2d')` thus we could unify lines 109-114 to be

```python
self.conv_logits = build_conv_layer(self.predictor_cfg, logits_in_channel, out_channels, 1)
```

         logits_in_channel = (
             self.conv_out_channels
             if self.upsample_method == 'deconv' else upsample_in_channels)
+        self.conv_logits = build_conv_layer(self.predictor_cfg,
+                                            logits_in_channel, out_channels, 1)
         self.relu = nn.ReLU(inplace=True)
         self.debug_imgs = None
codereview_python_data_4174
      for k in range(K)]
 B = [experiment('xentropy', label_type='binary', data=DATA)['time']
      for k in range(K)]
-print(f"Best `binary` time: {str(min(A))}")
-print(f"Best `xentropy` time: {str(min(B))}")

```suggestion
print(f"Best `binary` time: {min(A)}")
print(f"Best `xentropy` time: {min(B)}")
```

`str()` is no longer necessary inside an f-string, so I think this can be simplified.

      for k in range(K)]
 B = [experiment('xentropy', label_type='binary', data=DATA)['time']
      for k in range(K)]
+print(f"Best `binary` time: {min(A)}")
+print(f"Best `xentropy` time: {min(B)}")
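A standalone check of the reviewer's point, with an illustrative `times` list:

```python
# f-strings format interpolated values with str()/format() automatically,
# so wrapping them in str() changes nothing.
times = [0.81, 0.79, 0.84]  # illustrative timings

assert f"{min(times)}" == f"{str(min(times))}"
print(f"Best time: {min(times)}")  # Best time: 0.79
```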
codereview_python_data_4177
         'Source type\t: {0:s}\n'.format(self._source_type))

     if self._artifact_filters:
         self._output_writer.Write('Artifact filters\t: {0!s}\n'.format(
-            self._artifact_filters))

     if self._filter_file:
         self._output_writer.Write('Filter file\t: {0:s}\n'.format(
             self._filter_file))

if this is a list the please use `', '.join(list)` or equiv

         'Source type\t: {0:s}\n'.format(self._source_type))

     if self._artifact_filters:
+        artifacts_string = ', '.join(self._artifact_filters)
         self._output_writer.Write('Artifact filters\t: {0!s}\n'.format(
+            artifacts_string))

     if self._filter_file:
         self._output_writer.Write('Filter file\t: {0:s}\n'.format(
             self._filter_file))
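A small sketch of the `', '.join(...)` pattern the reviewer asks for, with illustrative filter names:

```python
# Joining a list of strings renders readable text instead of the list repr.
artifact_filters = ['WindowsEventLogs', 'BrowserHistory']  # illustrative

print('Artifact filters: {0!s}'.format(artifact_filters))
# Artifact filters: ['WindowsEventLogs', 'BrowserHistory']
print('Artifact filters: {0:s}'.format(', '.join(artifact_filters)))
# Artifact filters: WindowsEventLogs, BrowserHistory
```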
codereview_python_data_4178
             data['score'] = float(bboxes[i][4])
             data['category_id'] = 1
             json_results.append(data)
-    return dict(proposal=json_results)

 def det2json(dataset, results):

The return types of `xxx2json` should remain the same.

             data['score'] = float(bboxes[i][4])
             data['category_id'] = 1
             json_results.append(data)
+    return json_results

 def det2json(dataset, results):
codereview_python_data_4185
         return element

     @staticmethod
-    def _get_csv_config_random(path, delimiter, loop, variable_names, random_order, encoding, same_list):
         """
         :type path: str

is it possible to write in single line?

         return element

     @staticmethod
+    def _get_csv_config_random(path, delimiter, loop, variable_names, random_order):
         """
         :type path: str
codereview_python_data_4194
     :param ramp_up: int ramp_up period
     :return:
     """
-    rampup_sel = ".//stringProp[@name='ThreadGroup.ramp_time']"
     for group in jmx.enabled_thread_groups():
         prop = group.find(rampup_sel)

Let's not rely on "stringProp". Let's rely on any node with such name.

     :param ramp_up: int ramp_up period
     :return:
     """
+    rampup_sel = ".//*[@name='ThreadGroup.ramp_time']"
     for group in jmx.enabled_thread_groups():
         prop = group.find(rampup_sel)
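The difference between the two selectors can be checked with the standard library alone; this sketch assumes a JMeter-style property stored in a `longProp` rather than a `stringProp`:

```python
import xml.etree.ElementTree as ET

# Ramp-up stored in a longProp: the stringProp-specific selector misses it,
# while the wildcard selector matches any element carrying the name attribute.
group = ET.fromstring(
    "<ThreadGroup>"
    "<longProp name='ThreadGroup.ramp_time'>60</longProp>"
    "</ThreadGroup>"
)

assert group.find(".//stringProp[@name='ThreadGroup.ramp_time']") is None
assert group.find(".//*[@name='ThreadGroup.ramp_time']").text == "60"
```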
codereview_python_data_4198
 from copy import deepcopy
 from hashlib import sha256

 from plenum.common.types import OPERATION
 from plenum.common.constants import TXN_TYPE, RAW, ENC, HASH
 from plenum.server.client_authn import NaclAuthNr, CoreAuthNr, CoreAuthMixin
-from indy_common.constants import ATTRIB, POOL_UPGRADE, SCHEMA, CLAIM_DEF, \
-    GET_NYM, GET_ATTR, GET_SCHEMA, GET_CLAIM_DEF, POOL_CONFIG
 from indy_node.persistence.idr_cache import IdrCache

I think we already have a list with query txns: `openTxns` in `constants/py`. Let's use just one list in all places.

 from copy import deepcopy
 from hashlib import sha256

+from indy_node.server.config_req_handler import ConfigReqHandler
 from plenum.common.types import OPERATION
 from plenum.common.constants import TXN_TYPE, RAW, ENC, HASH
 from plenum.server.client_authn import NaclAuthNr, CoreAuthNr, CoreAuthMixin
+from indy_common.constants import ATTRIB, GET_TXNS
+from indy_node.server.pool_req_handler import PoolRequestHandler
+from indy_node.server.domain_req_handler import DomainReqHandler
+
 from indy_node.persistence.idr_cache import IdrCache
codereview_python_data_4200
 class Handler(base_handler.Handler):
-    """Handler that gets the crash stats when user first lands on the page."""

     @handler.post(handler.JSON, handler.JSON)
     @handler.oauth

Does handler.oauth decorator not check this ?

 class Handler(base_handler.Handler):
+    """Handler for crash querying."""

     @handler.post(handler.JSON, handler.JSON)
     @handler.oauth
codereview_python_data_4203
 def test_preflow_push_makes_enough_space():
     #From ticket #1542
     G = nx.DiGraph()
-    G.add_path([0,1,3],capacity=1)
-    G.add_path([1,2,3],capacity=1)
-    R = preflow_push(G,0,3,value_only=False)
     assert_equal(R.graph['flow_value'], 1)

 def test_shortest_augmenting_path_two_phase():

Please ensure that every comma is followed by a space.

 def test_preflow_push_makes_enough_space():
     #From ticket #1542
     G = nx.DiGraph()
+    G.add_path([0, 1, 3], capacity=1)
+    G.add_path([1, 2, 3], capacity=1)
+    R = preflow_push(G, 0, 3, value_only=False)
     assert_equal(R.graph['flow_value'], 1)

 def test_shortest_augmenting_path_two_phase():
codereview_python_data_4204
                           im_ids_16)

 def _test_tf_dataset(device):
     skip_for_incompatible_tf()

Why we need run `sess` twice?

                           im_ids_16)

+def _dataset_options():
+    options = tf.data.Options()
+    try:
+        options.experimental_optimization.apply_default_optimizations = False
+        options.experimental_optimization.autotune = False
+    except:
+        print('Could not set TF Dataset Options')
+
+    return options
+
+
 def _test_tf_dataset(device):
     skip_for_incompatible_tf()
codereview_python_data_4206
 # -*- coding: utf-8 -*-
 import time
-<<<<<<< HEAD
 import json
 import random
-=======
-import random
->>>>>>> refs/remotes/origin/patch-2
 from pgoapi.utilities import f2i
 from pokemongo_bot import logger

Merge conflict was committed here ^

 # -*- coding: utf-8 -*-
 import time
 import json
 import random
 from pgoapi.utilities import f2i
 from pokemongo_bot import logger
codereview_python_data_4215
     def test_example(self):
         p = PDBParser(PERMISSIVE=True)
-        s = p.get_structure("test", sys.argv[1])
-        io = PDBIO()
-        io.set_structure(s)
         filenumber, filename = tempfile.mkstemp()
         os.close(filenumber)
         with open("out2.pdb", "w") as fp:
             s1 = p.get_structure("test1", sys.argv[1])
             s2 = p.get_structure("test2", sys.argv[2])

You are still using ``sys.argv[1]`` and ``sys.argv[2]`` for file names. You need to use two automatically named temporary files here (a few lines above you setup a single temporary filename but never used it).

     def test_example(self):
         p = PDBParser(PERMISSIVE=True)
         filenumber, filename = tempfile.mkstemp()
         os.close(filenumber)
+        io = PDBIO()
+        io.set_structure(s)
         with open("out2.pdb", "w") as fp:
             s1 = p.get_structure("test1", sys.argv[1])
             s2 = p.get_structure("test2", sys.argv[2])
codereview_python_data_4216
 except Exception as e:
     print(e)

-#snippet-end:[iam.python.create_role.complete]
 #snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
 #snippet-sourcedescription:[create_role.py demonstrates how to create an IAM Role.]
 #snippet-keyword:[Python]

This should say `#snippet-start:[iam.python.create_role.complete]`

 except Exception as e:
     print(e)

+#snippet-start:[iam.python.create_role.complete]
 #snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
 #snippet-sourcedescription:[create_role.py demonstrates how to create an IAM Role.]
 #snippet-keyword:[Python]
codereview_python_data_4217
 import os
 import os.path

-from molecule import utilities
 import anyconfig

 DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), 'conf/defaults.yml')
 PROJECT_CONFIG = 'molecule.yml'
 LOCAL_CONFIG = '~/.config/molecule/config.yml'

Not sure why this moved? I think our standard order is:

1. Core modules
2. 3rd party modules
3. Our modules

 import os
 import os.path

 import anyconfig

+from molecule import utilities
+
 DEFAULT_CONFIG = os.path.join(os.path.dirname(__file__), 'conf/defaults.yml')
 PROJECT_CONFIG = 'molecule.yml'
 LOCAL_CONFIG = '~/.config/molecule/config.yml'
codereview_python_data_4220
     mem_limit = max(DEFAULT_MEM_LIMIT_MIN_MB, round(os_util.get_total_mem() * DEFAULT_MEM_LIMIT_PCT / 100, 0))

     # agent values
-    if AGENT_NAME.lower() in cgroup_name.lower():
         mem_limit = min(DEFAULT_MEM_LIMIT_MAX_MB, mem_limit)

     return mem_limit
-
-
-class CGroupsUtils(object):
-    def __init__(self):
-        pass
-
-
-class CGroupsEventListener(object):
-    def __init__(self):
-        pass

These 2 classes are not used

     mem_limit = max(DEFAULT_MEM_LIMIT_MIN_MB, round(os_util.get_total_mem() * DEFAULT_MEM_LIMIT_PCT / 100, 0))

     # agent values
+    if AGENT_CGROUP_NAME.lower() in cgroup_name.lower():
         mem_limit = min(DEFAULT_MEM_LIMIT_MAX_MB, mem_limit)

     return mem_limit
codereview_python_data_4223
     return not rev if x is None else rev

 def sort_func(src, rev, na_pos):
-    res = sorted(src, key=lambda x: (key_func(x, rev, na_pos), x), reverse=rev)
     return res

If you handle `na_pos="remove"` here, then the tests can be simplified (making them parametrized on the `na_position` argument):

```
if na_pos == "remove":
    src = [s for s in src if s is not None]
```

     return not rev if x is None else rev

 def sort_func(src, rev, na_pos):
+    if na_pos == "remove":
+        res = sorted([s for s in src if s != None], reverse=rev)
+    else:
+        res = sorted(src, key=lambda x: (key_func(x, rev, na_pos), x), reverse=rev)
     return res
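A self-contained sketch of the None-handling idea discussed above, covering the same three `na_position` behaviours (`"first"`, `"last"`, `"remove"`); the helper name is illustrative:

```python
# A tuple key pushes None values to the requested end; "remove" filters
# them out before sorting.
def sort_with_none(values, reverse=False, na_position="last"):
    if na_position == "remove":
        return sorted((v for v in values if v is not None), reverse=reverse)
    # reverse=True flips the whole order, so flip the None flag too
    # to keep None at the requested end.
    none_key = (na_position == "last") ^ reverse
    return sorted(values,
                  key=lambda v: (none_key, 0) if v is None else (not none_key, v),
                  reverse=reverse)

print(sort_with_none([3, None, 1, 2]))                       # [1, 2, 3, None]
print(sort_with_none([3, None, 1, 2], na_position="first"))  # [None, 1, 2, 3]
print(sort_with_none([3, None, 1, 2], na_position="remove")) # [1, 2, 3]
```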
codereview_python_data_4225
     # where 0 represents unlimited.

     # We need to check that we aren't asking for a bigger queue than the
-    # platform supports, which requires access to this internal value.
     # pylint: disable=no-member,protected-access
     queue_max_length = _multiprocessing.SemLock.SEM_VALUE_MAX
     # pylint: enable=no-member,protected-access

internal to what? internal value => protected module.

     # where 0 represents unlimited.

     # We need to check that we aren't asking for a bigger queue than the
+    # platform supports, which requires access to this internal
+    # multiprocessing value.
     # pylint: disable=no-member,protected-access
     queue_max_length = _multiprocessing.SemLock.SEM_VALUE_MAX
     # pylint: enable=no-member,protected-access
codereview_python_data_4234
 from .sklearn import LGBMModel

-def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> Optional[str]:
     """Check object is not tuple or does not have 2 elements."""
     if not isinstance(obj, tuple) or len(obj) != 2:
         raise TypeError(f"{obj_name} must be a tuple of 2 elements.")

```suggestion
def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> None:
```

This function will never return a string. It will only return `None` or throw an error.

 from .sklearn import LGBMModel

+def _check_not_tuple_of_2_elements(obj: Any, obj_name: str = 'obj') -> None:
     """Check object is not tuple or does not have 2 elements."""
     if not isinstance(obj, tuple) or len(obj) != 2:
         raise TypeError(f"{obj_name} must be a tuple of 2 elements.")
codereview_python_data_4235
 # e.g. with lib.log

 #: Release of MDAnalysis as a string, using `semantic versioning`_.
-__version__ = "1.0.0-dev0"  # NOTE: keep in sync with RELEASE in setup.py

Tests are failing because this tag is different from the one in `MDAnalysisTests/__init__.py`.

```
E AssertionError: MDAnalysis release 1.0.1-dev0 must be installed to have meaningful tests, not 1.0.0-dev0
E assert '1.0.0-dev0' == '1.0.1-dev0'
E - 1.0.1-dev0
E ?     ^
E + 1.0.0-dev0
E ?     ^
```

 # e.g. with lib.log

 #: Release of MDAnalysis as a string, using `semantic versioning`_.
+__version__ = "1.0.1-dev0"  # NOTE: keep in sync with RELEASE in setup.py
codereview_python_data_4243
     def __repr__(self) -> str:
         return (
             f'<pg.{self.__class__.__name__} '
-            f'name={self.relation.name!r} at 0x{id(self):x}>'
         )

```suggestion
            f'name={self.relation.name!r} at {id(self):#x}>'
```

:)

     def __repr__(self) -> str:
         return (
             f'<pg.{self.__class__.__name__} '
+            f'name={self.relation.name!r} at {id(self):#x}>'
         )
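The `#` alternate form in a format spec prepends the `0x` prefix itself, which is what the suggestion relies on:

```python
# '0x{v:x}' and '{v:#x}' produce identical text; '#' adds the base prefix.
v = 0x7f8d4c0a3a88  # an illustrative id() value

assert f"0x{v:x}" == f"{v:#x}"
print(f"{v:#x}")  # 0x7f8d4c0a3a88
```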
codereview_python_data_4245
 from __future__ import print_function

 import tensorflow as tf
-from tensorflow_addons.utils import keras_utils

-@keras_utils.register_keras_custom_object
 class SWA(tf.keras.optimizers.Optimizer):
     """This class extends optimizers with Stochastic Weight Averaging (SWA).

Replace with `@tf.keras.utils.register_keras_serializable(package='Addons')`. See 2b070a15fb516e6b3c1cca3898ae611024d7481d.

 from __future__ import print_function

 import tensorflow as tf
+
+@tf.keras.utils.register_keras_serializable(package='Addons')
 class SWA(tf.keras.optimizers.Optimizer):
     """This class extends optimizers with Stochastic Weight Averaging (SWA).
codereview_python_data_4248
         Returns
         -------
-        dict
-            With ``str`` keys and ``tensor`` values.
         """
         return self._src_data

This line is usually used to explain what is the meaning of the returned value.

         Returns
         -------
+        dict with ``str`` keys and ``tensor`` values
+            Features of the source nodes.
         """
         return self._src_data
codereview_python_data_4266
""" data = self._data if not isinstance(data, (bytes, bytearray)): - data = bytes(data) return data.decode("ASCII") def __hash__(self): What else could it be? """ data = self._data if not isinstance(data, (bytes, bytearray)): + data = bytes(self) return data.decode("ASCII") def __hash__(self):
codereview_python_data_4271
     reproduction_help_url = ndb.StringProperty(default='')

     # Documentation url.
-    documentation_url = ndb.StringProperty(
-        default='https://google.github.io/clusterfuzz/')

     # Bug report url.
-    bug_report_url = ndb.StringProperty(
-        default='https://github.com/google/clusterfuzz/issues')

     # Platforms that coverage is supported for.
     platform_group_mappings = ndb.TextProperty(default='')

lets put this in class inside handler as a global var. we don't want to hardcode those in db, as it is hard to migrate.

     reproduction_help_url = ndb.StringProperty(default='')

     # Documentation url.
+    documentation_url = ndb.StringProperty(default='')

     # Bug report url.
+    bug_report_url = ndb.StringProperty(default='')

     # Platforms that coverage is supported for.
     platform_group_mappings = ndb.TextProperty(default='')
codereview_python_data_4273
     def _get_streams(self):
         is_live = False

-        video_id = self._find_video_id(self.url)
-        self.video_id = video_id
-        log.debug("Using video ID: {0}", video_id)

-        info = self._get_stream_info(video_id)
         if info and info.get("status") == "fail":
             log.error("Could not get video info: {0}".format(info.get("reason")))
             return

You could do, `self.video_id = video_id = self._find_video_id(self.url)`

Or rewrite the subsequent lines to use `self.video_id`?

     def _get_streams(self):
         is_live = False

+        self.video_id = self._find_video_id(self.url)
+        log.debug("Using video ID: {0}", self.video_id)

+        info = self._get_stream_info(self.video_id)
         if info and info.get("status") == "fail":
             log.error("Could not get video info: {0}".format(info.get("reason")))
             return
codereview_python_data_4276
-from calendar import monthrange
-from datetime import datetime
-from dateutil.relativedelta import relativedelta
-
-import listenbrainz_spark
-from listenbrainz_spark.exceptions import SQLException
-
-from pyspark.sql.utils import *
-from listenbrainz_spark.stats import utils
\ No newline at end of file

This isn't ideal, I'd prefer to move these functions to utils and then change the places where they're imported to utils as well. Right now, if i want to get to the definition of a function, I'd have to go here, figure out that this file imports utils and then go to utils. if we fix the imports, the reader can directly reach utils.

\ No newline at end of file
codereview_python_data_4280
""" request: HTTPRequest response: Optional[HTTPResponse] = None - trailers: http.Headers error: Optional[flow.Error] = None """ Note that it's possible for a Flow to have both a response and an error I suppose trailers are optional as well? """ request: HTTPRequest response: Optional[HTTPResponse] = None + trailers: Optional[http.Headers] = None error: Optional[flow.Error] = None """ Note that it's possible for a Flow to have both a response and an error
codereview_python_data_4282
     lines = []
     output = output_writer.ReadOutput()
     output = codecs.decode(output, 'utf-8')
     for line in output.split('\n'):
         lines.append(line)

why not encode the `expected_line` instead of decoding all the output?

     lines = []
     output = output_writer.ReadOutput()
+    # TODO: add test output writer that produces strings also see:
+    # https://github.com/log2timeline/plaso/issues/1963
     output = codecs.decode(output, 'utf-8')
     for line in output.split('\n'):
         lines.append(line)
codereview_python_data_4285
     TX_IN_BACKLOG = 'backlog'
     """return if transaction is in backlog"""

-    def __init__(self, tendermint_host=None, tendermint_port=None, connection=None):
         """Initialize the Bigchain instance

         A Bigchain instance has several configuration parameters (e.g. host).

I don't think we need this, the user can use the config file `.bigchaindb` or the environment variables.

     TX_IN_BACKLOG = 'backlog'
     """return if transaction is in backlog"""

+    def __init__(self, connection=None):
         """Initialize the Bigchain instance

         A Bigchain instance has several configuration parameters (e.g. host).
codereview_python_data_4286
         runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

     # user-defined hooks
-    if cfg.get('hooks', None):
         for hook_cfg in cfg.hooks:
             hook_cfg = hook_cfg.copy()
             priority = hook_cfg.pop('priority', 'NORMAL')

Rename the field `hooks` to `additional_hooks` or `custom_hooks`?

         runner.register_hook(eval_hook(val_dataloader, **eval_cfg))

     # user-defined hooks
+    if cfg.get('custom_hooks', None):
         for hook_cfg in cfg.hooks:
             hook_cfg = hook_cfg.copy()
             priority = hook_cfg.pop('priority', 'NORMAL')
codereview_python_data_4287
         url = flow.request.pretty_url
     else:
         url = flow.request.url

-    terminalWidthLimit = shutil.get_terminal_size()[0] - 25
     if self.flow_detail < 1 and len(url) > terminalWidthLimit:
         url = url[:terminalWidthLimit] + "…"

     url = click.style(strutils.escape_control_characters(url), bold=True)

Rare case, but what happens if `get_terminal_size()[0]` is < 25? Why do we substract 25 here at all?

         url = flow.request.pretty_url
     else:
         url = flow.request.url

+    terminalWidthLimit = max(shutil.get_terminal_size()[0] - 25 , 50)
     if self.flow_detail < 1 and len(url) > terminalWidthLimit:
         url = url[:terminalWidthLimit] + "…"

     url = click.style(strutils.escape_control_characters(url), bold=True)
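The `max()` guard can be exercised without a real terminal; the reserved width and floor below are illustrative stand-ins for the constants in the diff:

```python
# Without the floor, width - 25 can reach zero or go negative on a tiny
# terminal, truncating the URL to nothing.
RESERVED = 25
FLOOR = 50

def limit_for(width):
    return max(width - RESERVED, FLOOR)

url = "https://example.com/" + "a" * 200   # 220 characters
for width in (200, 60, 10):
    lim = limit_for(width)
    shown = url[:lim] + "…" if len(url) > lim else url
    print(width, lim, len(shown))
# 200 175 176
# 60 50 51
# 10 50 51
```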
codereview_python_data_4292
     .. versionchanged:: 0.11.0
        Frames now 0-based instead of 1-based
     .. versionchanged:: 2.0.0
-       Reader now raises error if unitcell has neither 3 or 9 entries.
        Reader now reads a 3 entry zero unit cell (i.e. ``[0, 0, 0]``) as
        a being without dimension information (i.e. will the timestep
        dimension to ``None``).

```suggestion
       Reader now only parses boxes defined with 3 or 9 fields.
```

     .. versionchanged:: 0.11.0
        Frames now 0-based instead of 1-based
     .. versionchanged:: 2.0.0
+       Reader now only parses boxes defined with 3 or 9 fields.
        Reader now reads a 3 entry zero unit cell (i.e. ``[0, 0, 0]``) as
        a being without dimension information (i.e. will the timestep
        dimension to ``None``).
codereview_python_data_4298
     @staticmethod
     def _print_default_format_warning(file_format):
-        """Print a warning to stdout.

         Temporary warning (similar to a deprecation warning) that files
         are being downloaded in mmCIF.

Should this end with '(PRIVATE)' like in `Bio/Nexus/Nexus.py` so as to keep this consistent?

     @staticmethod
     def _print_default_format_warning(file_format):
+        """Print a warning to stdout (PRIVATE).

         Temporary warning (similar to a deprecation warning) that files
         are being downloaded in mmCIF.
codereview_python_data_4302
         echo = self.stub.Ping(notifier_pb2.PingRequest(data=data)).data
         return echo == data

-    def run(self, config_file, inventory_index_id):
         """Runs the notifier.

         Args:
-            config_file (str): Path the to the forseti config file.
             inventory_index_id (int): Inventory Index Id.

         Returns:

config file location should not be sent to the server. If we are ever going to host the server and the client on different machines, it will be a problem. Maybe send the file content or necessary parameters.

         echo = self.stub.Ping(notifier_pb2.PingRequest(data=data)).data
         return echo == data

+    def run(self, inventory_id):
         """Runs the notifier.

         Args:
             inventory_index_id (int): Inventory Index Id.

         Returns:
codereview_python_data_4315
         mode = self.rule['mode']

         has_violation = False
-        crypto_key = self.rule['key']
-        for key_data in crypto_key:
             rule_algorithms = key_data.get('algorithms')
             rule_protection_level = key_data.get('protection_level')
             rule_purpose = key_data.get('purpose')

crypto_key can be confusing here, crypto_key_rule might be better

         mode = self.rule['mode']

         has_violation = False
+        crypto_key_rule = self.rule['key']
+        for key_data in crypto_key_rule:
             rule_algorithms = key_data.get('algorithms')
             rule_protection_level = key_data.get('protection_level')
             rule_purpose = key_data.get('purpose')
codereview_python_data_4318
             spotipy_call = getattr(spotipy_client, endpoint)
             recently_played = spotipy_call(**kwargs)
             break
         except SpotifyException as e:
             retries -= 1
             if e.http_status == 429:

these could fail if the endpoint doesn't exist on the client, or if the kwargs aren't valid for that call. We should catch these specific cases and log them internally (e.g. imagine if spotipy changes the api at some point)

             spotipy_call = getattr(spotipy_client, endpoint)
             recently_played = spotipy_call(**kwargs)
             break
+        except (AttributeError, TypeError) as err:
+            current_app.logger.critical("Invalid spotipy endpoint or arguments:", err, exc_info=True)
+            return None
         except SpotifyException as e:
             retries -= 1
             if e.http_status == 429:
codereview_python_data_4323
     def gen_base_anchors(self):
         w = self.base_size
         h = self.base_size
-        if self.ctr is None:
-            x_ctr = self.ctr_offset * (w - 1)
-            y_ctr = self.ctr_offset * (h - 1)
         else:
-            x_ctr, y_ctr = self.ctr

         h_ratios = torch.sqrt(self.ratios)
         w_ratios = 1 / h_ratios

Is `-1` necessary?

     def gen_base_anchors(self):
         w = self.base_size
         h = self.base_size
+        if self.center is None:
+            x_center = self.center_offset * w
+            y_center = self.center_offset * h
         else:
+            x_center, y_center = self.center

         h_ratios = torch.sqrt(self.ratios)
         w_ratios = 1 / h_ratios
codereview_python_data_4325
         self.ready_event = threading.Event()
         self.stop_event = threading.Event()

-        self.server_sock = self._create_socket_and_bind()
-        # in case self.port = 0
-        self.port = self.server_sock.getsockname()[1]
-
     @classmethod
     def text_response_server(cls, text, request_timeout=0.5, **kwargs):
         def text_response_handler(sock):

I'm _really_ nervous about this being in `__init__`. Is there any reason it's here?

         self.ready_event = threading.Event()
         self.stop_event = threading.Event()

     @classmethod
     def text_response_server(cls, text, request_timeout=0.5, **kwargs):
         def text_response_handler(sock):
codereview_python_data_4326
         The time difference between frames (ps).  If :attr:`time`
         is set, then `dt` will be ignored.
     filename: string, optional
-        The name of the file from which this instance is created

     """
     super(MemoryReader, self).__init__()

Maybe state it will otherwise be None

         The time difference between frames (ps).  If :attr:`time`
         is set, then `dt` will be ignored.
     filename: string, optional
+        The name of the file from which this instance is created. Set to None
+        when created from an array

     """
     super(MemoryReader, self).__init__()
codereview_python_data_4330
         Parameters
         ----------
         row_labels : list-like, slice or label
-            The indices for the rows to extract.
         col_labels : list-like, slice or label
-            The indices for the columns to extract.

         Returns
         -------

docstring is now wrong

         Parameters
         ----------
         row_labels : list-like, slice or label
+            The row labels for the rows to extract.
         col_labels : list-like, slice or label
+            The column labels for the columns to extract.

         Returns
         -------
codereview_python_data_4334
             parent (Resource): The parent Resource.
             lifecycle_state (LifecycleState): The lifecycle state of the
                 bucket.
-            lifecycle (list): A list of dicts that contains actions
-                ("delete") and conditions ("age")
         """
         super(Bucket, self).__init__(
             resource_id=bucket_id,

A list of bucket lifecycle rules. Each item is a dict containing an action and conditions when this action should be taken.

             parent (Resource): The parent Resource.
             lifecycle_state (LifecycleState): The lifecycle state of the
                 bucket.
+            retentions (list): A list of RetentionInfo
         """
         super(Bucket, self).__init__(
             resource_id=bucket_id,
codereview_python_data_4335
 def _ref(var):
-    release = tf.__version__[:5]
-    if release < "2.0.0":
-        return var
-    else:
-        return var.ref() if hasattr(var, "ref") else var.experimental_ref()

 class DecoupledWeightDecayExtension:

tf.addons supports TF2 only anyway, maybe we can remove this string comparison and just return var.ref()/experimental_ref()?

 def _ref(var):
+    return var.ref() if hasattr(var, "ref") else var.experimental_ref()

 class DecoupledWeightDecayExtension:
codereview_python_data_4338
 class argmap:
-    """A decorating class which calls specified transformations on a function's
     arguments before calling it.

     Arguments can be specified either as strings, numerical indices,
     or (in the next example) tuples thereof

     @argmap(sum, 'x', 2)

```suggestion
    """Decorate a function by applying a map to its arguments

    A decorating class which calls specified transformations on a function's
```

 class argmap:
+    """A decorating class which applies a map to a function's arguments before
     calling it.

     Arguments can be specified either as strings, numerical indices,
     or (in the next example) tuples thereof

     @argmap(sum, 'x', 2)
codereview_python_data_4340
 # optimizer
 model = dict(
     pretrained='open-mmlab://resnext101_64x4d',
-    backbone=dict(
-        type='ResNeXt',
-        depth=101,
-        groups=64,
-        base_width=4,
-        num_stages=4,
-        out_indices=(0, 1, 2, 3),
-        frozen_stages=1,
-        norm_cfg=dict(type='BN', requires_grad=True),
-        style='pytorch'))
-optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

clean unnecessary arguments.

 # optimizer
 model = dict(
     pretrained='open-mmlab://resnext101_64x4d',
+    backbone=dict(type='ResNeXt', depth=101, groups=64, base_width=4))
+optimizer = dict(type='SGD', lr=0.01)
codereview_python_data_4342
     def __init__(self, streaming_jar, modules=None, streaming_args=None,
                  libjars=None, libjars_in_hdfs=None, jobconfs=None,
-                 archives=None, input_format=None, output_format=None,
-                 end_job_with_atomic_move_dir=True):
         def get(x, default):
             return x is not None and x or default
         self.streaming_jar = streaming_jar

Why not add as last argument?

     def __init__(self, streaming_jar, modules=None, streaming_args=None,
                  libjars=None, libjars_in_hdfs=None, jobconfs=None,
+                 input_format=None, output_format=None,
+                 end_job_with_atomic_move_dir=True, archives=None):
         def get(x, default):
             return x is not None and x or default
         self.streaming_jar = streaming_jar
codereview_python_data_4343
         # Arguments that are just blank spaces aren't really arguments
         # We need to get rid of those. If the user intended to pass a sequence
         # of spaces, it would come between quotes
-        clean_args = []
-        for a in args:
-            if isinstance(a, str):
-                if a.strip() != '':
-                    clean_args.append(a)
-            else:
-                clean_args.append(a)
-
-        args = clean_args
-
         verify_arg_signature(self.func, list(args), {})

         remainder: typing.Sequence[str] = []

`args` is defined as `typing.Sequence[str]` in the method signature, so this check seems superfluous?

         # Arguments that are just blank spaces aren't really arguments
         # We need to get rid of those. If the user intended to pass a sequence
         # of spaces, it would come between quotes
+        args = [a for a in args if a.strip() != '']
         verify_arg_signature(self.func, list(args), {})

         remainder: typing.Sequence[str] = []
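Since the signature already guarantees strings, the whole loop collapses to one comprehension; a quick sanity check:

```python
# Whitespace-only tokens are dropped; quoted spaces survive because the
# quotes make the stripped value non-empty.
def clean(args):
    return [a for a in args if a.strip() != '']

print(clean(["load", "   ", "flows.mitm", ""]))  # ['load', 'flows.mitm']
print(clean(['"   "']))                          # ['"   "']
```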
codereview_python_data_4344
 def read_numpy_offsets(filename):
-    """read offsets into dictionary. This assume offsets have been saved using
-    numpy

     Parameters
     ----------

Keep a single line as a summary, add everything after a blank, like

```reST
read offsets in `filename` into dictionary

This assumes offsets have been saved using numpy.
```

 def read_numpy_offsets(filename):
+    """read offsets into dictionary.
+
+    This assume offsets have been saved using numpy

     Parameters
     ----------
codereview_python_data_4351
     @property
     def is_unique(self):
         return self.nunique(dropna=False) == len(self)

     @property

@amyskov plz add docstring compatible with numpydoc standard (#1411)

     @property
     def is_unique(self):
+        """Check if Series has no duplicate values.
+
+        Returns
+        -------
+        bool
+            True if there is no duplicates in Series, False otherwise.
+
+        """
         return self.nunique(dropna=False) == len(self)

     @property
codereview_python_data_4358
 DEFAULT_LOG_FMT = ('%(asctime)s %(levelname)s '
                    '%(name)s(%(funcName)s): %(message).1024s')
-SYSLOG_LOG_FMT = ('%(levelname)s [forseti-security] %(__version__)s '
                   '%(name)s(%(funcName)s): %(message).1024s')

 # %(asctime)s is used as the marker by multiline parser to determine

We said we would put `[ ]` around the version. `[forseti-security] [v2.3.0]`

 DEFAULT_LOG_FMT = ('%(asctime)s %(levelname)s '
                    '%(name)s(%(funcName)s): %(message).1024s')
+SYSLOG_LOG_FMT = ('%(levelname)s [forseti-security] %[__version__]s '
                   '%(name)s(%(funcName)s): %(message).1024s')

 # %(asctime)s is used as the marker by multiline parser to determine
codereview_python_data_4360
 '''
-    This solution implements a breadth-start_bucket search of the graph
     of possible valid states for the two buckets until it reaches a state
     in which one of the two buckets contains the goal amount
 '''

This appears to be an unintentional change from a Replace-All; please revert it.

 '''
+    This solution implements a breadth-first search of the graph
     of possible valid states for the two buckets until it reaches a state
     in which one of the two buckets contains the goal amount
 '''
codereview_python_data_4364
                 'Rule %s already defined in rules: %s' % (
                     rule['name'], ', '.join(sorted(self.rules.keys()))))
-        if self._add_rule_callback:
-            if self._add_rule_callback(new_rule):
-                self.rules[new_rule['name']] = new_rule
-        else:
             self.rules[new_rule['name']] = new_rule

     def filtered_by_networks(self, networks):

(nit) Possible improvement in logic here.

```
add_rule = self._add_rule_callback(new_rule) if self._add_rule_callback else True
if add_rule:
    self.rules[new_rule['name']] = new_rule
```

                 'Rule %s already defined in rules: %s' % (
                     rule['name'], ', '.join(sorted(self.rules.keys()))))
+        callback_ok = (
+            self._add_rule_callback(new_rule)
+            if self._add_rule_callback else True)
+        if callback_ok:
             self.rules[new_rule['name']] = new_rule

     def filtered_by_networks(self, networks):
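A runnable sketch of the suggested collapse, with an illustrative veto callback:

```python
# "Run the hook if present, otherwise accept" as a single conditional
# expression instead of nested ifs.
def add_rule(rules, new_rule, add_rule_callback=None):
    add_ok = add_rule_callback(new_rule) if add_rule_callback else True
    if add_ok:
        rules[new_rule['name']] = new_rule

rules = {}
add_rule(rules, {'name': 'allow-ssh'})                  # no callback: accepted
add_rule(rules, {'name': 'deny-all'}, lambda r: False)  # vetoed
print(sorted(rules))  # ['allow-ssh']
```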
codereview_python_data_4365
     def list_exfilled_files(self, startdir=None):
         if not startdir:
             startdir = self.get_config('exfil_dir')
         exfil_files = dict()
         exfil_folders = [f.path for f in os.scandir(startdir) if f.is_dir()]

I went to the Exfills UI before running any operations, selected "all" and got an Internal Server Error on this line:

`FileNotFoundError: [Errno 2] No such file or directory: '/tmp/caldera'`

     def list_exfilled_files(self, startdir=None):
         if not startdir:
             startdir = self.get_config('exfil_dir')
+        if not os.path.exists(startdir):
+            return dict()
         exfil_files = dict()
         exfil_folders = [f.path for f in os.scandir(startdir) if f.is_dir()]
codereview_python_data_4370
         http_message = message

     if ctype := message.headers.get("content-type"):
         if ct := http.parse_content_type(ctype):
-            content_type = http.assemble_content_type(ct[0], ct[1], ct[2])

     description, lines, error = get_content_view(
         viewmode, content,

I believe we never need the content type attributes when selecting a contentview? Unless there's something I am missing, please revert the changes here.

         http_message = message

     if ctype := message.headers.get("content-type"):
         if ct := http.parse_content_type(ctype):
+            content_type = f"{ct[0]}/{ct[1]}"

     description, lines, error = get_content_view(
         viewmode, content,
codereview_python_data_4374
         obj_type = TRACER_TO_REAL_TYPE_MAP[type(obj)]
         real_type = TRACER_TO_REAL_TYPE_MAP[tracer_type]
         raise errors.InvalidReferenceError(
-            f'{str(refname)!r} exists, but is a '
-            f'{obj_type.get_schema_class_displayname()!r}, '
-            f'not a {real_type.get_schema_class_displayname()!r}',
             context=sourcectx,
         )

Hm, perhaps we could lose the quotes here. `'test::X' exists, but is a scalar type, not a object type` reads better to me (minus a more obvious article error).

         obj_type = TRACER_TO_REAL_TYPE_MAP[type(obj)]
         real_type = TRACER_TO_REAL_TYPE_MAP[tracer_type]
         raise errors.InvalidReferenceError(
+            f'{str(refname)!r} exists, but is '
+            f'{english.add_a(obj_type.get_schema_class_displayname())}, '
+            f'not {english.add_a(real_type.get_schema_class_displayname())}',
             context=sourcectx,
         )
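The revision leans on an `english.add_a` helper from the project; a naive approximation of such a helper (the real one may handle more cases):

```python
# Pick "a" or "an" from the first letter only - good enough for the
# type-name vocabulary used in the message above.
def add_a(noun: str) -> str:
    article = 'an' if noun[:1].lower() in 'aeiou' else 'a'
    return f'{article} {noun}'

print(f"'test::X' exists, but is {add_a('scalar type')}, "
      f"not {add_a('object type')}")
# 'test::X' exists, but is a scalar type, not an object type
```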
codereview_python_data_4384
 if SHOULD_INVENTORY_GROUPS:
     GROUPS_DOMAIN_SUPER_ADMIN_EMAIL = context.properties[
         'groups-domain-super-admin-email']
-    GSUITE_SERVICE_ACCOUNT_KEY_FILE = context.properties[
         'groups-service-account-key-file']

     # TODO: remove this in a future version

This will require changes to the docs. I suggest searching the gh-pages branch for the previous variable name.

 if SHOULD_INVENTORY_GROUPS:
     GROUPS_DOMAIN_SUPER_ADMIN_EMAIL = context.properties[
         'groups-domain-super-admin-email']
+    GROUPS_SERVICE_ACCOUNT_KEY_FILE = context.properties[
         'groups-service-account-key-file']

     # TODO: remove this in a future version
codereview_python_data_4387
         self.max_pokemon_storage = inventory.get_pokemon_inventory_size()
         self.last_pokemon_count = 0

-        self.config_transfer = self.config.get("transfer", True)
-        self.config_evolve = self.config.get("evolve", True)
         self.config_evolve_time = self.config.get("evolve_time", 20)
         self.config_evolve_for_xp = self.config.get("evolve_for_xp", True)
         self.config_evolve_only_with_lucky_egg = self.config.get("evolve_only_with_lucky_egg", False)

I'd prefer things to be off by default. If we use the default value, they are probably missing important stuff in their config.

         self.max_pokemon_storage = inventory.get_pokemon_inventory_size()
         self.last_pokemon_count = 0

+        self.config_transfer = self.config.get("transfer", False)
+        self.config_evolve = self.config.get("evolve", False)
         self.config_evolve_time = self.config.get("evolve_time", 20)
         self.config_evolve_for_xp = self.config.get("evolve_for_xp", True)
         self.config_evolve_only_with_lucky_egg = self.config.get("evolve_only_with_lucky_egg", False)
codereview_python_data_4388
     _row_partition_class = PandasOnDaskFrameRowPartition

     @classmethod
-    def get_indices(cls, axis, partitions, index_func=None):
         """This gets the internal indices stored in the partitions.

         Note: These are the global indices of the object. This is mostly useful

Since `index_func` has to be a callable,

```suggestion
    def get_indices(cls, axis, partitions, index_func):
```

     _row_partition_class = PandasOnDaskFrameRowPartition

     @classmethod
+    def get_indices(cls, axis, partitions, index_func):
         """This gets the internal indices stored in the partitions.

         Note: These are the global indices of the object. This is mostly useful
codereview_python_data_4394
         self.state = state
         self.retval = retval

-    @staticmethod
-    def get_name(task, req):
-        return req.shadow if hasattr(req, 'shadow') else task.name
-
     def handle_error_state(self, task, req, eager=False, call_errbacks=True):
         store_errors = not eager

If you modify this method signature to receive the name directly you can use it in both occurrences:

```
def get_task_name(name, request):
    return getattr(request, 'shadow', name)
```

The use of `getattr` is also a simplification here :smile: !

         self.state = state
         self.retval = retval

     def handle_error_state(self, task, req, eager=False, call_errbacks=True):
         store_errors = not eager
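The `getattr` default makes the `hasattr` branch unnecessary, as the suggestion shows; a standalone check:

```python
# getattr with a default: fall back to the task name when the request
# carries no shadow name.
class Request:
    pass

def get_task_name(name, request):
    return getattr(request, 'shadow', name)

req = Request()
print(get_task_name('tasks.add', req))   # tasks.add
req.shadow = 'tasks.add[shadow]'
print(get_task_name('tasks.add', req))   # tasks.add[shadow]
```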
codereview_python_data_4399
             self.vip_dictionary[Pokemons.id_for(key)] = 0

     def is_snipeable(self, pokemon):
-        pokeballs_count = self.inventory.get(Item.ITEM_POKE_BALL.value).count
-        greatballs_count = self.inventory.get(Item.ITEM_GREAT_BALL.value).count
-        ultraballs_count = self.inventory.get(Item.ITEM_ULTRA_BALL.value).count
-        all_balls_count = pokeballs_count + greatballs_count + ultraballs_count
-
         # Skip if already handled
         if self._is_cached(pokemon):
             self._trace('{} was already handled! Skipping...'.format(pokemon.get('pokemon_name')))

Yes, but this will result in potential repeated vague "Damn! ...." messages. Better left out of is_snipeable so it just cleanly skips. Alternatively, clean up is_snipeable so that it gives useful feedback, instead of just "Damn! Dunno what happened, maybe magic?" whenever a pokemon is skipped,not exist,bad iv etc.

             self.vip_dictionary[Pokemons.id_for(key)] = 0

     def is_snipeable(self, pokemon):
         # Skip if already handled
         if self._is_cached(pokemon):
             self._trace('{} was already handled! Skipping...'.format(pokemon.get('pokemon_name')))
codereview_python_data_4404
         self.tmaster.reset(addons)
         self.tmaster.addons.trigger("tick")

 class ProxyTestBase:
     # Test Configuration

I think we have a `human_address` function for this somewhere...?

         self.tmaster.reset(addons)
         self.tmaster.addons.trigger("tick")

+    def start(self):
+        super().start()
+        while True:
+            if self.tmaster:
+                break
+            time.sleep(0.01)
+
 class ProxyTestBase:
     # Test Configuration
codereview_python_data_4407
         refpos = positions.mean(axis=0)
         refpos = refpos.astype(np.float32, copy=False)
-        if target_position is None:
-            target = distances.apply_PBC(refpos, self.dimensions)
-        else:
-            target = distances.minimize_periodic_vector(reference_point=target_position, center_position=refpos, box=self.dimensions)
         positions += target - refpos

How can `target_position` be None?

         refpos = positions.mean(axis=0)
         refpos = refpos.astype(np.float32, copy=False)
+
+        target = distances.minimize_periodic_vector(reference_point=target_position, center_position=refpos, box=self.dimensions)
         positions += target - refpos
codereview_python_data_4408
 print('Start training...')
 # train
-gbm = lgb.LGBMRegressor(boosting_type='rgf',
-                        objective='regression',
                         num_leaves=31,
                         learning_rate=0.05,
                         n_estimators=20)

I think it's better to create a new example

 print('Start training...')
 # train
+gbm = lgb.LGBMRegressor(objective='regression',
                         num_leaves=31,
                         learning_rate=0.05,
                         n_estimators=20)
codereview_python_data_4412
or "REPORT" in raw_message # necessary until tail is updated in docker images. See this PR: # http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=commitdiff;h=v8.24-111-g1118f32 - or "tail: unrecognized file system type 0x794c7630" in raw_message or regex_filter and not re.search(regex_filter, raw_message) ): nit: Wondering if we should simply filter on `"tail: unrecognized file system type"`, or is the type identifier `0x794c7630` always the same? or "REPORT" in raw_message # necessary until tail is updated in docker images. See this PR: # http://git.savannah.gnu.org/gitweb/?p=coreutils.git;a=commitdiff;h=v8.24-111-g1118f32 + or "tail: unrecognized file system type" in raw_message or regex_filter and not re.search(regex_filter, raw_message) ):
codereview_python_data_4421
dest="url_param", metavar="URL", help=""" - Alternative parameter to specify a URL to attempt to - extract streams from. """ ) stream.add_argument( Is there a way we could make this more user friendly in terms of explanation? I feel like it is a bit confusing, not sure if there is a better way though. dest="url_param", metavar="URL", help=""" + A URL to attempt to extract streams from. + + If it's a HTTP URL then "http://" can be omitted. + + This is an alternative to setting the URL using a positional argument. """ ) stream.add_argument(
codereview_python_data_4422
 <h1>Error 503 Backend is unhealthy</h1>
 <p>Backend is unhealthy</p>
 <h3>Guru Mediation:</h3>
-  <p>Details: cache-sea4438-SEA 1645521621 3021675987</p>
 <hr>
 <p>Varnish cache server</p>
 </body>

Can we please change this to:

```
test_role_arn = 'arn:aws:sts::000000000000:role/rd_role'
```

From the external user's perspective, we should always be using the default account ID `000000000000` in LocalStack (which is also configurable). Mocked/hard-coded account IDs like `123456789012` (or other values for other APIs) should remain an internal implementation detail - hence we're performing the conversion between these formats in the API proxy listener.

 <h1>Error 503 Backend is unhealthy</h1>
 <p>Backend is unhealthy</p>
 <h3>Guru Mediation:</h3>
+  <p>Details: cache-sea4432-SEA 1645521621 2104936845</p>
 <hr>
 <p>Varnish cache server</p>
 </body>
codereview_python_data_4434
         AbstractCommandline.__init__(self, cmd, **kwargs)

     def _validate_incompatibilities(self, incompatibles):
-        """Use _validate method by the BLAST+ (PRIVATE)."""
         for a in incompatibles:
             if self._get_parameter(a):
                 for b in incompatibles[a]:

The meaning has changed, how about:

```python
"""Validate parameters for incompatibilities (PRIVATE).

Used by the _validate method.
"""
```

         AbstractCommandline.__init__(self, cmd, **kwargs)

     def _validate_incompatibilities(self, incompatibles):
+        """Validate parameters for incompatibilities (PRIVATE).
+
+        Used by the _validate method.
+        """
         for a in incompatibles:
             if self._get_parameter(a):
                 for b in incompatibles[a]:
codereview_python_data_4437
         self.base._module_persistor.set_data(self.repo_module, stream=self.stream,
                                              version=version, profiles=sorted(set(profiles)))

-    def nevra(self):
-        return self.artifacts()
-
     def artifacts(self):
         return self.module_metadata.get_rpm_artifacts().get()

Please, do you think it will be possible to remove the function and replace it by artifacts()?

         self.base._module_persistor.set_data(self.repo_module, stream=self.stream,
                                              version=version, profiles=sorted(set(profiles)))

     def artifacts(self):
         return self.module_metadata.get_rpm_artifacts().get()
codereview_python_data_4442
     :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
     """
     proxy = select_proxy(request.url, proxies)

-    if proxy and not request.url.lower().startswith('https'):
         url = urldefragauth(request.url)
     else:
         url = request.path_url

This is a regressing, breaking change. It's possible that `request.url` may begin with `https` but not actually be a HTTPS request. Please keep the urlparse logic in place. =)

     :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
     """
     proxy = select_proxy(request.url, proxies)

+    scheme = urlparse(request.url.lower()).scheme
+    if proxy and scheme != 'https':
         url = urldefragauth(request.url)
     else:
         url = request.path_url
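Why the prefix test is unsafe can be shown with a contrived scheme that merely starts with "https":

```python
from urllib.parse import urlparse

# A scheme that starts with "https" but is not HTTPS.
url = "httpsish://example.com/path"

print(url.lower().startswith("https"))          # True  - wrong signal
print(urlparse(url.lower()).scheme == "https")  # False - correct
```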
codereview_python_data_4449
     :return:
     """
     if self.client.token:
-        worker_index = os.environ.get('TAURUS_INDEX_ALL', 'no_index')
-        artifacts_zip = "artifacts.%s.zip" % worker_index
         mfile = self.__get_jtls_and_more()
         self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
         self.client.upload_file(artifacts_zip, mfile.getvalue())

It won't be in os.environ...

     :return:
     """
     if self.client.token:
+        worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
+        if worker_index:
+            suffix = '-' + worker_index
+        else:
+            suffix = ''
+        artifacts_zip = "artifacts%s.zip" % suffix
         mfile = self.__get_jtls_and_more()
         self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
         self.client.upload_file(artifacts_zip, mfile.getvalue())
codereview_python_data_4453
         fetched_recs = self.idx._get_record(99228)

-        self.compare_record(recs[0], fetched_recs[0])
-        self.compare_record(recs[1], fetched_recs[1])
-        self.compare_record(recs[2], fetched_recs[2])
-        self.compare_record(recs[3], fetched_recs[3])
-        self.compare_record(recs[4], fetched_recs[4])
-        self.compare_record(recs[5], fetched_recs[5])

 class TestSearchGoodMAF(unittest.TestCase):
     """Test index searching on a properly-formatted MAF."""

Do you find the unrolled loop clearer? To be it is a recipe for human error in the indices.

         fetched_recs = self.idx._get_record(99228)

+        self.compare_records(recs, fetched_recs)

 class TestSearchGoodMAF(unittest.TestCase):
     """Test index searching on a properly-formatted MAF."""
codereview_python_data_4454
     async def _create_relationships(self, relationships, operation):
         for relationship in relationships:
             await self._save_fact(operation, relationship.source)
-            if all(relationship.target):
-                await self._save_fact(operation, relationship.target)
             self.relationships.append(relationship)

     async def _save_fact(self, operation, trait):
-        if not any(f.trait == trait[0] and f.value == trait[1] for f in operation.all_facts()):
             self.facts.append(Fact(trait=trait[0], value=trait[1], score=1))

     async def _update_scores(self, operation, increment):

shouldn't this check be inside the save_fact function?

     async def _create_relationships(self, relationships, operation):
         for relationship in relationships:
             await self._save_fact(operation, relationship.source)
+            await self._save_fact(operation, relationship.target)
             self.relationships.append(relationship)

     async def _save_fact(self, operation, trait):
+        if all(trait) and not any(f.trait == trait[0] and f.value == trait[1] for f in operation.all_facts()):
             self.facts.append(Fact(trait=trait[0], value=trait[1], score=1))

     async def _update_scores(self, operation, increment):
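A condensed, synchronous sketch of the moved guard: `all(...)` rejects pairs with a missing half, `any(...)` skips duplicates (the fact store is a plain list here):

```python
facts = []

def save_fact(trait, value):
    # all() rejects pairs with a falsy half; any() skips known facts.
    if all((trait, value)) and not any(t == trait and v == value
                                       for t, v in facts):
        facts.append((trait, value))

save_fact('host.user.name', 'alice')
save_fact('host.user.name', 'alice')  # duplicate -> skipped
save_fact('host.user.name', None)     # incomplete -> skipped
print(facts)  # [('host.user.name', 'alice')]
```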
codereview_python_data_4465
         assert edge_dir == 'in'
         req_list = []
         partition_book = dist_graph.get_partition_book()
-        np_nodes = np.array(nodes)
         partition_id = F.asnumpy(
             partition_book.nid2partid(F.tensor(np_nodes)))
         for pid in range(partition_book.num_partitions()):

what happens if `nodes` is a DL tensor?

         assert edge_dir == 'in'
         req_list = []
         partition_book = dist_graph.get_partition_book()
+        np_nodes = np.array(toindex(nodes).tonumpy())
         partition_id = F.asnumpy(
             partition_book.nid2partid(F.tensor(np_nodes)))
         for pid in range(partition_book.num_partitions()):
codereview_python_data_4467
     return 'ANDROID' in (plt or platform())

-def is_android_gce(plt=None):
     """Return true if we are on android gce platform."""
     return 'ANDROID_X86' in (plt or platform())

nit: rename to is_android_cuttlefish to be clearer.

     return 'ANDROID' in (plt or platform())

+def is_android_cuttlefish(plt=None):
     """Return true if we are on android gce platform."""
     return 'ANDROID_X86' in (plt or platform())
codereview_python_data_4474
             # if it is a new pokemon to our dex, simulate app animation delay
             if exp_gain >= 500:
-                sleep (randrange(newtodex_wait_min, newtodex_wait_max))

     except IOError as e:
         self.logger.info('[x] Error while opening location file: %s' % e)

@mjmadsen the variable here should be catchsim_newtodex_wait_min and catchsim_newtodex_wait_max

             # if it is a new pokemon to our dex, simulate app animation delay
             if exp_gain >= 500:
+                sleep (randrange(catchsim_newtodex_wait_min, catchsim_newtodex_wait_max))

     except IOError as e:
         self.logger.info('[x] Error while opening location file: %s' % e)
codereview_python_data_4479
         Returns:
             str: A string representation of FirewallAction.
         """
-        return "FirewallAction(action=%s, rules=%s)" % (self.action, self.rules)

     def json_dict(self):
         """Gets the JSON key and values for the firewall action.

nit: could you use single quotes instead of double? (just for consistency)

         Returns:
             str: A string representation of FirewallAction.
         """
+        return 'FirewallAction(action=%s, rules=%s)' % (self.action, self.rules)

     def json_dict(self):
         """Gets the JSON key and values for the firewall action.
codereview_python_data_4490
"""Add another sequence or string to this sequence. The sequence is first converted to a Bio.Seq object before the addition. """ # Let the Seq object deal with the alphabet issues etc return self.toseq() + other I think it is unclear from text which object gets turned into a Seq object. How about something like ``This sequence is first converted to a Seq object before the addition of the other sequence.``? Should we say it will return a ``Seq`` object (not a ``DBSeq`` object)? """Add another sequence or string to this sequence. The sequence is first converted to a Bio.Seq object before the addition. + The returned object is a Bio.Seq, not a BioSQL.DBSeq """ # Let the Seq object deal with the alphabet issues etc return self.toseq() + other
codereview_python_data_4496
point["lat"] = float(point_tuple[0]) point["lng"] = float(point_tuple[1]) point["alt"] = float(point_tuple[2]) - if "mode" not in point: - point["mode"] = "walking" return points def load_gpx(self): Replace this if block for `point["mode"] = mode.get("mode", "walking")`. If a mode has been set, it'll stay as it is, otherwise assign "walking" to it. point["lat"] = float(point_tuple[0]) point["lng"] = float(point_tuple[1]) point["alt"] = float(point_tuple[2]) + point["mode"] = mode.get("mode", "walking") return points def load_gpx(self):
codereview_python_data_4505
 BigqueryAccessControlsData = collections.namedtuple(
     'BigqueryAccessControlsData',
-    ['resource', 'bigquery_acl'])

 class BigqueryScanner(base_scanner.BaseScanner):
     """Scanner for BigQuery acls."""

(here and in function parameters of find_policy_violations). To avoid ambiguity, I think we should either rename resource to something like parent_project, or change resource to storing the dataset resource. A possible advantage of using the dataset as the resource is it allows the bigquery scanner to examine other properties of the dataset (e.g. location) in the future.

 BigqueryAccessControlsData = collections.namedtuple(
     'BigqueryAccessControlsData',
+    ['parent_project', 'bigquery_acl'])

 class BigqueryScanner(base_scanner.BaseScanner):
     """Scanner for BigQuery acls."""
codereview_python_data_4510
         user_id,
         limit=min(_parse_int_arg("count", DEFAULT_ITEMS_PER_GET), MAX_ITEMS_PER_GET),
         from_id=min_ts,
-        to_id=max_ts,
-        order=request.args.get("order", "desc"),
     )
     listen_data = []
     for listen in listens:

does listenstore accept None as an argument? what's it's behaviour?

         user_id,
         limit=min(_parse_int_arg("count", DEFAULT_ITEMS_PER_GET), MAX_ITEMS_PER_GET),
         from_id=min_ts,
+        to_id=max_ts
     )
     listen_data = []
     for listen in listens:
codereview_python_data_4511
 # custom wrappers around ops
 from nvidia.dali import backend as _b
-from nvidia.dali._multiproc.messages import TaskArgs
 import nvidia.dali.types
 from nvidia.dali._utils.external_source_impl import \
     get_callback_from_source as _get_callback_from_source, \

This is bad - you're exposing stuff from a private module in a public module (external_source). The name should be private. I'd also recommend keeping some module name instead of importing the class directly. If you want to import only this symbol, make it private:

```suggestion
from nvidia.dali._multiproc.messages import TaskArgs as _msg_TaskArgs
```

 # custom wrappers around ops
 from nvidia.dali import backend as _b
+from nvidia.dali._multiproc.messages import TaskArgs as _TaskArgs
 import nvidia.dali.types
 from nvidia.dali._utils.external_source_impl import \
     get_callback_from_source as _get_callback_from_source, \
codereview_python_data_4522
         labelleft=False,
     )

-    ax.margins(margins[0], margins[1])

     node_collection.set_zorder(2)
     return node_collection

```suggestion
    if margins is not None:
        ax.margins(margins[0], margins[1])
```

I think this is the cause of (at least) most errors. We have to treat the default case explicitly.

         labelleft=False,
     )

+    if margins is not None:
+        ax.margins(margins[0], margins[1])

     node_collection.set_zorder(2)
     return node_collection
codereview_python_data_4525
     return Result.ok(data=data)

-def cast_validator(v):
     pub_key = v['pub_key']['data']
-    # NOTE: tendermint expects public to be ecoded in go-wire format
     # so `01` has to be appended
     pubKey = bytes.fromhex('01{}'.format(pub_key))
     return Validator(pubKey=pubKey,

Why does nothing have to be appended before the power? I guess an integer is a basic type and doesn't need a prefix in go-wire encoding.

     return Result.ok(data=data)

+def encode_validator(v):
     pub_key = v['pub_key']['data']
+    # NOTE: tendermint expects public to be encoded in go-wire format
     # so `01` has to be appended
     pubKey = bytes.fromhex('01{}'.format(pub_key))
     return Validator(pubKey=pubKey,
codereview_python_data_4543
     `num_outputs` : int, optional
         If specified, denotes the number of TensorLists that are produced by the source function.
-        If set the returned output would be a list.

     Keyword Args
     ------------

```suggestion
        If set, the operator returns a list of ``DataNode`` objects, otherwise a single ``DataNode``
        object is returned.
```

     `num_outputs` : int, optional
         If specified, denotes the number of TensorLists that are produced by the source function.
+        If set, the operator returns a list of ``DataNode`` objects, otherwise a single ``DataNode``
+        object is returned.

     Keyword Args
     ------------
codereview_python_data_4545
     def __init__(self):
         super(ApiritifNoseExecutor, self).__init__()
         self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log)
-        self._iteration_number = None
-        self._iteration_start = None

     def reporting_setup(self, prefix=None, suffix=None):
         if not self.reported:

Why store these fields at all? Why not just make iteration listener to receive `iteration_started(time)` and `iteration_ended(time)` so it decides itself what to store and do?

     def __init__(self):
         super(ApiritifNoseExecutor, self).__init__()
         self._tailer = FileReader(file_opener=lambda _: None, parent_logger=self.log)

     def reporting_setup(self, prefix=None, suffix=None):
         if not self.reported:
codereview_python_data_4550
""" username = user['musicbrainz_id'] # insert payload looks for 'id' key whereas the user dict here has it in 'user_id' key - user['id'] = user['user_id'] retries = 10 while retries >= 0: try: current_app.logger.debug('Submitting %d listens for user %s', len(listens), username) - insert_payload(listens, user, listen_type=listen_type) current_app.logger.debug('Submitted!') break except (InternalServerError, ServiceUnavailable) as e: I'm a little bit worried that we can just pass in random data to this method and sometimes it works and sometimes it doesn't. Even though the query from the `external_service_oauth` table happens to include mostly the right data, this change showed that it's easy to mess up and get the wrong value. As we're using pydantic classes in other places now, how about we create a `ListenSubmitUserMetadata` class (with a better name), which explicitly includes the 2 required fields, user_id and musicbrainz_id. Then we can create it in each place that it's needed from the data that we have, plus we get IDE completion in the right places when using it. """ username = user['musicbrainz_id'] # insert payload looks for 'id' key whereas the user dict here has it in 'user_id' key + user_metadata = SubmitListenUserMetadata(user_id=user['user_id'], musicbrainz_id=username) retries = 10 while retries >= 0: try: current_app.logger.debug('Submitting %d listens for user %s', len(listens), username) + insert_payload(listens, user_metadata, listen_type=listen_type) current_app.logger.debug('Submitted!') break except (InternalServerError, ServiceUnavailable) as e:
codereview_python_data_4551
raw_data = to_str(base64.b64decode(self.querystring.get('RawMessage.Data')[0]))
-        LOGGER.error('Raw email:\n%s' % raw_data)
source = get_source_from_raw(raw_data)
if not source:

Error log level seems a bit too verbose here - can we use `LOGGER.info` (or `debug`) instead?

raw_data = to_str(base64.b64decode(self.querystring.get('RawMessage.Data')[0]))
+        LOGGER.debug('Raw email:\n%s' % raw_data)
source = get_source_from_raw(raw_data)
if not source:
codereview_python_data_4552
try:
body_seek(body_pos)
except (IOError, OSError):
-            raise UnrewindableBodyError("An error occurred when rewinding request "
"body for redirect/retry.")
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError("Unable to record file position for rewinding "

Can you remove this patch please? urllib3 is a separate project that we vendor in, and we don't carry patches to it in tree. If you'd like to fix this, please make this patch upstream.

try:
body_seek(body_pos)
except (IOError, OSError):
+            raise UnrewindableBodyError("An error occured when rewinding request "
"body for redirect/retry.")
elif body_pos is _FAILEDTELL:
raise UnrewindableBodyError("Unable to record file position for rewinding "
codereview_python_data_4554
-# This file add snack case alias for coco api
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
def __init__(self, annotation_file=None):
super().__init__(annotation_file=annotation_file)

Maybe add a docstring to indicate the difference between _COCO and COCO and our motivation.

+# This file add snake case alias for coco api
from pycocotools.coco import COCO as _COCO
from pycocotools.cocoeval import COCOeval as _COCOeval
class COCO(_COCO):
+    """This class is almost the same as official pycocotools package.
+
+    It implements some snake case function aliases. So that the COCO class has
+    the same interface as LVIS class.
+    """
def __init__(self, annotation_file=None):
super().__init__(annotation_file=annotation_file)
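
For illustration, one way such snake case aliases are typically wired up. The method pair below is a guess at the pattern, not the exact alias list in the project.

```python
class COCOAliasSketch:
    """Hypothetical subclass showing the aliasing pattern."""

    def getAnnIds(self, img_ids=None):  # pycocotools-style camelCase
        return []

    def get_ann_ids(self, img_ids=None):
        # snake case alias so the class matches the LVIS-style interface
        return self.getAnnIds(img_ids=img_ids)
```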
codereview_python_data_4556
)
async def test_edgeql_select_subshape_filter_01(self):
async with self.assertRaisesRegexTx(
edgedb.QueryError,
"possibly an empty set returned",

Ideally we should produce a good hint on what to do to fix the error with the use of `assert_exists`.

)
async def test_edgeql_select_subshape_filter_01(self):
+        # TODO: produce a better error message with a hint here?
async with self.assertRaisesRegexTx(
edgedb.QueryError,
"possibly an empty set returned",
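
For reference, the kind of rewrite such a hint would point at: wrapping the possibly-empty expression in EdgeQL's `assert_exists`. The query below is a made-up example, not from the test suite.

```python
# Hypothetical EdgeQL showing assert_exists guarding a required link.
query = """
select Movie {
    title,
    director := assert_exists(.director)
}
"""
```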
codereview_python_data_4559
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer,
-                      constant_init, normal_init, trunc_normal_,
-                      trunc_normal_init)
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import MultiheadAttention
from mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint,
load_state_dict)
from torch.nn.modules.utils import _pair as to_2tuple

`trunc_normal_` cannot be imported from `mmcv.cnn`, please import it from `mmcv.cnn.utils.weight_init` or use `nn.init.trunc_normal_` instead.

import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (Conv2d, build_activation_layer, build_norm_layer,
+                      constant_init, normal_init, trunc_normal_init)
from mmcv.cnn.bricks.drop import build_dropout
from mmcv.cnn.bricks.transformer import MultiheadAttention
+from mmcv.cnn.utils.weight_init import trunc_normal_
from mmcv.runner import (BaseModule, ModuleList, Sequential, _load_checkpoint,
load_state_dict)
from torch.nn.modules.utils import _pair as to_2tuple
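
The two working import paths named in the comment, for quick reference (assuming the mmcv and PyTorch versions current at the time of the review):

```python
# option 1: mmcv's utility
from mmcv.cnn.utils.weight_init import trunc_normal_
# option 2: PyTorch's own implementation
from torch.nn.init import trunc_normal_ as trunc_normal_torch
```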
codereview_python_data_4561
except KeyboardInterrupt:
return
except BaseException:
-            logger.exception("Error in event callback (%s) for %r", callback, event)
@property
def task_module(self):

Always put changes like this in separate PRs please

except KeyboardInterrupt:
return
except BaseException:
+            logger.exception("Error in event callback for %r", event)
@property
def task_module(self):
codereview_python_data_4562
max_crm_calls = configs.get('max_crm_api_calls_per_100_seconds', 400)
crm_rate_limiter = RateLimiter(max_crm_calls, 100)
pipelines = [
-        LoadOrgIamPoliciesPipeline(
-            cycle_timestamp, configs, crm_rate_limiter, dao),
# TODO: add load projects pipeline
# TODO: add load project policies pipeline
]

There is a lot happening in main() which makes testing hard. It would be worth just doing the bare initialization required in main then calling out to one or more functions that build, run and report the pipeline status, which can be tested. (Note for a future PR)

max_crm_calls = configs.get('max_crm_api_calls_per_100_seconds', 400)
crm_rate_limiter = RateLimiter(max_crm_calls, 100)
+    crm_api_client = crm.CloudResourceManagerClient(
+        rate_limiter=crm_rate_limiter)
pipelines = [
+        load_org_iam_policies_pipeline.LoadOrgIamPoliciesPipeline(
+            cycle_timestamp, configs, crm_api_client, dao, parser),
# TODO: add load projects pipeline
# TODO: add load project policies pipeline
]
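
A rough sketch of the split the reviewer has in mind: main() keeps the bare initialization and delegates to functions a test can call directly. Every name here is hypothetical.

```python
def build_pipelines(cycle_timestamp, configs, api_client, dao, parser):
    """Returns the pipelines to run; trivial to assert on in a unit test."""
    return []  # e.g. [LoadOrgIamPoliciesPipeline(...)]

def run_pipelines(pipelines):
    """Runs each pipeline and collects its status for reporting."""
    return [pipeline.run() for pipeline in pipelines]

def main():
    pipelines = build_pipelines(None, {}, None, None, None)
    return run_pipelines(pipelines)
```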
codereview_python_data_4564
self.write(data)
-class RootPathHandler(tornado.web.RequestHandler):
def get(self):
-        visualization_graph = configuration.get_config().get('core', 'visualization_graph', '')
if visualization_graph == "d3":
self.redirect("/static/visualiser/index.d3.html")
else:
self.redirect("/static/visualiser/index.html")

Can you use the new syntax instead? `core().visualization_graph` I mean, and set the parameter to have a default of "svg" maybe? On the other hand this is in the server code. Maybe it's good to not intermix with the client logic.

self.write(data)
+class RootPathHandler(BaseTaskHistoryHandler):
def get(self):
+        visualization_graph = self._scheduler._config.visualization_graph
if visualization_graph == "d3":
self.redirect("/static/visualiser/index.d3.html")
+        elif visualization_graph == "svg":
+            self.redirect("/static/visualiser/index.html")
else:
self.redirect("/static/visualiser/index.html")
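
A hedged sketch of the "new syntax" the comment refers to: a config class whose parameter carries the "svg" default, so callers read `core().visualization_graph`. The exact class layout in luigi may differ.

```python
import luigi

class core(luigi.Config):
    # default "svg" as suggested in the review
    visualization_graph = luigi.Parameter(default="svg")

# handler side: core().visualization_graph instead of raw config lookups
```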
codereview_python_data_4575
new_instance = copy.deepcopy(self)
for k, v in new_instance.items():
if isinstance(v, torch.Tensor):
-                v.to(*args, **kwargs)
return new_instance
# Tensor-like methods

Does it support the slicing operator?

new_instance = copy.deepcopy(self)
for k, v in new_instance.items():
if isinstance(v, torch.Tensor):
+                new_instance[k] = v.to(*args, **kwargs)
return new_instance
# Tensor-like methods
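
On the slicing question: a dict-of-tensors container would need an explicit `__getitem__` for that, applying the index to every tensor field. A hypothetical sketch, not the project's implementation:

```python
import torch

class TensorDictSketch(dict):
    def __getitem__(self, item):
        if isinstance(item, str):
            return dict.__getitem__(self, item)
        # slice / index case: apply it to every tensor field
        new_instance = type(self)()
        for k, v in self.items():
            new_instance[k] = v[item] if isinstance(v, torch.Tensor) else v
        return new_instance

d = TensorDictSketch(scores=torch.arange(5.0))
print(d[:2]['scores'])  # tensor([0., 1.])
```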
codereview_python_data_4582
-def treeFromTraversals(preorder, inorder):
if len(preorder) != len(inorder):
raise ValueError("traversals must have the same length")
if set(preorder) != set(inorder):

Forgive me; I've been bouncing between too many different languages lately. My previous suggestion was incorrect, this should be `snake_case`.

```suggestion
def tree_from_traversals(preorder, inorder):
```

+def tree_from_traversals(preorder, inorder):
if len(preorder) != len(inorder):
raise ValueError("traversals must have the same length")
if set(preorder) != set(inorder):
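
For completeness, a self-contained sketch of the reconstruction under its snake_case name. The dict node shape is an assumption, not the exercise's required format.

```python
def tree_from_traversals(preorder, inorder):
    if not preorder:
        return {}
    root, i = preorder[0], inorder.index(preorder[0])
    return {
        "v": root,
        # preorder[1:i+1] / inorder[:i] describe the left subtree
        "l": tree_from_traversals(preorder[1:i + 1], inorder[:i]),
        "r": tree_from_traversals(preorder[i + 1:], inorder[i + 1:]),
    }

print(tree_from_traversals(["a", "i", "f"], ["i", "a", "f"]))
```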