codereview_python_data_6877
```
tx = b.create_transaction(b.me, user_vk, None, 'CREATE', payload=payload)
payload_uuid = tx['transaction']['data']['uuid']
matches = b.get_tx_by_payload_uuid(payload_uuid)
assert len(matches) == 1
assert matches[0]['id'] == tx['id']
```

This method will check the `bigchain` table for transactions with that matching payload. At this point you created the transaction, but it was not written to the database. To do so you need to create and write a block.

``` python
block = b.create_block([tx])
b.write_block(block, durability='hard')
```

```
tx = b.create_transaction(b.me, user_vk, None, 'CREATE', payload=payload)
payload_uuid = tx['transaction']['data']['uuid']
+block = b.create_block([tx])
+b.write_block(block, durability='hard')
+
matches = b.get_tx_by_payload_uuid(payload_uuid)
assert len(matches) == 1
assert matches[0]['id'] == tx['id']
```
codereview_python_data_6878
```
            test_with_categorical_variable
        )
        for test_with_interaction_constraints in [True, False]:
            for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
                params = {
                    "min_data": 20,
```

I'm ok with this change, but could you update the assert on line 1334 with an error message that will tell us which case it failed on if it fails?

```python
error_msg = f"Model not correctly constrained (test_with_interaction_constraints={test_with_interaction_constraints})"
assert is_correctly_constrained(
    constrained_model, test_with_categorical_variable
), error_msg
```

Without that, that particular test could fail and we wouldn't know which case it failed on just from the logs. I think it would also be useful to include the value of `monotone_constraints_method` in that message too (since it is similarly from an inner `for` loop), but that's not directly related to this PR so up to you if you want to do that at the same time or separately.

```
            test_with_categorical_variable
        )
        for test_with_interaction_constraints in [True, False]:
+           error_msg = ("Model not correctly constrained "
+                        f"(test_with_interaction_constraints={test_with_interaction_constraints})")
            for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
                params = {
                    "min_data": 20,
```
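For reference, a runnable sketch of folding both loop variables into the assertion message, as the reviewer suggests; `is_correctly_constrained` and the model object are stubbed here so the snippet stands alone (the real test defines them itself):

```python
# Stubs standing in for the test file's real fixtures.
def is_correctly_constrained(model, test_with_categorical_variable):
    return True

constrained_model, test_with_categorical_variable = object(), False

for test_with_interaction_constraints in [True, False]:
    for monotone_constraints_method in ["basic", "intermediate", "advanced"]:
        # Both inner-loop values go into the message so a failure pinpoints
        # the exact parameter combination from the logs alone.
        error_msg = (
            "Model not correctly constrained "
            f"(test_with_interaction_constraints={test_with_interaction_constraints}, "
            f"monotone_constraints_method={monotone_constraints_method})"
        )
        assert is_correctly_constrained(
            constrained_model, test_with_categorical_variable
        ), error_msg
```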
codereview_python_data_6879
```
            violations (dict): Violations to be uploaded as findings.
            organization_id (str): The id prefixed with 'organizations/'.
        """
-       findings = self._transform_for_cscc_api(violations)
        client = securitycenter.SecurityCenterClient()
```

Can we put a log message here?

```
            violations (dict): Violations to be uploaded as findings.
            organization_id (str): The id prefixed with 'organizations/'.
        """
+       findings = self._transform_for_api(violations)
        client = securitycenter.SecurityCenterClient()
```
codereview_python_data_6889
```
    format = 'TNG'
    units = {'time': 'ps', 'length': 'nm', 'velocity': 'nm/ps',
             'force': 'kJ/(mol*nm)'}
-   _writer = TRRWriter
-   _file = TRRFile

    def __init__(self, filename, convert_units=True, **kwargs):
        """
```

Why do we need '_frame'? We can just use ts.frame

```
    format = 'TNG'
    units = {'time': 'ps', 'length': 'nm', 'velocity': 'nm/ps',
             'force': 'kJ/(mol*nm)'}
+   # _writer = TRRWriter
+   # _file = TRRFile

    def __init__(self, filename, convert_units=True, **kwargs):
        """
```
codereview_python_data_6890
```
    @staticmethod
    def _convert_fs_path(fs_path):
-       # Only Python 2 'unioode' needs to be converted to str/bytes
        try:
            if isinstance(fs_path, unicode):
                fs_path = fs_path.encode(sys.getfilesystemencoding())
```

```suggestion
        # Only Python 2 'unicode' needs to be converted to str/bytes
```

```
    @staticmethod
    def _convert_fs_path(fs_path):
+       # Only Python 2 'unicode' needs to be converted to str/bytes
        try:
            if isinstance(fs_path, unicode):
                fs_path = fs_path.encode(sys.getfilesystemencoding())
```
codereview_python_data_6902
```
        if isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
            duration = hold + rampup
        else:
-           duration = 0

        trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
                            testclass="ThreadGroup", testname=testname)
```

what if one of them is property and one is not?

```
        if isinstance(rampup, numeric_types) and isinstance(hold, numeric_types):
            duration = hold + rampup
        else:
+           duration = 1

        trg = etree.Element("ThreadGroup", guiclass="ThreadGroupGui",
                            testclass="ThreadGroup", testname=testname)
```
codereview_python_data_6906
```
    async def task_agent_with_ability(self, paw=None, ability_id=None):
        abilities = await self.get_service('data_svc').locate('abilities', match=dict(ability_id=ability_id))
-       agent = await self.get_service('data_svc').locate('agents', match=dict(paw=paw))
-       if abilities and agent:
            agent_abilities = await agent[0].capabilities(ability_set=abilities)
-           op = [op for op in await self.get_service('data_svc').locate('operations', match=dict(state='running'))
-                 if agent[0] in await op.active_agents()]
-           if op and agent_abilities:
                return await op[0].build_and_apply_custom_link(agent=agent[0], ability=agent_abilities[0])

    async def get_link_pin(self, json_data):
```

this is way better. last thing: instead of defining agent here and then finding it in the operation below, can we just do the operation loop up here (returning an operation object, not an agent).

```
    async def task_agent_with_ability(self, paw=None, ability_id=None):
        abilities = await self.get_service('data_svc').locate('abilities', match=dict(ability_id=ability_id))
+       op = [op for op in await self.get_service('data_svc').locate('operations', match=dict(state='running'))
+             for agent in await op.active_agents() if agent.paw == paw]
+       if abilities and op:
+           agent = (await op[0].get_active_agent_by_paw(paw=paw))[0]
            agent_abilities = await agent[0].capabilities(ability_set=abilities)
+           if agent_abilities:
                return await op[0].build_and_apply_custom_link(agent=agent[0], ability=agent_abilities[0])

    async def get_link_pin(self, json_data):
```
codereview_python_data_6917
```
    if isinstance(arrowsize, list):
        if len(arrowsize) != len(edge_pos):
-           raise ValueError("arrowsize should have the same length as G.edges")
    else:
        mutation_scale = arrowsize  # scale factor of arrow head
```

This error message will be incorrect in the case where a user passes in an explicit `edgelist` that has fewer edges than `G.edges`. A simple fix might be to simply replace "G.edges" with "edgelist", which is slightly more accurate (the default edgelist is `list(G.edges)`) if not quite perfect!

```
    if isinstance(arrowsize, list):
        if len(arrowsize) != len(edge_pos):
+           raise ValueError("arrowsize should have the same length as edgelist")
    else:
        mutation_scale = arrowsize  # scale factor of arrow head
```
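For context, a small example of how a per-edge `arrowsize` list pairs with an explicit `edgelist` rather than with `G.edges` — this assumes a NetworkX version whose `draw_networkx_edges` accepts a list here (the feature this review discusses) and needs matplotlib installed:

```python
import matplotlib.pyplot as plt
import networkx as nx

G = nx.DiGraph([(1, 2), (2, 3), (3, 1)])
pos = nx.spring_layout(G, seed=42)
edgelist = [(1, 2), (2, 3)]   # explicit subset, smaller than G.edges
arrowsize = [10, 30]          # must match len(edgelist), not len(G.edges)
nx.draw_networkx_edges(G, pos, edgelist=edgelist, arrowsize=arrowsize)
plt.show()
```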
codereview_python_data_6919
```
        if src[self.RESP_TIMES]:
            self[self.RESP_TIMES].add(src[self.RESP_TIMES])
-
-       if not self[self.PERCENTILES]:
            # using existing percentiles
            # FIXME: it's not valid to overwrite, better take average
            self[self.PERCENTILES] = copy.deepcopy(src[self.PERCENTILES])
```

`elif` changed to `if`. Won't it change the logic?

```
        if src[self.RESP_TIMES]:
            self[self.RESP_TIMES].add(src[self.RESP_TIMES])
+       elif not self[self.PERCENTILES]:
            # using existing percentiles
            # FIXME: it's not valid to overwrite, better take average
            self[self.PERCENTILES] = copy.deepcopy(src[self.PERCENTILES])
```
codereview_python_data_6921
```
        Column
            A new column
        """
-       if isinstance(self.storage, distributed.dist_tensor.DistTensor):
            # copy actual tensor from DistTensor, if self.storage is a DistTensor
            self._copy_dist_tensor()
        col = self.clone()
```

Is this needed? `Column.data` will call `_copy_dist_tensor` anyway.

```
        Column
            A new column
        """
+       if isinstance(self.storage, distributed.DistTensor):
            # copy actual tensor from DistTensor, if self.storage is a DistTensor
            self._copy_dist_tensor()
        col = self.clone()
```
codereview_python_data_6927
```
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
- <p>Details: cache-sea4463-SEA 1645521622 3631596217</p>
  <hr>
  <p>Varnish cache server</p>
</body>
```

I think we're missing `MessageConversion._fix_error_codes(method, data, response)` here.

```
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4453-SEA 1645521622 611658737</p>
  <hr>
  <p>Varnish cache server</p>
</body>
```
codereview_python_data_6933
```
        pass

    def _matrix(self, options):
-       """Creates a matrix for NEXUS object (PRIVATE)"""
        if not self.ntax or not self.nchar:
            raise NexusError('Dimensions must be specified before matrix!')
        self.matrix = {}
```

And the trailing full stop too please.

```
        pass

    def _matrix(self, options):
+       """Creates a matrix for NEXUS object (PRIVATE)."""
        if not self.ntax or not self.nchar:
            raise NexusError('Dimensions must be specified before matrix!')
        self.matrix = {}
```
codereview_python_data_6936
""" COVERAGE_UNCHANGED_SPECIFICATION = QuerySpecification( - adjusted_weight=0.65, threshold=1.0, query_format=COVERAGE_UNCHANGED_FORMAT, formatter=_coverage_formatter, Can you explain this more. """ COVERAGE_UNCHANGED_SPECIFICATION = QuerySpecification( + adjusted_weight=0.70, threshold=1.0, query_format=COVERAGE_UNCHANGED_FORMAT, formatter=_coverage_formatter,
codereview_python_data_6937
```
    num_valid_anchors = anchors.shape[0]
    bbox_targets = torch.zeros_like(anchors)
    bbox_weights = torch.zeros_like(anchors)
-   labels = anchors.new_zeros(num_valid_anchors, dtype=torch.long)
    label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)

    pos_inds = sampling_result.pos_inds
    neg_inds = sampling_result.neg_inds
-   # remind new system set FG cat_id: [0, num_class-1], BG cat_id: num_class
-   if gt_labels is not None:
-       labels += num_classes
    if len(pos_inds) > 0:
        pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes,
                                      sampling_result.pos_gt_bboxes,
```

Could we deal with rpn condition together to reduce ambiguity?

```
    num_valid_anchors = anchors.shape[0]
    bbox_targets = torch.zeros_like(anchors)
    bbox_weights = torch.zeros_like(anchors)
+   labels = anchors.new_full(
+       num_valid_anchors, background_label, dtype=torch.long)
    label_weights = anchors.new_zeros(num_valid_anchors, dtype=torch.float)

    pos_inds = sampling_result.pos_inds
    neg_inds = sampling_result.neg_inds
    if len(pos_inds) > 0:
        pos_bbox_targets = bbox2delta(sampling_result.pos_bboxes,
                                      sampling_result.pos_gt_bboxes,
```
codereview_python_data_6940
```
    )
    unrejected_date = models.DateTimeField(null=True, blank=True)

-   class Source(models.TextChoices):
        TRANSLATION_MEMORY = "translation-memory", "Translation Memory"
        GOOGLE_TRANSLATE = "google-translate", "Google Translate"
        MICROSOFT_TRANSLATOR = "microsoft-translator", "Microsoft Translator"
```

I'd rename the `Source` class to `MachinerySource` to be more explicit.

```
    )
    unrejected_date = models.DateTimeField(null=True, blank=True)

+   class MachinerySource(models.TextChoices):
        TRANSLATION_MEMORY = "translation-memory", "Translation Memory"
        GOOGLE_TRANSLATE = "google-translate", "Google Translate"
        MICROSOFT_TRANSLATOR = "microsoft-translator", "Microsoft Translator"
```
codereview_python_data_6945
```
        self.cumulative_stats = CumulativeStats()
        stats_pane = Pile([(WEIGHT, 0.50, self.latest_stats),
-                          (1, Filler(Divider())),
                           (WEIGHT, 0.50, self.cumulative_stats), ])
        self.graphs = ThreeGraphs()
```

We don't need this divider

```
        self.cumulative_stats = CumulativeStats()
        stats_pane = Pile([(WEIGHT, 0.50, self.latest_stats),
                           (WEIGHT, 0.50, self.cumulative_stats), ])
        self.graphs = ThreeGraphs()
```
codereview_python_data_6947
```
        }
    }
}
-
-
-TINY_CONTEXT = {
-    "@context": {
-        "ExhibitionEvent": {"@id": "schema:ExhibitionEvent"},
-        "Eye": {"@id": "schema:Eye"}
-    }
-}
```

Do we have integration tests trying to write these Context examples?

```
        }
    }
}
```
codereview_python_data_6949
""" Test if tax values are known for this basket """ - for line in self.all_lines(): - if not line.is_tax_known: - return False - return True @property def total_excl_tax(self): Nitpicking... Can be replaced by ``` return all([line.is_tax_known for line in self.all_lines()]) ``` """ Test if tax values are known for this basket """ + return all([line.is_tax_known for line in self.all_lines()]) @property def total_excl_tax(self):
codereview_python_data_6953
```
        request = Request(data=encoded_data, headers=headers, method=method)
        return request
    elif req_data.get('Action', [None])[0] == 'SendMessage':
-       queue_url = req_data.get('QueueUrl', path)[0]
        queue_name = queue_url[queue_url.rindex('/') + 1:]
        message_body = req_data.get('MessageBody', [None])[0]
        if lambda_api.process_sqs_message(message_body, queue_name):
```

I think this should be wrapped in an array, otherwise we're only selecting the first character of `path`:

```
queue_url = req_data.get('QueueUrl', [path])[0]
```

(That may actually be the reason why one of the tests currently fails).

```
        request = Request(data=encoded_data, headers=headers, method=method)
        return request
    elif req_data.get('Action', [None])[0] == 'SendMessage':
+       queue_url = req_data.get('QueueUrl', [path])[0]
        queue_name = queue_url[queue_url.rindex('/') + 1:]
        message_body = req_data.get('MessageBody', [None])[0]
        if lambda_api.process_sqs_message(message_body, queue_name):
```
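A minimal demonstration of why the default needs wrapping: `dict.get` returns the fallback unchanged, so indexing a bare string takes its first character instead of the whole value:

```python
req_data = {}                 # no 'QueueUrl' key, so the default is used
path = '/queue/my-queue'

# Unwrapped default: [0] indexes into the string itself.
assert req_data.get('QueueUrl', path)[0] == '/'
# Wrapped default: [0] yields the whole fallback value, matching the
# shape of parsed query parameters (lists of values).
assert req_data.get('QueueUrl', [path])[0] == path
```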
codereview_python_data_6956
```
def get_expr_referrers(schema: s_schema.Schema,
-                      obj: so.Object) -> Dict[Any, Any]:
    """Return schema referrers with refs in expressions."""

    refs = schema.get_referrers_ex(obj)
```

The correct annotation for the return type is `Dict[so.Object, str]`

```
def get_expr_referrers(schema: s_schema.Schema,
+                      obj: so.Object) -> Dict[so.Object, str]:
    """Return schema referrers with refs in expressions."""

    refs = schema.get_referrers_ex(obj)
```
codereview_python_data_6960
```
    label = param.Boolean(default=True, doc="""
        Whether to use the parameter documentation as the dimension label""")

    def __call__(self, parameterized, **kwargs):
        types = {param.String:str, param.Integer:int}
```

Can the precedence threshold be a parameter (defaulting to 0)?

```
    label = param.Boolean(default=True, doc="""
        Whether to use the parameter documentation as the dimension label""")

+   precedence_threshold = param.Number(default=0, doc="""
+       The threshold below which parameters are ignored.""")

    def __call__(self, parameterized, **kwargs):
        types = {param.String:str, param.Integer:int}
```
codereview_python_data_6964
```
                 stride=1,
                 padding=0,
                 dilation=1,
-                deformable_groups=1):
        super(DeformConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
```

The default value should be kept consistent.

```
                 stride=1,
                 padding=0,
                 dilation=1,
+                deformable_groups=1,
+                bias=None):
+       assert bias is None
        super(DeformConv, self).__init__()

        self.in_channels = in_channels
        self.out_channels = out_channels
```
codereview_python_data_6970
```
class _NoLLSLenField(LenField):

    def i2m(self, pkt, x):
        if x is None:
            x = self.adjust(len(pkt.payload))
```

Please add a docstring. Also, why did you use an underscore to name this object?

```
class _NoLLSLenField(LenField):
+   """
+   LenField that will ignore the size of OSPF_LLS_Hdr if it exists
+   in the payload
+   """

    def i2m(self, pkt, x):
        if x is None:
            x = self.adjust(len(pkt.payload))
```
codereview_python_data_6991
```
        self.next_update = datetime.now() + timedelta(seconds=self.min_interval)

-   def _process_config(self):
-       """
-       Fetches the configuration for this worker and stores the values internally.
-       :return: Nothing.
-       :rtype: None
-       """
-       self.min_interval = int(self.config.get('min_interval', self.DEFAULT_MIN_INTERVAL))
-       self.displayed_stats = self.config.get('stats', self.DEFAULT_DISPLAYED_STATS)
-       self.terminal = self.config.get('terminal', self.TERMINAL)

    def _get_stats_title(self, player_stats):
        """
```

Here you rewrite all the configs again. IMHO, this code should go to `def __init__` and instead of the constants as default we can have actual values of these constants.

```
        self.next_update = datetime.now() + timedelta(seconds=self.min_interval)

    def _get_stats_title(self, player_stats):
        """
```
codereview_python_data_6993
```
    return get_archive_type(filename) != ArchiveType.UNKNOWN


-def _normalize_filename(filename):
-   """Normalize file name from archive for directory traversal detection."""
-   if filename.startswith('./'):
-       filename = filename[2:]
-   if os.path.altsep:
-       filename = filename.replace(os.path.altsep, os.path.sep)
-   filename = filename.encode('ascii', 'ignore')
-   filename = filename.rstrip(os.path.sep)
-   return filename


def unpack(archive_path, output_directory, trusted=False,
```

can we just use os.path.normpath ?

```
os.path.normpath(path)
Normalize a pathname by collapsing redundant separators and up-level references so that A//B, A/B/, A/./B and A/foo/../B all become A/B. This string manipulation may change the meaning of a path that contains symbolic links. On Windows, it converts forward slashes to backward slashes. To normalize case, use normcase().
```

```
    return get_archive_type(filename) != ArchiveType.UNKNOWN


def unpack(archive_path, output_directory, trusted=False,
```
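A short sketch of the `os.path.normpath` behavior relevant to this suggestion; note that it resolves `..` lexically rather than rejecting it, so a traversal check is still needed on top of normalization:

```python
import os.path

# normpath collapses '.' segments and redundant separators,
# using the platform separator on both POSIX and Windows.
assert os.path.normpath('./a//b/./c') == os.path.join('a', 'b', 'c')

# ...but up-level references that escape the root survive as-is,
# so '..' components must still be checked explicitly.
assert os.path.normpath('../x') == os.path.join('..', 'x')
```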
codereview_python_data_6994
```
    elif isinstance(result_files, dict):
        result_file = result_files[res_type]
    else:
-       assert NotImplementedError
    assert result_file.endswith('.json')

    coco_dets = coco.loadRes(result_file)
```

May raise a TypeError here.

```
    elif isinstance(result_files, dict):
        result_file = result_files[res_type]
    else:
+       assert TypeError('result_files must be a str or dict')
    assert result_file.endswith('.json')

    coco_dets = coco.loadRes(result_file)
```
codereview_python_data_6995
""" Utility to remove a temporary directory during program exit. """ try: shutil.rmtree(dirname) - print(("Removed temporary directory: %s" % dirname)) except OSError: # if the temp dir was removed already by other means pass What is the second set of brackets for? """ Utility to remove a temporary directory during program exit. """ try: shutil.rmtree(dirname) + print("Removed temporary directory: %s" % dirname) except OSError: # if the temp dir was removed already by other means pass
codereview_python_data_6997
```
from mitmproxy import options
from .. import tservers
import pytest
-from unittest import mock
-
-
-class UrlError(Exception):
-    pass
-
-
-def mock_log(message):
-    if "Invalid URL" in message:
-        raise UrlError(message)


class TestFlowlist(tservers.MasterTest):
```

It is probably easier if you directly check that `signals.status_message.send` was called with specific arguments, instead of using a side effect + exception wrapper. E.g., `Mock.assert_called_once_with` or similar.

```
from mitmproxy import options
from .. import tservers
import pytest
+from unittest import mock as Mock


class TestFlowlist(tservers.MasterTest):
```
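A generic illustration of the suggested approach, with a bare `Mock` standing in for mitmproxy's `signals.status_message` (an assumption for brevity; the real test patches the actual signal object):

```python
from unittest import mock

# A Mock auto-creates attributes, so .send records its calls.
status_message = mock.Mock()

# Code under test would emit the status message...
status_message.send("Invalid URL: foo://bar")

# ...and the test asserts on the call directly, with no side_effect
# or custom exception wrapper needed.
status_message.send.assert_called_once_with("Invalid URL: foo://bar")
```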
codereview_python_data_7000
```
    def _use_berry(self, berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball):
        # Delay to simulate selecting berry
-       action_delay(self.config.action_wait_min, self.config.action_wait_max)
        new_catch_rate_by_ball = []
        self.emit_event(
            'pokemon_catch_rate',
```

Selecting a berry doesn't make a request to the servers. It's handled in the app. When you want to select a berry in the app you open your inventory. I guess this trigger a call to update the inventory. Then you select a berry, but Niantic doesn't know about it. It knows only when you throw it on the pokemon.

```
    def _use_berry(self, berry_id, berry_count, encounter_id, catch_rate_by_ball, current_ball):
        # Delay to simulate selecting berry
+       action_delay(self.config.catchsim_berry_wait_min, self.config.catchsim_berry_wait_max)
        new_catch_rate_by_ball = []
        self.emit_event(
            'pokemon_catch_rate',
```
codereview_python_data_7003
```
        .distinct()
    try:
-       similar_artist_df.take(1)[0]
    except IndexError:
        current_app.logger.error('Similar artists not generated.', exc_info=True)
        raise
```

would prefer to do this in a separate function

```
        .distinct()
    try:
+       _is_empty_dataframe(similar_artist_df)
    except IndexError:
        current_app.logger.error('Similar artists not generated.', exc_info=True)
        raise
```
codereview_python_data_7006
```
                           description='Port for webhdfs')
    user = luigi.Parameter(default='', description='Defaults to $USER envvar',
                           config_path=dict(section='hdfs', name='user'))
-   client_type = luigi.Parameter(default='insecure',
-                                 description='Type of client to use. One of insecure, kerberos or token')
-   token = luigi.Parameter(default='', description='Hadoop delegation token, only used when client_type="token"')


class WebHdfsClient(hdfs_abstract_client.HdfsFileSystem):
```

Maybe use ChoiceParameter?

```
                           description='Port for webhdfs')
    user = luigi.Parameter(default='', description='Defaults to $USER envvar',
                           config_path=dict(section='hdfs', name='user'))
+   client_type = luigi.Parameter(default='insecure', description='Type of client to use. Can be insecure or kerberos')


class WebHdfsClient(hdfs_abstract_client.HdfsFileSystem):
```
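A hedged sketch of the `ChoiceParameter` suggestion, assuming a luigi version where `ChoiceParameter` validates the value against a fixed set at parse time (the `webhdfs` config class below is illustrative, not the project's actual section layout):

```python
import luigi

class webhdfs(luigi.Config):
    # ChoiceParameter rejects anything outside `choices` up front,
    # instead of failing later inside the client code.
    client_type = luigi.ChoiceParameter(
        choices=['insecure', 'kerberos'],
        default='insecure',
        description='Type of client to use. Can be insecure or kerberos')
```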
codereview_python_data_7014
```
import numpy as np
import pytest
import torch
-import torch.nn as nn

from mmdet import digit_version
from mmdet.models.dense_heads import RetinaHead, YOLOV3Head
-from .utils import convert_result_list, verify_model
-
-onnx_io = 'tmp.onnx'

data_path = osp.join(osp.dirname(__file__), 'data')
```

Could we move this class to `utils`?

```
import numpy as np
import pytest
import torch

from mmdet import digit_version
from mmdet.models.dense_heads import RetinaHead, YOLOV3Head
+from .utils import (WrapFunction, convert_result_list, ort_validate,
+                    verify_model)

data_path = osp.join(osp.dirname(__file__), 'data')
```
codereview_python_data_7017
```
                    continue

                if self.breakable_incubator:
-                   if incubator.get('uses_remaining') is not None:
                        if egg["km"] not in self.breakable_incubator:
                            continue

                if self.infinite_incubator:
-                   if incubator.get('uses_remaining') is None:
                        if egg["km"] not in self.infinite_incubator:
                            continue
```

Is this check necessary?

```
                    continue

                if self.breakable_incubator:
+                   if incubator.get('uses_remaining') is not None:  # test if the incubator is of type breakable
                        if egg["km"] not in self.breakable_incubator:
                            continue

                if self.infinite_incubator:
+                   if incubator.get('uses_remaining') is None:  # test if the incubator is of type infinite
                        if egg["km"] not in self.infinite_incubator:
                            continue
```
codereview_python_data_7018
```
    def __init__(self, in_vm_artifacts_profile_json):
        if in_vm_artifacts_profile_json and not in_vm_artifacts_profile_json.isspace():
-           self.__parse(in_vm_artifacts_profile_json)
-
-   def __parse(self, json_str):
-       # trim null and whitespaces
-       trimmed = json_str.rstrip(' \t\r\n\0')
-       self.__dict__.update(json.loads(trimmed))

    def is_extension_handlers_handling_on_hold(self):
        # hasattr() is not available in Python 2.6
```

might want to put this in textutil, could be handy elsewhere

```
    def __init__(self, in_vm_artifacts_profile_json):
        if in_vm_artifacts_profile_json and not in_vm_artifacts_profile_json.isspace():
+           self.__dict__.update(parse_json(in_vm_artifacts_profile_json))

    def is_extension_handlers_handling_on_hold(self):
        # hasattr() is not available in Python 2.6
```
codereview_python_data_7021
```
    def remote_shutdown(self):
        udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
-       udp_sock.sendto(b"Shutdown", ("localhost", self.management_port))
        self.log.debug("Shutdown command sent")
        time.sleep(10)
-       udp_sock.sendto(b"StopTestNow", ("localhost", self.management_port))
        self.log.debug("StopTestNow command sent")

    @staticmethod
```

After first command, check if JMeter has finished

```
    def remote_shutdown(self):
        udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+       udp_sock.sendto(b"Shutdown", ("127.0.0.1", self.management_port))
        self.log.debug("Shutdown command sent")
        time.sleep(10)
+       udp_sock.sendto(b"StopTestNow", ("127.0.0.1", self.management_port))
        self.log.debug("StopTestNow command sent")

    @staticmethod
```
codereview_python_data_7026
```
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

-       seg_map = img_info['filename'].split(".")[0] + ".png"

        ann = dict(
            bboxes=gt_bboxes,
```

Thank you for your feedback, you may also need to consider the scenario of `a.b.c.jpg`.

```
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

+       seg_map = img_info['filename'].rsplit(".", 1)[0] + ".png"

        ann = dict(
            bboxes=gt_bboxes,
```
codereview_python_data_7033
```
        self.assertEqual(item['attr2'], 'value2')

        attributes = [{'AttributeName': 'id', 'AttributeType': STRING}]
-       userIdIdx = [
            {'Create': {
                'IndexName': 'id-index',
                'KeySchema': [{
```

nitpick: could be changed from `camelCase` to `snake_case` (`user_id_idx`).

```
        self.assertEqual(item['attr2'], 'value2')

        attributes = [{'AttributeName': 'id', 'AttributeType': STRING}]
+       user_id_idx = [
            {'Create': {
                'IndexName': 'id-index',
                'KeySchema': [{
```
codereview_python_data_7034
```
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
- <p>Details: cache-sea4431-SEA 1645521628 3669625611</p>
  <hr>
  <p>Varnish cache server</p>
</body>
```

nit: We could remove lines 54-55 and change line 53 to:

```
event_bus = self._get_param('EventBusName') or DEFAULT_EVENT_BUS_NAME
```

Also, let's assume that `event_bus` is `None` - then line 60 (`EVENT_RULES[event_bus].append(name)`) would insert an item at dict index `None`, right? (which is probably not desired)

```
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4448-SEA 1645521628 2255518445</p>
  <hr>
  <p>Varnish cache server</p>
</body>
```
codereview_python_data_7037
```
        row = result.fetchone()
        if row:
            users = {}
-           for user in row[1]:
-               users[user] = row[1][user][0]
-           return SimilarUsers(user_id=row[0], similar_users=users)
        return None
```

not directly related to this change, but can we use dict access here to make it clearer what we're selecting? `row['similar_users']`

```
        row = result.fetchone()
        if row:
            users = {}
+           for user in row['similar_users']:
+               # first element of array is similarity, second is global_similarity
+               users[user] = row['similar_users'][user][0]
+           return SimilarUsers(user_id=row['user_id'], similar_users=users)
        return None
```
codereview_python_data_7039
"one hundred and twenty-three")) def test_number_too_large(self): - with self.assertRaisesWithMessage(ValueError) as error: say(1e12) - self.assertTrue(len(str(error.exception)) > 0) def test_number_negative(self): - with self.assertRaisesWithMessage(ValueError) as error: say(-1) - self.assertTrue(len(str(error.exception)) > 0) # Utility functions def setUp(self): Non-zero message length already checked by `assertRaisesWithMessage` "one hundred and twenty-three")) def test_number_too_large(self): + with self.assertRaisesWithMessage(ValueError): say(1e12) def test_number_negative(self): + with self.assertRaisesWithMessage(ValueError): say(-1) # Utility functions def setUp(self):
codereview_python_data_7041
```
    _entry_db_names = _get_entry_dbs()
    if db not in _entry_db_names:
        raise ValueError(
-           "TogoWS entry fetch does not officially support "
            "database '%s'." % db
        )
    if field:
        try:
```

You can merge those two strings into one now.

```
    _entry_db_names = _get_entry_dbs()
    if db not in _entry_db_names:
        raise ValueError(
+           "TogoWS entry fetch does not officially support database '%s'." % db
        )
    if field:
        try:
```
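A tiny demonstration that adjacent string literals are concatenated at compile time, so merging them (or leaving them split across lines) changes nothing at runtime:

```python
db = "nucleotide"
merged = "TogoWS entry fetch does not officially support database '%s'." % db
# Adjacent literals concatenate before the % formatting applies.
split = ("TogoWS entry fetch does not officially support "
         "database '%s'." % db)
assert merged == split
```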
codereview_python_data_7048
```
        self.was_ended = arborted
        try:
            self.trigger.release()
-       except (THREAD_EXCEPTION, AttributeError) as e:
            pass


class SelectableSelector(object):
```

Please remove ` as e` as you don't need it.

```
        self.was_ended = arborted
        try:
            self.trigger.release()
+       except (THREAD_EXCEPTION, AttributeError):
            pass


class SelectableSelector(object):
```
codereview_python_data_7052
```
            else:
                raise

-   def _foreach_controller(self, operation, message):
        """
        Executes the given operation on all controllers that need to be tracked;
        outputs 'message' if the controller is not mounted.
        """
```

This method can be static.

```
            else:
                raise

+   @staticmethod
+   def _foreach_controller(operation, message):
        """
        Executes the given operation on all controllers that need to be tracked;
        outputs 'message' if the controller is not mounted.
        """
```
codereview_python_data_7055
```
@cli.command()
@click.option('--location', '-l', default=os.path.join(os.getcwd(), 'listenbrainz-export'))
def import_dump(location):
    db.init_db_connection(config.SQLALCHEMY_DATABASE_URI)
    db_dump.import_postgres_dump(location)
```

We need more error catching here -- there are a lot of things that could go wrong during the import and we need to handle them here.

```
@cli.command()
@click.option('--location', '-l', default=os.path.join(os.getcwd(), 'listenbrainz-export'))
def import_dump(location):
+   """ Import a ListenBrainz PostgreSQL dump into the PostgreSQL database.
+
+   Note: This method tries to import the private dump first, followed by the statistics
+   dump. However, in absence of a private dump, it imports sanitized versions of the
+   user table in the statistics dump in order to satisfy foreign key constraints.
+
+   Args:
+       location (str): path to the directory which contains the private and the stats dump
+   """
    db.init_db_connection(config.SQLALCHEMY_DATABASE_URI)
    db_dump.import_postgres_dump(location)
```
codereview_python_data_7058
```
            except KeyError:
                continue
            stream = HLSStream.parse_variant_playlist(self.session, url)
-           self.session.new_http_session()
            streams.update(stream)
        return streams
```

why not just `self.session.http.close()` or `http.close()` --- do you have an example url where the video will only work with this? or are these the geo-blocked videos

```
            except KeyError:
                continue
            stream = HLSStream.parse_variant_playlist(self.session, url)
+           # work around broken HTTP connection persistence by acquiring a new connection
+           http.close()
            streams.update(stream)
        return streams
```
codereview_python_data_7064
```
        if self.wrap:
            for i in range(0, len(data), self.wrap):
-               self.handle.write(data[i: i + self.wrap] + "\n")
        else:
            self.handle.write(data + "\n")
```

I think that change (and the one below) would be counter to black coding style (which TravisCI is probably about to complain about via the flake8 checks). See the CONTRIBUTING.rst file for details.

```
        if self.wrap:
            for i in range(0, len(data), self.wrap):
+               self.handle.write(data[i : i + self.wrap] + "\n")
        else:
            self.handle.write(data + "\n")
```
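For reference, Black's slice rule in miniature: simple indices keep a tight colon, but when a bound is a more complex expression the colon is treated like a binary operator and gets surrounding spaces — both forms slice identically:

```python
data = "abcdefghij"
wrap, i = 4, 2
# Black keeps data[2:6] tight but formats the expression form as
# data[i : i + wrap]; the result is the same either way.
assert data[i : i + wrap] == data[2:6] == "cdef"
```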
codereview_python_data_7070
```
        }
    )
    _rtmp_quality_lookup = {
-       "オリジナル画質": "original",
-       "original spec": "original",
        "低画質": "low",
        "low spec": "low"
    }
    # changes here must also be updated in test_plugin_showroom
    _quality_weights = {
-       "original": 720,
        "other": 360,
        "low": 160
    }
```

This is the only thing I can see where I imagine @beardypig will chime in and talk about the naming of the weights.

```
        }
    )
    _rtmp_quality_lookup = {
+       "オリジナル画質": "high",
+       "original spec": "high",
        "低画質": "low",
        "low spec": "low"
    }
    # changes here must also be updated in test_plugin_showroom
    _quality_weights = {
+       "high": 720,
        "other": 360,
        "low": 160
    }
```
codereview_python_data_7077
```
    style_opts = line_properties + ['level']

-   apply_ranges = False

    _plot_methods = dict(single='Span')

    def get_data(self, element, ranges, style):
```

I think this should probably be a boolean parameter.

```
    style_opts = line_properties + ['level']

+   apply_ranges = param.Boolean(default=False, doc="""
+       Whether to include the annotation in axis range calculations.""")

    _plot_methods = dict(single='Span')

    def get_data(self, element, ranges, style):
```
codereview_python_data_7081
```
            A list of transactions containing related to the asset.
            If no transaction exists for that asset it returns an empty
            list `[]`
        """
-       cursor = self.connection.run(
-           r.table('bigchain', read_mode=self.read_mode)
-           .get_all(asset_id, index='asset_id')
-           .concat_map(lambda block: block['block']['transactions'])
-           .filter(lambda transaction: transaction['transaction']['asset']['id'] == asset_id))

        return [Transaction.from_dict(tx) for tx in cursor]

    def get_asset_by_id(self, asset_id):
```

`.` at end of line.

```
            A list of transactions containing related to the asset.
            If no transaction exists for that asset it returns an empty
            list `[]`
        """
+       cursor = self.backend.get_transactions_by_asset_id(asset_id)

        return [Transaction.from_dict(tx) for tx in cursor]

    def get_asset_by_id(self, asset_id):
```
codereview_python_data_7082
```
        self.active_flows.add(flow)

    def response(self, flow: http.HTTPFlow):
        if self.stream and flow.websocket is None:
            self.stream.add(flow)
            self.active_flows.discard(flow)
```

Does this mean the HTTP 101 flow is not saved and lost? But it contains interesting data such as authentication tokens. Or why was that change needed?

```
        self.active_flows.add(flow)

    def response(self, flow: http.HTTPFlow):
+       # websocket flows will receive either websocket_end or websocket_error,
+       # we don't want to persist them here already
        if self.stream and flow.websocket is None:
            self.stream.add(flow)
            self.active_flows.discard(flow)
```
codereview_python_data_7095
```
    template = config.val.url.searchengines[engine]
    url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
-   if config.val.url.open_base_url:
-       try:
-           search_url = urllib.parse.urlparse(config.val.url.searchengines[term])
-           url = QUrl('{}://{}'.format(search_url.scheme, search_url.netloc))
-       except KeyError:
-           pass
    qtutils.ensure_valid(url)
    return url
```

The try cach can be avoided here if you write something like:

```
if config.val.url.open_base_url and term in config.val.url.searchengines.keys():
    search_url = urllib.parse.urlparse(config.val.url.searchengines[term])
    url = QUrl('{}://{}'.format(search_url.scheme, search_url.netloc))
```

```
    template = config.val.url.searchengines[engine]
    url = qurl_from_user_input(template.format(urllib.parse.quote(term)))
+   if config.val.url.open_base_url and \
+           term in config.val.url.searchengines.keys():
+       search_url = urllib.parse.urlparse(config.val.url.searchengines[term])
+       url = QUrl('{}://{}'.format(search_url.scheme, search_url.netloc))
    qtutils.ensure_valid(url)
    return url
```
codereview_python_data_7097
"""Decorator to perform check that the model handle exists in the service. Args: - f(func): The model handle should exists when executing function f Returns: wrapper: Function wrapper to perform model handle existence check. To be consistent with the rest of our codebase, please add a space between the arg and the type, such as: `f (func)` Please fix this in the rest of this PR. Thanks. """Decorator to perform check that the model handle exists in the service. Args: + f (func): The model handle should exists when executing function f Returns: wrapper: Function wrapper to perform model handle existence check.
codereview_python_data_7102
```
            ``id`` property.
        metadata (dict): Metadata to be stored along with the Transaction.
-       version (int): Defines the version number of a Transaction.
    """
    CREATE = 'CREATE'
    TRANSFER = 'TRANSFER'
```

Why do we need to change this. Wouldn't be enough to update `version.py`?

```
            ``id`` property.
        metadata (dict): Metadata to be stored along with the Transaction.
+       version (string): Defines the version number of a Transaction.
    """
    CREATE = 'CREATE'
    TRANSFER = 'TRANSFER'
```
codereview_python_data_7106
```
class IntervalDomain(dbops.Domain):
    def __init__(self) -> None:
        super().__init__(
-           name=('edgedb', 'interval_t'),
            base='interval',
            constraints=(
                dbops.DomainCheckConstraint(
-                   domain_name=('edgedb', 'interval_t'),
                    expr=r'''
                        EXTRACT(months from VALUE) = 0 AND
                        EXTRACT(years from VALUE) = 0 AND
```

Let's call this `duration_t`, because it underpins `std::duration`.

```
class IntervalDomain(dbops.Domain):
    def __init__(self) -> None:
        super().__init__(
+           name=('edgedb', 'duration_t'),
            base='interval',
            constraints=(
                dbops.DomainCheckConstraint(
+                   domain_name=('edgedb', 'duration_t'),
                    expr=r'''
                        EXTRACT(months from VALUE) = 0 AND
                        EXTRACT(years from VALUE) = 0 AND
```
codereview_python_data_7109
```
        module = self.eth_modules.get('uniswap', None)
        if not module:
            return None
-
-       if module == 'loading':
-           with gevent.Timeout(10):
-               while True:
-                   module = self.eth_modules.get('uniswap', None)
-                   if module == 'loading':
-                       gevent.sleep(0.5)
-                   else:
-                       return module  # type: ignore
        return module  # type: ignore

    @property
```

since you don't need an async init the extra logic from this function can go

```
        module = self.eth_modules.get('uniswap', None)
        if not module:
            return None
        return module  # type: ignore

    @property
```
codereview_python_data_7117
```
        # try to revoke token with Google Auth API otherwise Google will consider the account
        # be still connected and will not send a refresh_token next time the user tries to
        # connect again. if it doesn't succeed proceed normally and just delete from our database
-       self._revoke_token(user["access_token"])
        super(YoutubeService, self).remove_user(user_id)

-   def _revoke_token(self, access_token):
        """ Revoke the given access_token using Google OAuth Revoke endpoint.

        Args:
            access_token: the token to be revoked
```

It might be useful to include a user id here, so at least we could go back through logs if we ever get a support request about this.

```
        # try to revoke token with Google Auth API otherwise Google will consider the account
        # be still connected and will not send a refresh_token next time the user tries to
        # connect again. if it doesn't succeed proceed normally and just delete from our database
+       self._revoke_token(user_id, user["access_token"])
        super(YoutubeService, self).remove_user(user_id)

+   def _revoke_token(self, user_id: int, access_token: str):
        """ Revoke the given access_token using Google OAuth Revoke endpoint.

        Args:
            access_token: the token to be revoked
```
codereview_python_data_7120
```
        trajectory is changed. See section on **Dynamic selections** below.
        [``True``]
    rdkit_kwargs : dict (optional)
-       Arguments passed to the RDKitConverter when using selection based on
-       SMARTS queries
    **selgroups : keyword arguments of str: AtomGroup (optional)
        when using the "group" keyword in selections, groups are defined by
        passing them as keyword arguments. See section on **preexisting
```

:class:`converters.RDKitConverter` (or whatever sphinx highlighting looks better)?

```
        trajectory is changed. See section on **Dynamic selections** below.
        [``True``]
    rdkit_kwargs : dict (optional)
+       Arguments passed to the :class:`~MDAnalysis.converters.RDKitConverter`
+       when using selection based on SMARTS queries
    **selgroups : keyword arguments of str: AtomGroup (optional)
        when using the "group" keyword in selections, groups are defined by
        passing them as keyword arguments. See section on **preexisting
```
codereview_python_data_7126
```
    label : string, optional
        If not None, the parsed nodes will be renamed according to node
-       attributes indicated by `label`. Default value: ``'label'``.
    destringizer : callable, optional
        A destringizer that recovers values stored as strings in GML. If it
```

The double backticks around the string literal `'label'` are important. In other words, this should remain ```'label'```.

```
    label : string, optional
        If not None, the parsed nodes will be renamed according to node
+       attributes indicated by `label`. Default value: 'label'.
    destringizer : callable, optional
        A destringizer that recovers values stored as strings in GML. If it
```
codereview_python_data_7130
```
    edge_shuffled = edge_mapping.shape != (0,)
    use_bcast = op not in ['copy_lhs', 'copy_rhs'] and u_shp[1:] != e_shp[1:]
    # pass edge_mapping to tvm only when array packing will be used
-   use_idx = edge_shuffled and num_feat_partitions > 1 and not use_bcast and use_e
    f_input = [indptr, indices]
    key = (num_rows, num_cols, nnz, op, reduce_op, u_shp, e_shp, use_idx, \
           num_feat_partitions, num_col_partitions, indice_type, feat_type, target)
```

Used for indexing, should find a more elegant way.

```
    edge_shuffled = edge_mapping.shape != (0,)
    use_bcast = op not in ['copy_lhs', 'copy_rhs'] and u_shp[1:] != e_shp[1:]
    # pass edge_mapping to tvm only when array packing will be used
+   use_idx = num_feat_partitions > 1 and not use_bcast and use_e
    f_input = [indptr, indices]
    key = (num_rows, num_cols, nnz, op, reduce_op, u_shp, e_shp, use_idx, \
           num_feat_partitions, num_col_partitions, indice_type, feat_type, target)
```
codereview_python_data_7133
```
    Attaches plot refresh to all streams on the object.
    """
    def append_refresh(dmap):
-       for stream in get_streams(dmap):
            stream._hidden_subscribers.append(plot.refresh)
    return obj.traverse(append_refresh, [DynamicMap])
```

Can't this be simplified to:

``` python
if isinstance(obj, DynamicMap) and isinstance(obj.callback, Callable):
    return [(index, obj)]
```

leaving the rest of the function to handle `DynamicMaps` with `Callable` callbacks?

```
    Attaches plot refresh to all streams on the object.
    """
    def append_refresh(dmap):
+       for stream in get_nested_streams(dmap):
            stream._hidden_subscribers.append(plot.refresh)
    return obj.traverse(append_refresh, [DynamicMap])
```
codereview_python_data_7135
```
    def now(self):
        """Return the current time and date as a datetime."""
-       from celery.utils.time import to_utc
-       from datetime import datetime
-
        now_in_utc = to_utc(datetime.utcnow())
        return now_in_utc.astimezone(self.timezone)
```

why are we importing stuff here?

```
    def now(self):
        """Return the current time and date as a datetime."""
        now_in_utc = to_utc(datetime.utcnow())
        return now_in_utc.astimezone(self.timezone)
```
codereview_python_data_7142
```
    MIN_SECONDS_ALLOWED_FOR_CELL_CHECK = 10
    MIN_SECONDS_ALLOWED_FOR_REQUESTING_DATA = 5
    MIN_BALLS_FOR_CATCHING = 10
-   MAX_CACHE_LIST_SIZE = 500

    def __init__(self, bot, config):
        super(Sniper, self).__init__(bot, config)
```

You do realize people bot on raspberry devices, right? Increasing this this high might not be a good idea.

```
    MIN_SECONDS_ALLOWED_FOR_CELL_CHECK = 10
    MIN_SECONDS_ALLOWED_FOR_REQUESTING_DATA = 5
    MIN_BALLS_FOR_CATCHING = 10
+   MAX_CACHE_LIST_SIZE = 300

    def __init__(self, bot, config):
        super(Sniper, self).__init__(bot, config)
```
codereview_python_data_7143
```
        sort = False
        join = kwargs.get("join", "outer")
        ignore_index = kwargs.get("ignore_index", False)
-       other_data = [o._data_obj for o in other]
-       new_data = self._data_obj._concat(axis, other_data, join, sort)
        if ignore_index:
-           new_data.index = pandas.RangeIndex(
                len(self.index) + sum(len(o.index) for o in other)
            )
-       return self.__constructor__(new_data)

    # END Append/Concat/Join
```

Can we change the default value of sort to `False` instead?

```
        sort = False
        join = kwargs.get("join", "outer")
        ignore_index = kwargs.get("ignore_index", False)
+       other_modin_frame = [o._modin_frame for o in other]
+       new_modin_frame = self._modin_frame._concat(axis, other_modin_frame, join, sort)
        if ignore_index:
+           new_modin_frame.index = pandas.RangeIndex(
                len(self.index) + sum(len(o.index) for o in other)
            )
+       return self.__constructor__(new_modin_frame)

    # END Append/Concat/Join
```
codereview_python_data_7159
```
    as the ``descriptor`` argument.
    """

    def __init__(self, oauth_credentials=None, descriptor='', http_=None,
-                chunksize=10 * 1024 * 1024):
        self.chunksize = chunksize

        http_ = http_ or httplib2.Http()
```

you should default this to CHUNKSIZE and set CHUNKSIZE to 10x1024x1024

```
    as the ``descriptor`` argument.
    """

    def __init__(self, oauth_credentials=None, descriptor='', http_=None,
+                chunksize=CHUNKSIZE):
        self.chunksize = chunksize

        http_ = http_ or httplib2.Http()
```
codereview_python_data_7166
```
class DelayTest(unittest_utils.ForsetiTestCase):
    """ Tests for the Delay Utility."""

-   def mytest_delay(self):
        """test to verify that a delay is used"""
        delay_by = 9
        param = 1
```

test functions have to start with test_ to get run. Please rename to test_delay.

```
class DelayTest(unittest_utils.ForsetiTestCase):
    """ Tests for the Delay Utility."""

+   def delay_test(self):
        """test to verify that a delay is used"""
        delay_by = 9
        param = 1
```
codereview_python_data_7179
```
        self.stage_type = stage_type
        self.scales = scales
        self.width = width
-       # delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
```

May I ask why change this?

```
        self.stage_type = stage_type
        self.scales = scales
        self.width = width
+       delattr(self, 'conv2')
        delattr(self, self.norm2_name)

    def forward(self, x):
```
codereview_python_data_7181
```
    feature.type = element.attrib.get("type", "")
    if "id" in element.attrib:
        feature.id = element.attrib["id"]
-   if "description" in element.attrib:
-       feature.qualifiers["description"] = element.attrib["description"]
-   if "evidence" in element.attrib:
-       feature.qualifiers["evidence"] = element.attrib["evidence"]
-   if "status" in element.attrib:
-       feature.qualifiers["status"] = element.attrib["status"]
    for feature_element in element:
        if feature_element.tag == NS + "location":
            position_elements = feature_element.findall(NS + "position")
```

I'm looking at that and thinking a for loop would be more elegant - especially if there might be more extra fields later on?

```python
for field in ["description", "evidence", "status"]:
    if field in element.attrib:
        feature.qualifiers[field] = element.attrib[field]
```

Or, if those are almost always present perhaps this:

```python
for field in ["description", "evidence", "status"]:
    try:
        feature.qualifiers[field] = element.attrib[field]
    except KeyError:
        pass
```

That is longer though, so maybe not?

```
    feature.type = element.attrib.get("type", "")
    if "id" in element.attrib:
        feature.id = element.attrib["id"]
    for feature_element in element:
        if feature_element.tag == NS + "location":
            position_elements = feature_element.findall(NS + "position")
```
codereview_python_data_7182
raise Exception("Couldn't find location for table: {0}".format(str(self))) return location - def open(self, mode): - return NotImplementedError("open() is not supported for {}".format(self.__class__.__name__)) - class HiveTableTarget(HivePartitionTarget): """ - exists returns true if the table exists. """ def __init__(self, table, database='default', client=None): could this docstring be improved? raise Exception("Couldn't find location for table: {0}".format(str(self))) return location class HiveTableTarget(HivePartitionTarget): """ + Target representing non-partitioned table """ def __init__(self, table, database='default', client=None):
codereview_python_data_7187
```
        demands.root_user = True

    def run_on_module(self):
-       module_specs = self.base.repo_module_dict.install(self.opts.module_nsvp)

        if module_specs:
            raise NoModuleException(", ".join(module_specs))
```

Are Lock and Unlock sub-commands in scope?

```
        demands.root_user = True

    def run_on_module(self):
+       module_specs = self.base.repo_module_dict.install(self.opts.module_nsvp,
+                                                         self.base.conf.strict)

        if module_specs:
            raise NoModuleException(", ".join(module_specs))
```
codereview_python_data_7189
```
# limitations under the License.
#

import os

from .cfg_parser import LuigiConfigParser
from .toml_parser import LuigiTomlParser
```

Am I correct that if you want to use a config parser other than `cfg`, you have to define the `LUIGI_CONFIG_PARSER` environmental variable?

```
# limitations under the License.
#

import os
+import os.path
+import warnings

from .cfg_parser import LuigiConfigParser
from .toml_parser import LuigiTomlParser
```
codereview_python_data_7193
```
class ConflictingHeaderError(RequestException):
    """Mutually exclusive request headers set"""


-class UnreachableCodeError(RequestException, RuntimeError):
-   """Unreachable code block reached"""


# Warnings
```

Heh, while I like this name I don't think it's a good one. It's too general. =) Let's instead define a new exception that is more like this:

``` python
class InvalidBodyError(RequestException, ValueError):
    """An invalid request body was specified."""
```

```
class ConflictingHeaderError(RequestException):
    """Mutually exclusive request headers set"""


+class InvalidBodyError(RequestException, ValueError):
+   """An invalid request body was specified"""


# Warnings
```
codereview_python_data_7200
```
from plenum.common.constants import STEWARD_STRING
from plenum.common.exceptions import RequestNackedException
from plenum.common.util import randomString
-from plenum.test.helper import sdk_multi_sign_request_objects, sdk_json_to_request_object, sdk_send_signed_requests, \
-    sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import prepare_nym_request


-def test_send_same_txn_with_different_signatures_in_separate_batches(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee, sdk_wallet_client):
    # filling nym request and getting steward did
    nym_request, new_did = looper.loop.run_until_complete(
```

The test name looks incorrect.

```
from plenum.common.constants import STEWARD_STRING
from plenum.common.exceptions import RequestNackedException
from plenum.common.util import randomString
+from plenum.test.helper import sdk_multi_sign_request_objects, \
+    sdk_json_to_request_object, sdk_send_signed_requests, sdk_get_and_check_replies
from plenum.test.pool_transactions.helper import prepare_nym_request


+def test_txn_with_different_signature_and_idr(
        looper, txnPoolNodeSet, sdk_pool_handle, sdk_wallet_trustee, sdk_wallet_client):
    # filling nym request and getting steward did
    nym_request, new_did = looper.loop.run_until_complete(
```
codereview_python_data_7204
```
    Notes
    -----
-   The implementation is adapted from the algorithm by Sporns et al. [1]_
-   which is inspired from the original work from Maslov and Sneppen(2002) [2]_

    References
    ----------
```

These sentences have lost their periods.

```
    Notes
    -----
+   The implementation is adapted from the algorithm by Sporns et al. [1]_.
+   which is inspired from the original work by Maslov and Sneppen(2002) [2]_.

    References
    ----------
```
codereview_python_data_7206
```
        'Paths',
    ]
    for name, path in _path_info().items():
-       lines += [ '{}: {}'.format(name, path) ]
    return '\n'.join(lines)
```

nitpick: No spaces inside `[]` here

```
        'Paths',
    ]
    for name, path in _path_info().items():
+       lines += ['{}: {}'.format(name, path)]
    return '\n'.join(lines)
```
codereview_python_data_7207
```
            raise configexc.ValidationError(value, str(e))
            return
        except UnicodeEncodeError as e:
-           raise configexc.ValidationError(value, e)


class AutoSearch(BaseType):
```

Keeping this here is actually good - I just didn't think about this in the example I did give you. Good we have tests! :smile:

```
            raise configexc.ValidationError(value, str(e))
            return
        except UnicodeEncodeError as e:
+           raise configexc.ValidationError(value, str(e))


class AutoSearch(BaseType):
```
codereview_python_data_7216
```
        if rank == 0:
            torch.cuda.synchronize()
            elapsed = time.perf_counter() - start_time
-           batch_size = data['img'][0].size(0)
            prog_bar.update(
                completed=batch_size * world_size, elapsed_time=elapsed)
```

We need to keep using to obtain the `batch_size` in case downstream repos' inputs do not have the key `img`.

```python
batch_size = (
    len(data['img_meta']._data)
    if 'img_meta' in data else data['img'][0].size(0))
```

```
        if rank == 0:
            torch.cuda.synchronize()
            elapsed = time.perf_counter() - start_time
+           batch_size = len(data['img_meta']._data)
            prog_bar.update(
                completed=batch_size * world_size, elapsed_time=elapsed)
```
codereview_python_data_7218
```
            'type': self.cname,
        })
        # Override directives that should not be inherited from user code.
-       from . import Options
-       directives = Options.get_conversion_utility_code_directives(env.directives)
        from .UtilityCode import CythonUtilityCode
        env.use_utility_code(CythonUtilityCode.load(
            cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
            context=context, compiler_directives=directives))
```

Would it make sense to make the `CythonUtilityCode` directly responsible for this?

```
            'type': self.cname,
        })
        # Override directives that should not be inherited from user code.
        from .UtilityCode import CythonUtilityCode
+       directives = CythonUtilityCode.filter_directives(env.directives)
        env.use_utility_code(CythonUtilityCode.load(
            cls.replace('unordered_', '') + ".from_py", "CppConvert.pyx",
            context=context, compiler_directives=directives))
```
codereview_python_data_7220
```
    def ping(self, **kwargs):
        worker_id = kwargs['worker']
        worker = self._update_worker(worker_id)
-       return {"messages": worker.fetch_messages()}

    def _upstream_status(self, task_id, upstream_status_table):
        if task_id in upstream_status_table:
```

I just thought about it now (not related so much to this patch). But when this is transfered to the worker. Is it returning a python-structure or an actual JSON message? I suppose JSON would be ideal so theoretically one can communicate with the server using other languages.

```
    def ping(self, **kwargs):
        worker_id = kwargs['worker']
        worker = self._update_worker(worker_id)
+       return {"rpc_messages": worker.fetch_rpc_messages()}

    def _upstream_status(self, task_id, upstream_status_table):
        if task_id in upstream_status_table:
```
codereview_python_data_7222
```
        oss_fuzz_project.ccs = ccs
        oss_fuzz_project.put()
    else:
-       cpu_weight = (
-           OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT
-           if language in MEMORY_SAFE_LANGUAGES else
-           OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT)
        data_types.OssFuzzProject(
            id=project,
```

I would convert this to a normal `if-else` the one line version is not very readable when it's more than one line.

```
        oss_fuzz_project.ccs = ccs
        oss_fuzz_project.put()
    else:
+       if language in MEMORY_SAFE_LANGUAGES:
+           cpu_weight = OSS_FUZZ_MEMORY_SAFE_LANGUAGE_PROJECT_WEIGHT
+       else:
+           cpu_weight = OSS_FUZZ_DEFAULT_PROJECT_CPU_WEIGHT
        data_types.OssFuzzProject(
            id=project,
```
codereview_python_data_7228
```
from hypothesis.internal.floats import int_to_float

LABEL_MASK = 2 ** 64 - 1
-_SEQUENCE_TYPES = (OrderedDict, Sequence, enum.EnumMeta)


def calc_label_from_name(name):
```

I'd delete this and just use it inline.

```
from hypothesis.internal.floats import int_to_float

LABEL_MASK = 2 ** 64 - 1


def calc_label_from_name(name):
```
codereview_python_data_7229
```
def get_issue_url(testcase):
-   """Return issue url for a testcase."""
    issue_tracker = get_issue_tracker_for_testcase(testcase)
    if not issue_tracker:
        return None
```

Maybe expand on description here that this is for display and account for group bug.

```
def get_issue_url(testcase):
+   """Return issue url for a testcase. This is used when rendering a testcase,
+   details page, therefore it accounts for |group_bug_information| as well."""
    issue_tracker = get_issue_tracker_for_testcase(testcase)
    if not issue_tracker:
        return None
```
codereview_python_data_7230
```
        return imports

    @abstractmethod
-   def build_source_code(self, execution=None):
        pass

    @staticmethod
```

Why this is changed?

```
        return imports

    @abstractmethod
+   def build_source_code(self):
        pass

    @staticmethod
```
codereview_python_data_7241
"strings." ) raise InvalidArgument(f"thing={thing!r} must be a type") # pragma: no cover - if types.is_forbidden_to_dispatch(thing): # pragma: no cover # Code like `st.from_type(TypeAlias)` does not make sense. raise InvalidArgument(f"thing={thing!r} does not make sense as a strategy") # Now that we know `thing` is a type, the first step is to check for an This is going to be a bit awkward, but I think I'd prefer to write a separate clause for each inline, so that we can have a detailed and specific error message. I can imagine wanting abstraction or helper functions when we're handling 4+ types like this, but it feels premature now. "strings." ) raise InvalidArgument(f"thing={thing!r} must be a type") # pragma: no cover + if thing in types.TypeAliasTypes: # pragma: no cover # Code like `st.from_type(TypeAlias)` does not make sense. raise InvalidArgument(f"thing={thing!r} does not make sense as a strategy") # Now that we know `thing` is a type, the first step is to check for an
codereview_python_data_7246
```
                     'runnning locally.')
flags.DEFINE_integer('max_admin_api_calls_per_day', 150000,
                     'Admin SDK queries per day.')
-flags.DEFINE_integer('max_results_admin_api', 500,
-+                    'maxResult param for the Admin SDK list() method')


class AdminDirectoryClient(_base_client.BaseClient):
```

nit: align the leftmost quotes

```
                     'runnning locally.')
flags.DEFINE_integer('max_admin_api_calls_per_day', 150000,
                     'Admin SDK queries per day.')
+flags.DEFINE_string('max_results_admin_api', 500,
+                    'maxResult param for the Admin SDK list() method')


class AdminDirectoryClient(_base_client.BaseClient):
```
codereview_python_data_7251
```
    Parameters
    ----------
-   applier : Function object to register `funcs`
-   *funcs : list of functions to register in `applier`

    Returns
    -------
```

```suggestion
    applier: Callable
        Function object to register `funcs`
    *funcs: list
        List of functions to register in `applier`
```

```
    Parameters
    ----------
+   applier: Callable
+       Function object to register `funcs`
+   *funcs: list
+       List of functions to register in `applier`

    Returns
    -------
```
codereview_python_data_7254
```
from __future__ import print_function

import glob
import os
import sys
```

Please revert changes to this file

```
from __future__ import print_function

import glob
+import locale
import os
import sys
```
codereview_python_data_7255
```
        if not flow:
            raise exceptions.CommandError("No flow selected.")
        if part in ("response-headers", "response-body", "set-cookies"):
-           flow.response = http.HTTPResponse.make()
        if part == "cookies":
            self.master.switch_view("edit_focus_cookies")
        elif part == "form":
```

Pretty bad idea to always overwrite the response

```
        if not flow:
            raise exceptions.CommandError("No flow selected.")
        if part in ("response-headers", "response-body", "set-cookies"):
+           if flow.response is None:
+               flow.response = http.HTTPResponse.make()
        if part == "cookies":
            self.master.switch_view("edit_focus_cookies")
        elif part == "form":
```
codereview_python_data_7256
```
def parse(file, format, **kwargs):
-   """Parse a file and return each of the trees it contains.

    If a file only contains one tree, this still returns an iterable
    object that contains one element.
```

It would be good to have something in the one line indicating this is an iterator, maybe "Parse a file iteratively, and yield each of the trees it contains."?

```
def parse(file, format, **kwargs):
+   """Parse iteratively a file and return each of the trees it contains.

    If a file only contains one tree, this still returns an iterable
    object that contains one element.
```
codereview_python_data_7272
```
            alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)

            if self.bot.config.walk_max > 0:
-               mode = "walking"
-               if "mode" in point:
-                   mode = point["mode"]
                step_walker = walker_factory(self.walker,
                                             self.bot,
                                             lat,
```

Do this instead: `mode = point.get("mode", "walking")`

```
            alt = uniform(self.bot.config.alt_min, self.bot.config.alt_max)

            if self.bot.config.walk_max > 0:
+               mode = point.get("mode", "walking")
                step_walker = walker_factory(self.walker,
                                             self.bot,
                                             lat,
```
codereview_python_data_7274
Installed = "Installed" Enabled = "Enabled" FailedUpgrade = "FailedUpgrade" - DidNotRun = "DidNotRun" class GoalStateStatus(object): I do not like the idea of creating a new state for this case. It seems like an overkill for what we're trying to achieve since creating and maintaining a new state requires dealing with the filesystem which comes with its own issues. Additionally, I don't think it fits the needs completely either. For eg, you're setting the HandlerState for Single config extension to `DidNotRun` but not for MultiConfig extensions. This mismatch makes the code more prone to errors. Installed = "Installed" Enabled = "Enabled" FailedUpgrade = "FailedUpgrade" class GoalStateStatus(object):
codereview_python_data_7285
```
            self.nodes.add(key)
            self.nodes_len = len(self.nodes)
            for i in range(self.replica_count):
-               replica_key = "%s:%d" % (key, i)
                if self.hash_type == 'fnv1a_ch':
                    replica_key = "%d-%s" % (i, key[1])
                position = self.compute_ring_position(replica_key)
                entry = (position, key)
                bisect.insort(self.ring, entry)
```

I would prefer an explicit `if else` here unless this is considered more pythonic.

```
            self.nodes.add(key)
            self.nodes_len = len(self.nodes)
            for i in range(self.replica_count):
                if self.hash_type == 'fnv1a_ch':
                    replica_key = "%d-%s" % (i, key[1])
+               else:
+                   replica_key = "%s:%d" % (key, i)
                position = self.compute_ring_position(replica_key)
                entry = (position, key)
                bisect.insort(self.ring, entry)
```
codereview_python_data_7293
```
from platforms.fuchsia.util.device import Device
from platforms.fuchsia.util.fuzzer import Fuzzer
from platforms.fuchsia.util.host import Host
from system import environment
from system import minijail
from system import new_process
from system import shell
-from zipfile import ZipFile

# Maximum length of a random chosen length for `-max_len`.
MAX_VALUE_FOR_MAX_LENGTH = 10000
```

Nit: instead of this, "from system import archive" and use archive.unpack instead. It's fine to pass trusted=True given that the file is checked in.

```
from platforms.fuchsia.util.device import Device
from platforms.fuchsia.util.fuzzer import Fuzzer
from platforms.fuchsia.util.host import Host
+from system import archive
from system import environment
from system import minijail
from system import new_process
from system import shell

# Maximum length of a random chosen length for `-max_len`.
MAX_VALUE_FOR_MAX_LENGTH = 10000
```
codereview_python_data_7303
```
        return self.__it.__length_hint__()

    def __repr__(self):
        if self.__done:
            return repr(self.__consumed)
        else:
-           return '[{}]'.format(', '.join(
                [repr(x) for x in self.__consumed] + ['...']))

    def __iter__(self):
```

Please use `{0}` here, as position-less format is not supported by all 2.7 versions

```
        return self.__it.__length_hint__()

    def __repr__(self):
+       # override list.__repr__ to avoid consuming the generator
        if self.__done:
            return repr(self.__consumed)
        else:
+           return '[{0}]'.format(', '.join(
                [repr(x) for x in self.__consumed] + ['...']))

    def __iter__(self):
```
codereview_python_data_7306
```
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
- <p>Details: cache-sea4427-SEA 1645523378 794088412</p>
  <hr>
  <p>Varnish cache server</p>
</body>
```

Instead of `Config`, use `Service`

```
  <h1>Error 503 Backend is unhealthy</h1>
  <p>Backend is unhealthy</p>
  <h3>Guru Mediation:</h3>
+ <p>Details: cache-sea4460-SEA 1645523378 1850283307</p>
  <hr>
  <p>Varnish cache server</p>
</body>
```
codereview_python_data_7312
```
        int
            The number of nodes
        """
-       assert self._handle is not None
        return _CAPI_DGLGraphNumVertices(self._handle)

    def number_of_edges(self):
```

The assertion is not needed. Or you could think that all APIs require such assertion which means we should assume the _handle is never None after initialization.

```
        int
            The number of nodes
        """
        return _CAPI_DGLGraphNumVertices(self._handle)

    def number_of_edges(self):
```
codereview_python_data_7333
```
                raise errors.InvalidConstraintDefinitionError(
                    f'Constraint {constraint.get_displayname(schema)} on '
                    f'{subject.get_displayname(schema)} is not supported '
-                   f'because it would depend on multiple tables',
                    context=source_context,
                )
            elif ref_tables:
```

```suggestion
                    f'because it would depend on multiple objects',
```

```
                raise errors.InvalidConstraintDefinitionError(
                    f'Constraint {constraint.get_displayname(schema)} on '
                    f'{subject.get_displayname(schema)} is not supported '
+                   f'because it would depend on multiple objects',
                    context=source_context,
                )
            elif ref_tables:
```
codereview_python_data_7360
```
        return data

    @ma.post_load
-   def build_planner(self, data, **kwargs):
        return None if kwargs.get('partial') is True else Operation(**data)
```

do we want None or just an empty list?

```
        return data

    @ma.post_load
+   def build_operation(self, data, **kwargs):
        return None if kwargs.get('partial') is True else Operation(**data)
```
codereview_python_data_7363
```
        results['pad_shape'] = img.shape
        results['flip'] = False
        results['scale_factor'] = 1.0
-       cnum = 1 if len(img.shape) < 3 else img.shape[2]
-       results['img_norm_cfg'] = [[0.0] * cnum, [1.0] * cnum, False]
        return results

    def __repr__(self):
```

Use a more specific variable name like `num_channels`.

```
        results['pad_shape'] = img.shape
        results['flip'] = False
        results['scale_factor'] = 1.0
+       num_channels = 1 if len(img.shape) < 3 else img.shape[2]
+       results['img_norm_cfg'] = [[0.0] * num_channels, [1.0] * num_channels,
+                                  False]
        return results

    def __repr__(self):
```
codereview_python_data_7374
```
    sudo('dpkg --configure -a')
    sudo('apt-get -y -f install')
    sudo('apt-get -y install build-essential wget bzip2 ca-certificates \
-        libglib2.0-0 libxext6 libsm6 libxrender1 libssl-dev \
-        git gcc g++ python3-dev libboost-python-dev libffi-dev \
         software-properties-common python-software-properties \
         python3-setuptools ipython3 sysstat s3cmd')
    sudo('easy_install3 pip')
```

remove `libssl-dev` and `libffi-dev` here and run a quick test: note that you would need to pip install from bigchaindb master for including #212

```
    sudo('dpkg --configure -a')
    sudo('apt-get -y -f install')
    sudo('apt-get -y install build-essential wget bzip2 ca-certificates \
+        libglib2.0-0 libxext6 libsm6 libxrender1 \
+        git gcc g++ python3-dev libboost-python-dev \
         software-properties-common python-software-properties \
         python3-setuptools ipython3 sysstat s3cmd')
    sudo('easy_install3 pip')
```
codereview_python_data_7383
```
    def assertAlmostEqualList(self, list1, list2, **kwargs):
        self.assertEqual(len(list1), len(list2))
-       for i in range(len(list1)):
-           self.assertAlmostEqual(list1[i], list2[i], **kwargs)

    def test_nucleotides(self):
        filename = "GFF/multi.fna"
```

I would use zip(list1, list2) here.

```
    def assertAlmostEqualList(self, list1, list2, **kwargs):
        self.assertEqual(len(list1), len(list2))
+       for (v1, v2) in zip(list1, list2):
+           self.assertAlmostEqual(v1, v2, **kwargs)

    def test_nucleotides(self):
        filename = "GFF/multi.fna"
```
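A small illustration of the `zip` pattern, which pairs elements directly instead of indexing (it also stops at the shorter input, though these lists are length-checked first):

```python
list1, list2 = [1.0, 2.0, 3.00000001], [1.0, 2.0, 3.0]
for v1, v2 in zip(list1, list2):
    # Mirrors assertAlmostEqual's default check: round(a - b, 7) == 0.
    assert round(v1 - v2, 7) == 0
```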