codereview_python_data_10108
    for key, value in query.items():
        if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
-           query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](value)
    # Query parameters override other parameters
    connparams.update(query)

This line is longer than 80 characters, thus Travis CI checks are failing.

    for key, value in query.items():
        if key in redis.connection.URL_QUERY_ARGUMENT_PARSERS:
+           query[key] = redis.connection.URL_QUERY_ARGUMENT_PARSERS[key](
+               value
+           )
    # Query parameters override other parameters
    connparams.update(query)
codereview_python_data_10111
    return 'text/html', html


-@add_handler('spawn_output')
def qute_spawn_output(_url):
-   """Handler for qute://spawn_output."""
    html = jinja.render('pre.html', title='spawn output',
                        content=spawn_output)
    return 'text/html', html

This *might* work, but having underscores in URLs looks a bit weird; I'd prefer `spawn-output` here.

    return 'text/html', html


+@add_handler('spawn-output')
def qute_spawn_output(_url):
+   """Handler for qute://spawn-output."""
    html = jinja.render('pre.html', title='spawn output',
                        content=spawn_output)
    return 'text/html', html
codereview_python_data_10114
    iterator_stmt = setgen.new_set_from_set(
        iterator_view, preserve_scope_ns=True, ctx=scopectx)

-   ptr_target = inference.infer_type(iterator_stmt, ctx.env)
-   anytype = ptr_target.find_any(ctx.env.schema)
    if anytype is not None:
        raise errors.QueryError(
            'FOR statement has iterator of indeterminate type',

Would `ptr_target.is_polymorphic()` work here? Speaking of `edb/schema/types.py`, `Type.contains_any()` implementation is off w.r.t. its return type.

    iterator_stmt = setgen.new_set_from_set(
        iterator_view, preserve_scope_ns=True, ctx=scopectx)

+   iterator_type = inference.infer_type(iterator_stmt, ctx.env)
+   anytype = iterator_type.find_any(ctx.env.schema)
    if anytype is not None:
        raise errors.QueryError(
            'FOR statement has iterator of indeterminate type',
codereview_python_data_10117
        return_inverse=True
    )

-   if new_categories[0] == _sortable_sentinel:
        # f_to_use return _sortable_sentinel for locations that should be
        # missing values in our output. Since np.unique returns the uniques
        # in sorted order, and since _sortable_sentinel sorts before any

since this is a sentinel could we use `is`?

        return_inverse=True
    )

+   if new_categories[0] is _sortable_sentinel:
        # f_to_use return _sortable_sentinel for locations that should be
        # missing values in our output. Since np.unique returns the uniques
        # in sorted order, and since _sortable_sentinel sorts before any
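The review above points at a general Python idiom worth spelling out: sentinel objects are compared by identity, not equality. A minimal standalone sketch of the pattern (the names are illustrative, not from the project source):

```python
_MISSING = object()  # unique sentinel: no other object is ever `is` this one

def lookup(mapping, key, default=None):
    value = mapping.get(key, _MISSING)
    # `is` tests identity, so a stored value that merely compares equal
    # to the sentinel (e.g. via a custom __eq__) can never match it.
    if value is _MISSING:
        return default
    return value

print(lookup({'a': 1}, 'a'))  # 1
print(lookup({'a': 1}, 'b'))  # None
```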
codereview_python_data_10118
    shard_outputs = []
    for pipeline in pipelines:
        pipe_outputs = pipeline.run()
-       if device == 'gpu':
-           shard_outputs.append(
-               tuple(result.as_cpu().as_array() for result in pipe_outputs))
-       else:
-           shard_outputs.append(
-               tuple(result.as_array() for result in pipe_outputs))
-       results.append(tuple(shard_outputs))
    return results

```suggestion
            shard_outputs.append(tuple(test_utils.to_array(result) for result in pipe_outputs))
```

    shard_outputs = []
    for pipeline in pipelines:
        pipe_outputs = pipeline.run()
+       shard_outputs.append(tuple(to_array(result) for result in pipe_outputs))
    results.append(tuple(shard_outputs))
    return results
codereview_python_data_10119
    arn_to_lambda[arn].cwd = lambda_cwd


-def add_event_source(function_name, source_arn, enabled, batch_size=10):
    mapping = {
        'UUID': str(uuid.uuid4()),
        'StateTransitionReason': 'User action',

Instead of encoding the default value in the function signature, I'd rather handle the default case in the function body (we can define `DEFAULT_BATCH_SIZE` towards the top of the file):

```
DEFAULT_BATCH_SIZE = 10
...

def add_event_source(function_name, source_arn, enabled, batch_size=None):
    batch_size = batch_size or DEFAULT_BATCH_SIZE
    ...
```

    arn_to_lambda[arn].cwd = lambda_cwd


+def add_event_source(function_name, source_arn, enabled, batch_size=None):
+   batch_size = batch_size or DEFAULT_BATCH_SIZE
+
    mapping = {
        'UUID': str(uuid.uuid4()),
        'StateTransitionReason': 'User action',
codereview_python_data_10121
    corpus_directories.insert(0, merge_directory)

    if use_minijail:
-       target = '/' + MERGE_DIRECTORY_NAME
-       minijail_chroot.add_binding(
-           minijail.ChrootBinding(merge_directory, target, True))

    merge_result = runner.merge(
        corpus_directories,

Please use the helper `bind_corpus_dirs(minijail_chroot, [merge_directory])`; we use this in a similar way for e.g. new_testcase_mutations_directory.

    corpus_directories.insert(0, merge_directory)

    if use_minijail:
+       bind_corpus_dirs(minijail_chroot, [merge_directory])

    merge_result = runner.merge(
        corpus_directories,
codereview_python_data_10122
    if depth not in self.arch_settings:
        raise KeyError(f'invalid depth {depth} for darknet')

-   assert not (init_cfg and pretrained), \
-       'init_cfg and pretrained cannot be setting at the same time'
-
    self.depth = depth
    self.out_indices = out_indices
    self.frozen_stages = frozen_stages

Maybe move this part to lines 132-147.

    if depth not in self.arch_settings:
        raise KeyError(f'invalid depth {depth} for darknet')

    self.depth = depth
    self.out_indices = out_indices
    self.frozen_stages = frozen_stages
codereview_python_data_10123
        raise TypeError('`amount` must be an int')
    if amount < 1:
        raise AmountError('`amount` must be greater than 0')
-   if amount > 9 * 10 ** 18:
-       raise AmountError('`amount` must be <= 9000000000000000000')

    self.fulfillment = fulfillment
    self.amount = amount

Can we make this a const somewhere so that we don't need to recalculate this value every time?

        raise TypeError('`amount` must be an int')
    if amount < 1:
        raise AmountError('`amount` must be greater than 0')
+   if amount > self.MAX_AMOUNT:
+       raise AmountError('`amount` must be <= %s' % self.MAX_AMOUNT)

    self.fulfillment = fulfillment
    self.amount = amount
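For context, the constant-based fix the reviewer asks for typically looks like the sketch below; the class name and attribute are assumptions for illustration, not the actual project source:

```python
class Output:
    # Evaluated once at class-creation time, not on every __init__ call.
    MAX_AMOUNT = 9 * 10 ** 18

    def __init__(self, amount):
        if amount > self.MAX_AMOUNT:
            raise ValueError('`amount` must be <= %s' % self.MAX_AMOUNT)
        self.amount = amount
```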
codereview_python_data_10126
                                               'sleep_max', 'watchdog', 'pending_contact']))
-   @aiohttp_apispec.response_schema(AgentSchema(only=[]), description="Returns JSON response with updated Agent fields")
    async def update_agent(self, request: web.Request):
        agent = await self.update_object(request)
        return web.json_response(agent.display)

This `only=[]` can be removed from the AgentSchema here.

                                               'sleep_max', 'watchdog', 'pending_contact']))
+   @aiohttp_apispec.response_schema(AgentSchema(), description="Returns JSON response with updated Agent fields")
    async def update_agent(self, request: web.Request):
        agent = await self.update_object(request)
        return web.json_response(agent.display)
codereview_python_data_10129
def fallback_chord_unlock(self, header_result, body, countdown=1, **kwargs):
    kwargs['result'] = [r.as_tuple() for r in header_result]
-   body_type = body.get('type', None)
    queue = body.options.get('queue', getattr(body_type, 'queue', None))
    priority = body.options.get('priority', getattr(body_type, 'priority', 0))
    self.app.tasks['celery.chord_unlock'].apply_async(

This change needs to be covered by a unit test.

def fallback_chord_unlock(self, header_result, body, countdown=1, **kwargs):
    kwargs['result'] = [r.as_tuple() for r in header_result]
+   try:
+       body_type = getattr(body, 'type', None)
+   except NotRegistered:
+       body_type = None
+
    queue = body.options.get('queue', getattr(body_type, 'queue', None))
    priority = body.options.get('priority', getattr(body_type, 'priority', 0))
    self.app.tasks['celery.chord_unlock'].apply_async(
codereview_python_data_10132
LEVEL = 1  # warn about all errors level 1 or higher


-def _write_file_encode(file,line):
    try:
        file.write(line)
    except UnicodeEncodeError:
-       file.write(line.encode('ascii','replace'))


def message(position, message, level=1):

Space after comma, please. Also in the calls below.

LEVEL = 1  # warn about all errors level 1 or higher


+def _write_file_encode(file, line):
    try:
        file.write(line)
    except UnicodeEncodeError:
+       file.write(line.encode('ascii', 'replace'))


def message(position, message, level=1):
codereview_python_data_10133
        self.dummy_cycle(m, 1, b"")
        assert len(m.view) == i

-   @mock.patch('mitmproxy.tools.console.signals.add_log', side_effect=mock_add_log)
-   def test_run_script_once(self, test_func):
        m = self.mkmaster()
        f = tflow.tflow(resp=True)
-       with mitmproxy.test.tutils.raises(ScriptError):
-           m.run_script_once("nonexistent", [f])

    def test_intercept(self):
        """regression test for https://github.com/mitmproxy/mitmproxy/issues/1605"""

Have you considered just running `m.run_script_once("nonexistent", [f])` and then checking `m.logbuffer`?

        self.dummy_cycle(m, 1, b"")
        assert len(m.view) == i

+   def test_run_script_once(self):
        m = self.mkmaster()
        f = tflow.tflow(resp=True)
+       m.run_script_once("nonexistent", [f])
+       assert "Input error" in str(m.logbuffer[0])

    def test_intercept(self):
        """regression test for https://github.com/mitmproxy/mitmproxy/issues/1605"""
codereview_python_data_10135
"""Open main startpage in current tab.""" self.openurl(config.val.url.start_pages[0]) - def _selection_callback(self, s): - try: - self._run_userscript(s) - except cmdexc.CommandError as e: - message.error(str(e)) - - def _run_userscript(self, selection): """Run a userscript given as argument. Args: This seems like a quite generic name - why not something like `_run_userscript_wrapper` or so? """Open main startpage in current tab.""" self.openurl(config.val.url.start_pages[0]) + def _run_userscript(self, selection, cmd, args, verbose): """Run a userscript given as argument. Args:
codereview_python_data_10141
@LOSSES.register_module()
-class AELoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in

Using the full name `AssociativeEmbeddingLoss` seems better.

@LOSSES.register_module()
+class AssociativeEmbeddingLoss(nn.Module):
    """Associative Embedding Loss.

    More details can be found in
codereview_python_data_10144
"ownedassetsresource", ), ) - assert_proper_response_with_result(response) - data = response.json() - assert data['message'] == '' - assert set(data['result']) == {'ETH', 'BTC', 'EUR', A_RDN.identifier} def test_ignored_assets_modification(rotkehlchen_api_server_with_exchanges): Hmm I think you misunderstood the reason for this change. `assert_proper_response_with_result` is exactly like `assert_proper_response()` but also gets the response's result. So this PR should reduce overall test lines. Not increase them. What `assert_proper_response_with_result()` does internally is to: 1. get the response.json() 2. Check `response_data['message'] == ''` -> so that the result is proper 3. Return data['result'] So for example for this particular instance what you should have done is replace lines 64-67 with: ```python result = assert_proper_response_with_result(response) assert(set(result)) == {'ETH', 'BTC', 'EUR', A_RDN.identifier} ``` "ownedassetsresource", ), ) + result = assert_proper_response_with_result(response) + assert set(result) == {'ETH', 'BTC', 'EUR', A_RDN.identifier} def test_ignored_assets_modification(rotkehlchen_api_server_with_exchanges):
codereview_python_data_10147
            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(
-               gts[i], img_proposal[:prop_num, :4], extra_length=extra_length)
        all_ious.append(ious)
    all_ious = np.array(all_ious)
    recalls = _recalls(all_ious, proposal_nums, iou_thrs)

`bbox_overlaps` has no `extra_length` kwarg.

            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(
+               gts[i],
+               img_proposal[:prop_num, :4],
+               use_legacy_coordinate=use_legacy_coordinate)
        all_ious.append(ious)
    all_ious = np.array(all_ious)
    recalls = _recalls(all_ious, proposal_nums, iou_thrs)
codereview_python_data_10152
    If min_value is not None then all generated values are no less than
    min_value. If max_value is not None then all generated values are no
    greater than max_value. min_value and max_value may be anything accepted
-   by the :python:`fractions.Fraction` constructor.

    If max_denominator is not None then the denominator of any generated
    values is no greater than max_denominator. Note that max_denominator must

I think the tilde here is correct (it makes it show up as Fraction rather than fractions.Fraction).

    If min_value is not None then all generated values are no less than
    min_value. If max_value is not None then all generated values are no
    greater than max_value. min_value and max_value may be anything accepted
+   by the :python:`~fractions.Fraction` constructor.

    If max_denominator is not None then the denominator of any generated
    values is no greater than max_denominator. Note that max_denominator must
codereview_python_data_10153
                                 (target_h + eps))
    # view(..., -1) does not work for empty tensor
    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
-                           dim=-1).view(loss_dx.size(0), -1)
    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
                       loss_comb - 0.5 * beta)

We can use flatten rather than view to avoid that.

                                 (target_h + eps))
    # view(..., -1) does not work for empty tensor
    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
+                           dim=-1).flatten(1)
    loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
                       loss_comb - 0.5 * beta)
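The reason `flatten` helps is visible on an empty tensor: `view(n, -1)` cannot infer the `-1` dimension when the tensor has zero elements, while `flatten(1)` needs no inference. A minimal sketch, assuming PyTorch is installed:

```python
import torch

empty = torch.empty(0, 4)          # zero rows, as in the empty-loss case
print(empty.flatten(1).shape)      # torch.Size([0, 4]) -- works

try:
    empty.view(empty.size(0), -1)  # -1 is ambiguous for 0 elements
except RuntimeError as exc:
    print(exc)
```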
codereview_python_data_10166
        self.psk_secret = psk
        self.psk_mode = psk_mode
        if handle_session_ticket is None:
-           handle_session_ticket = (session_ticket_file is not None)
        if handle_session_ticket:
            session_ticket_file = session_ticket_file or get_temp_file()
        self.handle_session_ticket = handle_session_ticket

The parentheses are not needed.

        self.psk_secret = psk
        self.psk_mode = psk_mode
        if handle_session_ticket is None:
+           handle_session_ticket = session_ticket_file is not None
        if handle_session_ticket:
            session_ticket_file = session_ticket_file or get_temp_file()
        self.handle_session_ticket = handle_session_ticket
codereview_python_data_10173
@pytest.mark.parametrize("dtype", _DTYPES)
-@pytest.mark.parametrize("shape", [(3, 3,), (3, 3, 1), (3, 3, 3), (4, 3, 3, 3)])
def test_equalize_dtype_shape(dtype, shape):
    image = np.ones(shape=shape, dtype=dtype)
    equalized = color_ops.equalize(tf.constant(image)).numpy()

If you're using a `random`, use a fixed seed value.

@pytest.mark.parametrize("dtype", _DTYPES)
+@pytest.mark.parametrize("shape", [(4, 4,), (4, 4, 1), (4, 4, 3), (5, 4, 4, 3)])
def test_equalize_dtype_shape(dtype, shape):
    image = np.ones(shape=shape, dtype=dtype)
    equalized = color_ops.equalize(tf.constant(image)).numpy()
codereview_python_data_10175
class OptimScheduler(LossRecorder):
-   '''
-   Learning rate Scheduler for training involving multiple phases.
-   '''
    def __init__(self, layer_opt, phases, nb_batches, stop_div = False):
        self.phases, self.nb_batches, self.stop_div = phases, nb_batches, stop_div

Could you put all the one-liner docs on a single line? (i.e. put the quotes on the same line)

class OptimScheduler(LossRecorder):
+   '''Learning rate Scheduler for training involving multiple phases.'''
    def __init__(self, layer_opt, phases, nb_batches, stop_div = False):
        self.phases, self.nb_batches, self.stop_div = phases, nb_batches, stop_div
codereview_python_data_10179
        # Step 2. edge softmax to compute attention scores
        graph.edata['sa'] = edge_softmax(graph, graph.edata['a'])

-       # Step 3. Broadcast softmax value to each edge, and then attention is done
-       graph.apply_edges(fn.u_mul_e('ft', 'sa', 'attn'))
-
-       # Step 4. Aggregate attention to dst,user nodes, so formula 7 is done
-       graph.update_all(fn.copy_e('attn', 'm'), fn.sum('m', 'agg_u'))

        # output results to the destination nodes
        rst = graph.dstdata['agg_u']

Please merge this step with the following step by:

```python
graph.update_all(fn.u_mul_e('ft', 'sa', 'attn'), fn.sum('attn', 'agg_u'))
```

to remove the overhead of materializing ft * sa on edges.

        # Step 2. edge softmax to compute attention scores
        graph.edata['sa'] = edge_softmax(graph, graph.edata['a'])

+       # Step 3. Broadcast softmax value to each edge, and aggregate dst node
+       graph.update_all(fn.u_mul_e('ft', 'sa', 'attn'), fn.sum('attn', 'agg_u'))

        # output results to the destination nodes
        rst = graph.dstdata['agg_u']
codereview_python_data_10184
        if win_id and count:
            raise TypeError("Argument marked as both count/win_id!")
        if zero_count and not count:
-           raise TypeError("Zero_count Argument cannot exist without count!")
        self.win_id = win_id
        self.count = count
        self.zero_count = zero_count

nitpick: Please lower-case `Zero_count` (as it's a literal argument name) and `Argument` here.

        if win_id and count:
            raise TypeError("Argument marked as both count/win_id!")
        if zero_count and not count:
+           raise TypeError("zero_count argument cannot exist without count!")
        self.win_id = win_id
        self.count = count
        self.zero_count = zero_count
codereview_python_data_10188
        if tx_dict['operation'] in [Transaction.CREATE, Transaction.GENESIS]:
            # TODO: Maybe replace this call to a call to get_asset_by_id
            asset = list(bigchain.get_assets([tx_dict['id']]))[0]
-           asset.pop('id')
            tx_dict.update({'asset': asset})

        return cls.from_dict(tx_dict)

```python
del asset['id']
```

would be more precise given that we do not need the popped item :smile:

        if tx_dict['operation'] in [Transaction.CREATE, Transaction.GENESIS]:
            # TODO: Maybe replace this call to a call to get_asset_by_id
            asset = list(bigchain.get_assets([tx_dict['id']]))[0]
+           del asset['id']
            tx_dict.update({'asset': asset})

        return cls.from_dict(tx_dict)
codereview_python_data_10189
from mmdet.utils import Registry

-IOUCALCULATOR = Registry('iou_calculator')

We may rename to `IOU_CALCULATOR`.

from mmdet.utils import Registry

+IOU_CALCULATOR = Registry('iou_calculator')
codereview_python_data_10194
""" Sets the status message of the task to message, i.e., invokes _status_message_callback if it is a callable. This propagates the message down to the scheduler. """ if hasattr(self._status_message_callback, "__call__"): self._status_message_callback(message) In the docs here, can you add a reference the `_Task.set_status_message:` section? I think there are a few example in this file you can copy paste. """ Sets the status message of the task to message, i.e., invokes _status_message_callback if it is a callable. This propagates the message down to the scheduler. + + See :ref:`Task.set_status_message` """ if hasattr(self._status_message_callback, "__call__"): self._status_message_callback(message)
codereview_python_data_10197
            return self.text
        else:
            try:
-               return self.translations.filter(locale=locale).first().text
-           except AttributeError:
                return None

    def __str__(self):

Can there be several translations for a single term? If so, what makes you sure that the `first` is the one to use? If not, shouldn't it be better to use something like `get` or another function that ensures there's a single result?

            return self.text
        else:
            try:
+               return self.translations.get(locale=locale).text
+           except (AttributeError, TermTranslation.DoesNotExist):
                return None

    def __str__(self):
codereview_python_data_10207
    async def start(self):
        loop = asyncio.get_event_loop()
        tcp = self.get_config('app.contact.tcp')
-       loop.create_task(asyncio.start_server(self.tcp_handler.accept, '127.0.0.1', tcp.split(':')[1], loop=loop))
        loop.create_task(self.operation_loop())

    async def operation_loop(self):

actually... we can't make this change... we need to serve the port on all interfaces so external agents can connect... can you find a different approach that works?

    async def start(self):
        loop = asyncio.get_event_loop()
        tcp = self.get_config('app.contact.tcp')
+       loop.create_task(asyncio.start_server(self.tcp_handler.accept, '0.0.0.0', tcp.split(':')[1], loop=loop))
        loop.create_task(self.operation_loop())

    async def operation_loop(self):
codereview_python_data_10211
        for include in self.parse_dependencies(filename)[1]:
            include_path = join_path(os.path.dirname(filename), include)
            if not path_exists(include_path):
-               include_path = self.context.find_include_file(include, (FileSourceDescriptor(filename),))
            if include_path:
                if '.' + os.path.sep in include_path:
                    include_path = os.path.normpath(include_path)

Hmm I'd rather not create a fake code position tuple here. I'll see what I can do about it.

        for include in self.parse_dependencies(filename)[1]:
            include_path = join_path(os.path.dirname(filename), include)
            if not path_exists(include_path):
+               include_path = self.context.find_include_file(include, source_file_path=filename)
            if include_path:
                if '.' + os.path.sep in include_path:
                    include_path = os.path.normpath(include_path)
codereview_python_data_10213
        if not isinstance(self._client_stat, ClientStatistic):
            raise RuntimeError("Bad Statistic obj")
        random.seed()
-       self.file_name = check_fs(is_dir=False, fs_name=file_name) if file_name is not None else None

    # Copied from Plenum
    def random_string(self, sz: int) -> str:

Where do we stop the loop? Is it intended that there is `while True` here?

        if not isinstance(self._client_stat, ClientStatistic):
            raise RuntimeError("Bad Statistic obj")
        random.seed()
+       self._data_file = None
+       if file_name is not None:
+           self._data_file = open(check_fs(is_dir=False, fs_name=file_name), "rt")

    # Copied from Plenum
    def random_string(self, sz: int) -> str:
codereview_python_data_10215
    Parameters
    ----------
    row_labels : list-like, slice or label
-       The indices for the rows to extract.
    col_labels : list-like, slice or label
-       The indices for the columns to extract.

    Returns
    -------

docstring seems to be wrong now

    Parameters
    ----------
    row_labels : list-like, slice or label
+       The row labels for the rows to extract.
    col_labels : list-like, slice or label
+       The column labels for the columns to extract.

    Returns
    -------
codereview_python_data_10223
    'strcmp',
    'strcpy',
    'strlen',
-   'tcmalloc',
-   'tc_malloc',
]

IGNORE_CONTAINS_IF_SYMBOLIZED = [
    'libc.so',

instead of these 2, better to use '/tcmalloc/', similar to jemalloc above. see file paths section.

    'strcmp',
    'strcpy',
    'strlen',
]

IGNORE_CONTAINS_IF_SYMBOLIZED = [
    'libc.so',
codereview_python_data_10225
        else:
            res = http.get(self.url)

-       status = _status_re.search(res.text).group(1)
-       if status != 'true':
            self.logger.info("Stream currently unavailable.")
            return

This will raise `AttributeError: 'NoneType' object has no attribute 'group'` if the page does not contain a stream. You could do something like:

```python
status_m = _status_re.search(res.text)
if not status_m:
    return

if status_m.group(1) != 'true':
    ...
```

        else:
            res = http.get(self.url)

+       status = _status_re.search(res.text)
+       if not status:
+           return
+
+       if status.group(1) != 'true':
            self.logger.info("Stream currently unavailable.")
            return
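The crash mode the reviewer describes is easy to reproduce outside the plugin; a minimal standalone sketch of the guard pattern (the regex is an assumption for illustration):

```python
import re

_status_re = re.compile(r'"status"\s*:\s*"(\w+)"')

def stream_status(page_text):
    match = _status_re.search(page_text)
    if not match:          # page has no stream: search() returned None
        return None
    return match.group(1)  # safe: .group() is only called on a real match

print(stream_status('{"status": "true"}'))      # true
print(stream_status('<html>no stream</html>'))  # None
```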
codereview_python_data_10227
    if not os.path.exists(FLAGS.output_path):
        os.makedirs(output_path)
    output_path = os.path.abspath(output_path)
-   _upload_csv_to_gcs(output_path, now_utc, csv_file.name)

    # Send summary email.
    if FLAGS.email_recipient is not None:
        resource_counts = kwargs.get('resource_counts', {})
        _send_email(csv_file.name, now_utc, all_violations, resource_counts)


-def _upload_csv_to_gcs(output_path, now_utc, csv_name):
    """Upload CSV to Cloud Storage.

    Args:

nit: I think this method would be better named "_upload_csv()", since this method seems to do both. By renaming, the code block here will also make a little more sense, i.e. not passing a non-gcs output_path to _upload_csv_to_gcs().

    if not os.path.exists(FLAGS.output_path):
        os.makedirs(output_path)
    output_path = os.path.abspath(output_path)
+   _upload_csv(output_path, now_utc, csv_file.name)

    # Send summary email.
    if FLAGS.email_recipient is not None:
        resource_counts = kwargs.get('resource_counts', {})
        _send_email(csv_file.name, now_utc, all_violations, resource_counts)


+def _upload_csv(output_path, now_utc, csv_name):
    """Upload CSV to Cloud Storage.

    Args:
codereview_python_data_10229
    tokens[-1] = fn._to_snake_case(tokens[-1])
    return '.'.join(tokens)


def name_sort(op_name):
    _, module, name = ops._process_op_name(op_name)
    return '.'.join(module + [name.upper()])

Maybe add `to_fn_module`, I see that `module_name.replace('.ops', '.fn')` appears several times in this file.

    tokens[-1] = fn._to_snake_case(tokens[-1])
    return '.'.join(tokens)


+def to_fn_module(module_name):
+   return module_name.replace('.ops', '.fn')
+

def name_sort(op_name):
    _, module, name = ops._process_op_name(op_name)
    return '.'.join(module + [name.upper()])
codereview_python_data_10230
import unittest
import luigi
from luigi.contrib.k8s_job import KubernetesJobTask

try:
    from pykube.config import KubeConfig
    from pykube.http import HTTPClient

Remove last 2 lines please. :)

import unittest
import luigi
+import logging
from luigi.contrib.k8s_job import KubernetesJobTask

+logger = logging.getLogger('luigi-interface')
+
try:
    from pykube.config import KubeConfig
    from pykube.http import HTTPClient
codereview_python_data_10251
        if action_config.get("type"):
            return self._parse_dict_action(action_config)
        block = self._get_execution_block(action_config)
-       if block and len(block) == 1:
            name, param = (block[0], action_config.get(block[0]))
        else:
            name, param = next(iteritems(action_config))

As `_get_execution_block` always returns a list, only the second part of the condition (`len(block)...`) is necessary.

        if action_config.get("type"):
            return self._parse_dict_action(action_config)
        block = self._get_execution_block(action_config)
+       if len(block) == 1:
            name, param = (block[0], action_config.get(block[0]))
        else:
            name, param = next(iteritems(action_config))
codereview_python_data_10254
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd"> <metadata> <id>LightGBM</id> - <version>{version, datetime.datetime.now().year}</version> <authors>Guolin Ke</authors> <owners>Guolin Ke</owners> <licenseUrl>https://github.com/microsoft/LightGBM/blob/master/LICENSE</licenseUrl> <projectUrl>https://github.com/microsoft/LightGBM</projectUrl> <requireLicenseAcceptance>false</requireLicenseAcceptance> <description>A fast, distributed, high performance gradient boosting framework</description> - <copyright>Copyright %d @ Microsoft</copyright> <tags>machine-learning data-mining distributed native boosting gbdt</tags> <dependencies> </dependencies> </metadata> This fix is not quite correct. The `%s` should be replaced with `version` and the `%d` on line 39 should be replaced with `datetime.datetime.now().year`. <package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd"> <metadata> <id>LightGBM</id> + <version>{version}</version> <authors>Guolin Ke</authors> <owners>Guolin Ke</owners> <licenseUrl>https://github.com/microsoft/LightGBM/blob/master/LICENSE</licenseUrl> <projectUrl>https://github.com/microsoft/LightGBM</projectUrl> <requireLicenseAcceptance>false</requireLicenseAcceptance> <description>A fast, distributed, high performance gradient boosting framework</description> + <copyright>Copyright {datetime.datetime.now().year}@ Microsoft</copyright> <tags>machine-learning data-mining distributed native boosting gbdt</tags> <dependencies> </dependencies> </metadata>
codereview_python_data_10255
        )

        # normalize to get smoothed representation
-       degs = graph.in_degrees().float().clamp(min=1)
-       norm = th.pow(degs, -0.5)
-       norm = norm.to(feat.device).unsqueeze(1)
        feat = feat * norm
        graph.ndata["h"] = feat

Should edge weight also participate in symmetric adjacency normalization?

        )

        # normalize to get smoothed representation
+       if edge_weight is None:
+           degs = graph.in_degrees().float().clamp(min=1)
+           norm = th.pow(degs, -0.5)
+           norm = norm.to(feat.device).unsqueeze(1)
+       else:
+           norm = EdgeWeightNorm('both')(graph, edge_weight)
        feat = feat * norm
        graph.ndata["h"] = feat
codereview_python_data_10259
    g : DistGraph
        The distributed graph.
    nodes : tensor or dict
-       Node ids to sample neighbors from.
    fanout : int
        The number of sampled neighbors for each node.
    edge_dir : str, optional

Add docstring for dict

    g : DistGraph
        The distributed graph.
    nodes : tensor or dict
+       Node ids to sample neighbors from. If it's a dict, it should contain only
+       one key-value pair to make this API consistent with dgl.sampling.sample_neighbors.
    fanout : int
        The number of sampled neighbors for each node.
    edge_dir : str, optional
codereview_python_data_10267
    def is_tax_known(self):
        return self.method.is_tax_known

    @property
    def charge_excl_tax(self):
        raise NotImplemented()

The `self.discount` property will fail, because it requires the price with the tax. A couple of lines above:

```
@property
def discount(self):
    return self.get_discount()['discount']

def get_discount(self):
    # Return a 'discount' dictionary in the same form as that used by the
    # OfferApplications class
    parent_charge = self.method.charge_incl_tax
    # ...
```

`is_discounted` should be refactored to

```
@property
def is_discounted(self):
    # We check to see if the discount is non-zero. It is possible to have
    # zero shipping already in which case the offer does not lead to
    # any further discount.
    return self.discount > 0
```

    def is_tax_known(self):
        return self.method.is_tax_known

+   @property
+   def effective_discount(self):
+       """
+       The discount value.
+       """
+       raise NotImplemented()
+
    @property
    def charge_excl_tax(self):
        raise NotImplemented()
codereview_python_data_10273
""" self.rule_book = BigqueryRuleBook(self._load_rule_definitions()) - # TODO: The naming is confusing and needs to be fixed in all scanners. def find_violations(self, parent_project, bq_acl, force_rebuild=False): """Determine whether Big Query datasets violate rules. Please remove this TODO, since they will not apply anymore after you are done. :) Can you please remove this everywhere else in this PR? """ self.rule_book = BigqueryRuleBook(self._load_rule_definitions()) def find_violations(self, parent_project, bq_acl, force_rebuild=False): """Determine whether Big Query datasets violate rules.
codereview_python_data_10277
    return platforms.Platforms(config_instance)


-@pytest.fixture
-def _instance_with_platform_name(config_instance):
-   return platforms.Platforms(config_instance, platform_name="instance-1")
-
-
def test_instances_property(_instance):
    x = [
        {"groups": ["foo", "bar"], "name": "instance-1", "children": ["child1"]},

`instance-1` is hardcoded in the test body and the detached fixture. It is best to keep such coupled things in the same place. I think that having an extra fixture is not warranted in this case. But if it was, this should've definitely gone to "parameterize" where it could be evident/visible that these values are connected.

    return platforms.Platforms(config_instance)


def test_instances_property(_instance):
    x = [
        {"groups": ["foo", "bar"], "name": "instance-1", "children": ["child1"]},
codereview_python_data_10279
            [2.],
            [3.]])
    """
-   assert g.batch_size == 1, \
-       'reverse is not supported for a BatchedDGLGraph object'
    g_reversed = DGLGraph(multigraph=g.is_multigraph)
    g_reversed.add_nodes(g.number_of_nodes())
    g_edges = g.all_edges(order='eid')

Can we support it instead? Just set the same `batch_num_nodes` and `batch_num_edges`.

            [2.],
            [3.]])
    """
    g_reversed = DGLGraph(multigraph=g.is_multigraph)
    g_reversed.add_nodes(g.number_of_nodes())
    g_edges = g.all_edges(order='eid')
codereview_python_data_10283
        Returns
        -------
-       numpy.array
-           array of the centroid frame indices
        """
        raise NotImplementedError("Class {0} doesn't implement __call__()"
                                  .format(self.__class__.__name__))

This method doesn't return anything, it raises a NotImplementedError

        Returns
        -------
+       This method doesn't return anything, it raises a NotImplementedError
+
        """
        raise NotImplementedError("Class {0} doesn't implement __call__()"
                                  .format(self.__class__.__name__))
codereview_python_data_10285
"Couldn't compute ratio for dividend sid=2, ex_date=1990-10-19," " amount=0.100", )) self.assertTrue(self.log_handler.has_warning( 'Dividend ratio <= 0 for dividend sid=1, ex_date=1990-10-17,' ' amount=0.510', For paranoia's sake, can we also check that we handle the case where this is well beyond the end of the pricing data (e.g., 10 days or something)? Since we're fiddling with indices in the code under test here, I could imagine a world where this worked for dates one past the end of the array, but failed if we were well after the end. "Couldn't compute ratio for dividend sid=2, ex_date=1990-10-19," " amount=0.100", )) + self.assertTrue(self.log_handler.has_warning( + "Couldn't compute ratio for dividend sid=2, ex_date=1990-11-01," + " amount=0.100", + )) self.assertTrue(self.log_handler.has_warning( 'Dividend ratio <= 0 for dividend sid=1, ex_date=1990-10-17,' ' amount=0.510',
codereview_python_data_10296
"The mitmproxy certificate authority has expired!\n" "Please delete all CA-related files in your ~/.mitmproxy folder.\n" "The CA will be regenerated automatically after restarting mitmproxy.\n" - "Then make sure all your clients have the new CA installed.", ) for certspec in ctx.options.certs: Let's point them to the docs to use mitm.it et al. "The mitmproxy certificate authority has expired!\n" "Please delete all CA-related files in your ~/.mitmproxy folder.\n" "The CA will be regenerated automatically after restarting mitmproxy.\n" + "See https://docs.mitmproxy.org/stable/concepts-certificates/ for additional help.", ) for certspec in ctx.options.certs:
codereview_python_data_10301
                          disposition='attachment', content_id=None):
        """Create a SendGrid attachment.

-       Email connector attachments file content must be base64 encoded.

        Args:
            file_location (str): The path of the file.

Revert to `SendGrid` since in this class, that is correct.

                          disposition='attachment', content_id=None):
        """Create a SendGrid attachment.

+       SendGrid attachments file content must be base64 encoded.

        Args:
            file_location (str): The path of the file.
codereview_python_data_10307
    )

    def _resolve_hooks(self, hooks):
-       return DelegatingHooks(list(hooks) + self._default_hooks)

we talked in person, and decided to put the defaults first.

    )

    def _resolve_hooks(self, hooks):
+       if hooks is None:
+           hooks = []
+       return DelegatingHooks(self._default_hooks + hooks)
codereview_python_data_10308
    def test_config_py_arg_source(self, commands, config_py_arg, config_stub):
        assert config_stub.val.content.javascript.enabled
-       config_stub.val.search.ignore_case = 'always'
        config_py_arg.write_text('c.content.javascript.enabled = False\n',
                                 encoding='utf-8')
        commands.config_source()

Is this needed?

    def test_config_py_arg_source(self, commands, config_py_arg, config_stub):
        assert config_stub.val.content.javascript.enabled
        config_py_arg.write_text('c.content.javascript.enabled = False\n',
                                 encoding='utf-8')
        commands.config_source()
codereview_python_data_10311
def _community(G, u, community):
    """Get the community of the given node."""
-   if community not in G.node[u]:
        raise nx.NetworkXAlgorithmError('No community information')
-   return G.node[u][community]

```python
node_u = G.node[u]
try:
    return node_u[community]
except KeyError:
    raise nx.NetworkXAlgorithmError('No community information', u)
```

This avoids punishing the common case where community information is present.

def _community(G, u, community):
    """Get the community of the given node."""
+   node_u = G.node[u]
+   try:
+       return node_u[community]
+   except KeyError:
        raise nx.NetworkXAlgorithmError('No community information')
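The suggested shape is the standard EAFP idiom: attempt the lookup and translate the `KeyError`, instead of paying for a membership test plus a second lookup on every call. A standalone sketch with illustrative names:

```python
class NoCommunityError(Exception):
    pass

def community_of(node_attrs, community_key):
    try:
        return node_attrs[community_key]   # common case: one dict lookup
    except KeyError:
        raise NoCommunityError('No community information')

print(community_of({'community': 3}, 'community'))  # 3
```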
codereview_python_data_10319
        asset.sid, dt, column
    )

-   if result == 0 or np.isnan(result):
-       if column == "volume":
            return 0
-
-       if not ffill:
-           return np.nan

    # we are looking for price, and didn't find one. have to go hunting.
    last_traded_dt = \

I'd recommend restructuring so that volume and OHLC handling are not comingled. With the implementation above, when `volume` is non-zero `np.isnan(result)` is also called. That extra call would be removed with:

```
if column == "volume":
    if result == 0:
        return 0
elif np.isnan(result) and not ffill:
    return np.nan
```

        asset.sid, dt, column
    )

+   if column == "volume":
+       if result == 0:
            return 0
+   elif not ffill and np.isnan(result):
+       return np.nan

    # we are looking for price, and didn't find one. have to go hunting.
    last_traded_dt = \
codereview_python_data_10321
def union(G, H, rename=(None, None)):
    """Return the union of graphs G and H.

-   Graphs G and H must be disjoint, otherwise an exception is raised.

    Parameters
    ----------

This is a good change -- but it might need a deprecation warning until v3.0 because it is a change in the API. If we decide that is the better way forward, the parameter should stay for now. The code should then check if `name is not None` and if so, raise a deprecation warning. The function should then be listed in the `doc/developer/deprecations.rst` file, and an entry in `networkx/conftest.py` I believe...

def union(G, H, rename=(None, None)):
    """Return the union of graphs G and H.

+   Graphs G and H must be disjoint after the renaming takes place,
+   otherwise an exception is raised.

    Parameters
    ----------
codereview_python_data_10325
            out[in_size-1, in_size-1, c] = c
        return [out]

-   pipe = Pipeline(1, 3, 0)
    input = fn.external_source(source=get_data, device=device)
    rotated = fn.warp_affine(input, matrix=[-1, 0, in_size, 0, -1, in_size],

I would set prefetch_queue_depth to 1 to reduce the number of output buffers.

            out[in_size-1, in_size-1, c] = c
        return [out]

+   pipe = Pipeline(1, 3, 0, prefetch_queue_depth=1)
    input = fn.external_source(source=get_data, device=device)
    rotated = fn.warp_affine(input, matrix=[-1, 0, in_size, 0, -1, in_size],
codereview_python_data_10328
    If the config has a special prefix for emails then this function adds this prefix.
    """
-   prefix = email().prefix
-   if prefix is not None and prefix != '':
        return "{} {}".format(email().prefix, subject)
    else:
        return subject

Thanks. Though these two lines can be simplified to simply: `if email().prefix:`

This is awesome. As we also get rid of the `is not None` antipattern in Python!

    If the config has a special prefix for emails then this function adds this prefix.
    """
+   if email().prefix:
        return "{} {}".format(email().prefix, subject)
    else:
        return subject
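The simplification works because both `None` and the empty string are falsy in Python, so one truthiness test covers both conditions. A minimal sketch:

```python
def add_prefix(prefix, subject):
    # Equivalent to: prefix is not None and prefix != ''
    if prefix:
        return "{} {}".format(prefix, subject)
    return subject

print(add_prefix(None, 'build failed'))    # build failed
print(add_prefix('', 'build failed'))      # build failed
print(add_prefix('[CI]', 'build failed'))  # [CI] build failed
```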
codereview_python_data_10337
        content = fds.read()

    target_lines = [
-       "var_loc_keys=self.loc_mng.get_locator([{'name':'btn1',}],30.0)self.driver.find_element"
        "(var_loc_keys[0],var_loc_keys[1]).click()",
-       "var_loc_keys=self.loc_mng.get_locator([{'id':'Id_123',}],30.0)self.driver.find_element"
        "(var_loc_keys[0],var_loc_keys[1]).clear()",
        "self.driver.find_element(var_loc_keys[0],var_loc_keys[1]).send_keys('London')"
    ]

It would be great not to repeat the timeout in every get_locator call. It looks like it should be passed into the LocatorsManager init, and possibly it should be non-mandatory, something like: `def __init__(self, locators, timeout=60):`

        content = fds.read()

    target_lines = [
+       "var_loc_keys=self.loc_mng.get_locator([{'name':'btn1',}])self.driver.find_element"
        "(var_loc_keys[0],var_loc_keys[1]).click()",
+       "var_loc_keys=self.loc_mng.get_locator([{'id':'Id_123',}])self.driver.find_element"
        "(var_loc_keys[0],var_loc_keys[1]).clear()",
        "self.driver.find_element(var_loc_keys[0],var_loc_keys[1]).send_keys('London')"
    ]
codereview_python_data_10341
        The platform that the code is running on. By default this will be the
        string 'zipline'. This can allow algorithms to know if they are
        running on the Quantopian platform instead.

    Returns
    -------

What does the star mean here?

        The platform that the code is running on. By default this will be the
        string 'zipline'. This can allow algorithms to know if they are
        running on the Quantopian platform instead.
+   * : dict[str -> any]
+       Returns all of the fields in a dictionary.

    Returns
    -------
codereview_python_data_10343
class PersistenceContext:
    def __init__(self, state_dir: str = None, lock: rwlock.RWLockable = None):
        # state dir (within DATA_DIR) of currently processed API in local file system
-       self.state_dir: str = state_dir
        # read-write lock for concurrency control of incoming requests
-       self.lock: rwlock.RWLockable = lock


class StateSerializer(abc.ABC):

In general it makes more sense to me to add type hints to member variables in the class declaration. That way you can get the type hints through the `__annotations__` magic var (which can be helpful for docs and other reflective mechanisms):

```python
class Foo:
    bar: str

    def __init__(self):
        self.baz: int = 0

if __name__ == '__main__':
    print(Foo.__annotations__)
```

will output `{'bar': <class 'str'>}`

class PersistenceContext:
+   state_dir: str
+   lock: rwlock.RWLockable
+
    def __init__(self, state_dir: str = None, lock: rwlock.RWLockable = None):
        # state dir (within DATA_DIR) of currently processed API in local file system
+       self.state_dir = state_dir
        # read-write lock for concurrency control of incoming requests
+       self.lock = lock


class StateSerializer(abc.ABC):
codereview_python_data_10344
except Exception:
    pass

-try:
-   from collections import OrderedDict  # For Py 2.7+
-except ImportError:
-   from ordereddict import OrderedDict  # Works only on 2.6
-

""" Add alias for python2 and python3 libs and functions. """

can we also move this import to the if sys.version check below?

except Exception:
    pass

""" Add alias for python2 and python3 libs and functions. """
codereview_python_data_10345
    def set_key_columns(self, frame):
        if frame.ncols == 0:
            return

-       nkeys = random.randint(1, frame.ncols)
        keys = random.sample(range(0, frame.ncols), nkeys)
        names = [frame.names[i] for i in keys]

Randomly chosen columns may or may not be valid for setting a key. However, at this point it is definitely knowable which case it is. Thus, the python output should be either

```
DT.key = %r
```

or

```
with check_raises(ValueError('Cannot set a key: the values are not unique')):
    DT.key = %r
```

(this context manager can be added trivially).

    def set_key_columns(self, frame):
        if frame.ncols == 0:
            return

+       nkeys = min(int(random.expovariate(1.0)) + 1, frame.ncols)
        keys = random.sample(range(0, frame.ncols), nkeys)
        names = [frame.names[i] for i in keys]
codereview_python_data_10352
    v_k_0 = 0
    for n in G:
        weighted_cost += d[n] * (arborescence.degree(n) - 2)
-       if arborescence.degree(n) - 2 == 0:
-           v_k_0 += 1
    if weighted_cost < min_k_d_weight:
        min_k_d_weight = weighted_cost
        min_k_d = arborescence
    if v_k_0 == len(G):
        min_k_d_weight = weighted_cost
        min_k_d = arborescence

This code sets the `min_k_d` to the current arborescence whenever all the nodes in the arborescence have degree 2. But that might overwrite a result with a lower weight arborescence... Am I missing something?

    v_k_0 = 0
    for n in G:
        weighted_cost += d[n] * (arborescence.degree(n) - 2)
+       v_k_0 += 1 if arborescence.degree(n) - 2 == 0 else 0
    if weighted_cost < min_k_d_weight:
        min_k_d_weight = weighted_cost
        min_k_d = arborescence
+   # If d is the zero vector, then all minimum arborescences
+   # will have the same weighted cost of 0, but if there is a
+   # cycle among them, that needs to be the one we pick
    if v_k_0 == len(G):
        min_k_d_weight = weighted_cost
        min_k_d = arborescence
codereview_python_data_10360
        self.clear_keychain()
        self.mode = usertypes.KeyMode.normal
        self.left.emit(mode, self.mode, self._win_id)

-       if mode in [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]:
            self.enter(self._prev_mode,
                       reason='restore mode before {}'.format(mode.name))

This has the same bug your earlier PR had initially: It enters modes like the hint mode without having the necessary preparation. Also, I really don't think it's worth the complexity: You can see for yourself how easy it is to introduce bugs, and I'm almost sure there are more corner cases when saving modes in different places. I also can't really see many cases where this would be useful (if I'm getting a quit confirmation, I wanted to quit anyways, so I don't care what mode I was in before).

        self.clear_keychain()
        self.mode = usertypes.KeyMode.normal
        self.left.emit(mode, self.mode, self._win_id)

+       if mode in PROMPT_MODES:
            self.enter(self._prev_mode,
                       reason='restore mode before {}'.format(mode.name))
codereview_python_data_10366
from google.cloud.security.notifier import notifier
from google.cloud.security.common.data_access import csv_writer
-from google.cloud.security.scanner.audit import fw_rules_engine
-from google.cloud.security.common.gcp_type import resource_util
from google.cloud.security.common.data_access import firewall_rule_dao
from google.cloud.security.scanner.scanners import base_scanner

LOGGER = log_util.get_logger(__name__)

pylint might complain that this isn't alphasorted?

from google.cloud.security.notifier import notifier
from google.cloud.security.common.data_access import csv_writer
from google.cloud.security.common.data_access import firewall_rule_dao
+from google.cloud.security.common.gcp_type import resource_util
+from google.cloud.security.scanner.audit import fw_rules_engine
from google.cloud.security.scanner.scanners import base_scanner

LOGGER = log_util.get_logger(__name__)
codereview_python_data_10368
        logs_file.close()
    except:
        print "Unknown error while writing logs file!"

colorHex = {
    'red': '91m',

Don't do this blind exception swallow. Let exceptions be raised if any error happens on logging to file, please.

        logs_file.close()
    except:
        print "Unknown error while writing logs file!"
+       raise

colorHex = {
    'red': '91m',
codereview_python_data_10370
def validate_transaction_schema(tx):
-   """ Validate a transaction dict """
-   _validate_schema(TX_SCHEMA, tx)
    if tx['operation'] == 'TRANSFER':
        _validate_schema(TX_SCHEMA_TRANSFER, tx)
    else:

This would be a good place to explain why there are 3 different schemas.

def validate_transaction_schema(tx):
+   """
+   Validate a transaction dict.
+
+   TX_SCHEMA_COMMON contains properties that are common to all types of
+   transaction. TX_SCHEMA_[TRANSFER|CREATE] add additional constraints on top.
+   """
+   _validate_schema(TX_SCHEMA_COMMON, tx)
    if tx['operation'] == 'TRANSFER':
        _validate_schema(TX_SCHEMA_TRANSFER, tx)
    else:
codereview_python_data_10373
@raises(RuntimeError)
-def test_python_operator_error():
    pipeline = Pipeline(1, 1, 0, 0)
    with pipeline:
        output = fn.python_function(function=lambda: np.zeros((3, 3, 3)))

It's not clear to me what the expected error is. Maybe the name of the test case should say it.

@raises(RuntimeError)
+def test_python_operator_not_allowed_in_tf_dataset_error():
    pipeline = Pipeline(1, 1, 0, 0)
    with pipeline:
        output = fn.python_function(function=lambda: np.zeros((3, 3, 3)))
codereview_python_data_10380
@given(st.data())
def test_constructor_is_more_important(data):
-   """Constructor types should take presence over all other annotations."""
    data.draw(st.builds(AnnotatedConstructor))

Oops, I missed this one in review :sweat_smile:

@given(st.data())
def test_constructor_is_more_important(data):
+   """Constructor types should take precedence over all other annotations."""
    data.draw(st.builds(AnnotatedConstructor))
codereview_python_data_10389
    fitted = mda.Universe(PSF, outfile)

    # ensure default file exists
-   with mda.Writer(os.path.join(tmpdir, "rmsfit_align_test.dcd"),
                    n_atoms=fitted.atoms.n_atoms) as w:
        w.write(fitted.atoms)

not calling `str()` is messing up the py2.7 tests

    fitted = mda.Universe(PSF, outfile)

    # ensure default file exists
+   with mda.Writer(str(tmpdir.join("rmsfit_align_test.dcd")),
                    n_atoms=fitted.atoms.n_atoms) as w:
        w.write(fitted.atoms)
codereview_python_data_10393
                           auth_action: AbstractAuthAction=None):
        is_role_accepted = self.is_role_accepted(request, auth_constraint)
        if is_role_accepted is None:
-           return False, "role is not found"
        if not is_role_accepted:
            return False, "role is not accepted"
        if not self.is_sig_count_accepted(request, auth_constraint):

The method can return None if the role is None. But this can be a valid role, if auth constraint has `*` as a role. So, `is_role_accepted` can return two possible errors:

- did doesn't exist on the ledger (if there is a KeyError in `get_role`)
- role X can not do the operation (if role doesn't match the auth constraint, regardless of whether the role is None or not)

                           auth_action: AbstractAuthAction=None):
        is_role_accepted = self.is_role_accepted(request, auth_constraint)
        if is_role_accepted is None:
+           return False, "sender's DID {} is not found in the Ledger".format(request.identifier)
        if not is_role_accepted:
            return False, "role is not accepted"
        if not self.is_sig_count_accepted(request, auth_constraint):
codereview_python_data_10394
def _make_csv_file(filenames):
    def _csv_file_maker(
-       filename=TEST_CSV_FILENAME,
        row_size=NROWS,
        force=True,
        delimiter=",",

Does this occur when files with the same names are created in parallel in different processes?

def _make_csv_file(filenames):
    def _csv_file_maker(
+       filename,
        row_size=NROWS,
        force=True,
        delimiter=",",
codereview_python_data_10396
            self.connector = email_factory.EmailFactory(
                self.notification_config).get_connector()
        except Exception:
-           LOGGER.exception(
-               'Error occurred to instantiate connector.')
            raise InvalidInputError(self.notifier_config)

    def _make_attachment_csv(self):

Move this to the previous line.

            self.connector = email_factory.EmailFactory(
                self.notification_config).get_connector()
        except Exception:
+           LOGGER.exception('Error occurred to instantiate connector.')
            raise InvalidInputError(self.notifier_config)

    def _make_attachment_csv(self):
codereview_python_data_10407
class MoveToFort(BaseTask):

    def should_run(self):
-       if not self.bot.has_space_for_loot():
            logger.log("Not moving to any forts as there aren't enough space. You might want to change your config to recycle more items if this message appears consistently.", 'yellow')
-       return (self.bot.has_space_for_loot()) or self.bot.softban

    def work(self):
        if not self.should_run():

I think you can just return False after this line in both workers and you'll have the behavior we are looking for.

class MoveToFort(BaseTask):

    def should_run(self):
+       has_space_for_loot = self.bot.has_space_for_loot()
+       if not has_space_for_loot:
            logger.log("Not moving to any forts as there aren't enough space. You might want to change your config to recycle more items if this message appears consistently.", 'yellow')
+       return has_space_for_loot or self.bot.softban

    def work(self):
        if not self.should_run():
codereview_python_data_10410
    dead_letter_source_queues = []
    for k, v in queues.items():
        for i, j in v.items():
-           if(i == 'RedrivePolicy'):
                f = json.loads(v[i])
                queue_url_split = queue_url.split('/')
                if(queue_url_split[len(queue_url_split) - 1] in f['deadLetterTargetArn']):

nitpick: we can remove the braces here (same in line 190):

```
if i == 'RedrivePolicy':
    ...
```

    dead_letter_source_queues = []
    for k, v in queues.items():
        for i, j in v.items():
+           if i == 'RedrivePolicy':
                f = json.loads(v[i])
                queue_url_split = queue_url.split('/')
                if(queue_url_split[len(queue_url_split) - 1] in f['deadLetterTargetArn']):
codereview_python_data_10426
def UniprotIterator(handle, alphabet=Alphabet.ProteinAlphabet(), return_raw_comments=False):
-   """Parse UniProt XML as SeqRecord objects.

    parses an XML entry at a time from any UniProt XML file
    returns a SeqRecord for each iteration

How about "Iterate over UniProt XML as SeqRecord objects."?

def UniprotIterator(handle, alphabet=Alphabet.ProteinAlphabet(), return_raw_comments=False):
+   """Iterate over UniProt XML as SeqRecord objects.

    parses an XML entry at a time from any UniProt XML file
    returns a SeqRecord for each iteration
codereview_python_data_10434
<h1>Error 503 Backend is unhealthy</h1> <p>Backend is unhealthy</p> <h3>Guru Mediation:</h3> - <p>Details: cache-sea4457-SEA 1645542722 1213049562</p> <hr> <p>Varnish cache server</p> </body> Have you tried to see how many seconds a delete may take? Because right now after 25 seconds, this will throw an error and then require us to intervene and see if things got deleted ok. Not sure how to improve this though... <h1>Error 503 Backend is unhealthy</h1> <p>Backend is unhealthy</p> <h3>Guru Mediation:</h3> + <p>Details: cache-sea4448-SEA 1645542722 2410604392</p> <hr> <p>Varnish cache server</p> </body>
codereview_python_data_10441
    def check_if_installed(self):
        self.log.debug("Trying Gatling...")
        try:
-           out, err = self.call([self.tool_path, '--help'], env={"JAVA_OPTS": "-Dtaurus.dummy=1"})
            self.log.debug("Gatling check output: %s", out)
        except CALL_PROBLEMS as exc:
            self.log.info("Gatling check failed: %s", exc)

Is this prop still needed?

    def check_if_installed(self):
        self.log.debug("Trying Gatling...")
        try:
+           out, err = self.call([self.tool_path, '--help'])
            self.log.debug("Gatling check output: %s", out)
        except CALL_PROBLEMS as exc:
            self.log.info("Gatling check failed: %s", exc)
codereview_python_data_10443
        # Get the projects for which we will retrieve the buckets.
        try:
-           raw_buckets = self.dao.get_raw_buckets(
-               'buckets', self.cycle_timestamp)
        except data_access_errors.MySQLError as e:
            raise inventory_errors.LoadDataPipelineError(e)

can we replace the hardcoded 'buckets' string with a variable for code style consistency? Also, does it make sense to have the resource name as an argument? Maybe it can be a static string in get_raw_buckets.

        # Get the projects for which we will retrieve the buckets.
        try:
+           raw_buckets = self.dao.get_raw_buckets(self.cycle_timestamp)
        except data_access_errors.MySQLError as e:
            raise inventory_errors.LoadDataPipelineError(e)
codereview_python_data_10445
""" self.coco = COCO(ann_file) - self.cat_ids = [] - - # make cat_ids consistent with the order of class_name - for class_name in self.CLASSES: - self.cat_ids.append(self.coco.get_cat_ids(cat_names=class_name)[0]) - self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] Since we already wrapped coco api in mmdet. Maybe we could add an argument to get some thing like `self.coco.get_cat_ids(sorted=True)`? """ self.coco = COCO(ann_file) + self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = []
codereview_python_data_10447
    try:
        entry_args = [v() if isinstance(v, BeatLazyFunc) else v for v in (entry.args or [])]
-       entry_kwargs = {k: v() if isinstance(v, BeatLazyFunc) else v for k, v in (entry.kwargs.items() or {})}
        if task:
            return task.apply_async(entry_args, entry_kwargs,
                                    producer=producer,

How can this be right? `kwargs` is `None` so it has no `items()`. You should check if `kwargs` is `None` and if it is, call `items()` on an empty dictionary.

```suggestion
        entry_kwargs = {k: v() if isinstance(v, BeatLazyFunc) else v for k, v in (entry.kwargs or {}).items()}
```

    try:
        entry_args = [v() if isinstance(v, BeatLazyFunc) else v for v in (entry.args or [])]
+       entry_kwargs = {k: v() if isinstance(v, BeatLazyFunc) else v for k, v in (entry.kwargs or {}).items()}
        if task:
            return task.apply_async(entry_args, entry_kwargs,
                                    producer=producer,
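The `(entry.kwargs or {}).items()` shape generalizes to any possibly-None mapping. A minimal standalone sketch (using `callable` in place of the `BeatLazyFunc` check, which is specific to Celery):

```python
def resolve_kwargs(kwargs):
    # `kwargs or {}` substitutes an empty dict when kwargs is None,
    # so .items() is always safe to call.
    return {k: v() if callable(v) else v for k, v in (kwargs or {}).items()}

print(resolve_kwargs(None))                      # {}
print(resolve_kwargs({'n': 1, 'f': lambda: 2}))  # {'n': 1, 'f': 2}
```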
codereview_python_data_10457
        We use __ here to avoid accidentally overriding it in subclasses.
        """
-       if self.hasSelectedText():
-           # Let __on_selection_changed handle it
-           return
-
        if new < self._promptlen:
            self.setCursorPosition(self._promptlen)

Why not just replace this with `self.cursorForward(self.hasSelectedText(), self._promptlen - new)`?

        We use __ here to avoid accidentally overriding it in subclasses.
        """
        if new < self._promptlen:
            self.setCursorPosition(self._promptlen)
codereview_python_data_10459
        return scenarios

    def convert_path(self, swagger_path):
        with open(swagger_path) as swagger_fd:
            return self.convert(swagger_fd)

You ate the friendly error message, it will fail with an ugly "IOError" instead.

        return scenarios

    def convert_path(self, swagger_path):
+       if not os.path.exists(swagger_path):
+           raise ValueError("Swagger file %s doesn't exist" % swagger_path)
        with open(swagger_path) as swagger_fd:
            return self.convert(swagger_fd)
codereview_python_data_10470
import asyncio

from marshmallow.schema import SchemaMeta
from typing import Any

is there a way we can pre-process `data` to look up adversaries, planners, and other nested schema objects by ID and then load the appropriate data?

import asyncio
+import copy

from marshmallow.schema import SchemaMeta
from typing import Any
codereview_python_data_10475
        variables = re.findall(self.re_variable, decoded_test)
        if variables:
            relevant_facts = await self._build_relevant_facts([x for x in variables if '_' not in x], facts)
-           if all([x for x in relevant_facts]):
                good_facts = [await RuleSet(rules=rules).apply_rules(facts=fact_set) for fact_set in relevant_facts]
                valid_facts = [await self._trim_by_limit(decoded_test, g_fact[0]) for g_fact in good_facts]
                for combo in list(itertools.product(*valid_facts)):

Can you elaborate on this? I think relevant_facts should already be a list. Do we get anything extra by building a new list with the comprehension? What are the situations where all will return False?

        variables = re.findall(self.re_variable, decoded_test)
        if variables:
            relevant_facts = await self._build_relevant_facts([x for x in variables if '_' not in x], facts)
+           if all(relevant_facts):
                good_facts = [await RuleSet(rules=rules).apply_rules(facts=fact_set) for fact_set in relevant_facts]
                valid_facts = [await self._trim_by_limit(decoded_test, g_fact[0]) for g_fact in good_facts]
                for combo in list(itertools.product(*valid_facts)):
codereview_python_data_10477
        elif (num_first_testcase_hangs ==
              self.MAX_FIRST_HANGS_WITH_DEFERRED_FORKSERVER):
-           environment.set_value(constants.AFL_DRIVER_DONT_DEFER, 1)
            print('Instructing AFL not to defer forkserver.\nIf this fixes the '
                  'fuzzer, you should add this to the .options file:\n'
                  '[env]\n'

I think this is still DONT_DEFER_ENV_VAR?

        elif (num_first_testcase_hangs ==
              self.MAX_FIRST_HANGS_WITH_DEFERRED_FORKSERVER):
+           environment.set_value(constants.DONT_DEFER_ENV_VAR, 1)
            print('Instructing AFL not to defer forkserver.\nIf this fixes the '
                  'fuzzer, you should add this to the .options file:\n'
                  '[env]\n'
codereview_python_data_10478
import copy import json import sys -import signal from bigchaindb.common import crypto from bigchaindb.common.exceptions import (StartupError, DatabaseAlreadyExists, Seems like `signal` is not used in this module. import copy import json import sys + from bigchaindb.common import crypto from bigchaindb.common.exceptions import (StartupError, DatabaseAlreadyExists,
codereview_python_data_10483
pos_decoded_target_preds.detach(),
                is_aligned=True).clamp(min=1e-6)
            bbox_weights_ini = iou_targets_ini.clone().detach()
-            iou_targets_ini_avg_per_gpu = reduce_mean(
-                bbox_weights_ini.sum()).item()
-            bbox_avg_factor_ini = max(iou_targets_ini_avg_per_gpu, 1.0)

            pos_decoded_bbox_preds_refine = \
                distance2bbox(pos_points, pos_bbox_preds_refine)
Use `reduce_mean(bbox_weights_rf.sum()).clamp_(min=1).item()` and skip the `max()` operation.
pos_decoded_target_preds.detach(),
                is_aligned=True).clamp(min=1e-6)
            bbox_weights_ini = iou_targets_ini.clone().detach()
+            bbox_avg_factor_ini = reduce_mean(
+                bbox_weights_ini.sum()).clamp_(min=1).item()

            pos_decoded_bbox_preds_refine = \
                distance2bbox(pos_points, pos_bbox_preds_refine)
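A minimal PyTorch sketch of the suggested chaining; `reduce_mean` is the project's distributed helper and is omitted here (assuming it is a no-op on a single process):
```
import torch

bbox_weights = torch.tensor([0.2, 0.3])
# clamp_(min=1) folds the separate max(value, 1.0) step into the chain
avg_factor = bbox_weights.sum().clamp_(min=1).item()
print(avg_factor)  # 1.0
```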
codereview_python_data_10485
- filename - name of mmCIF file, OR an open text mode file handle
        """
-
        with warnings.catch_warnings():
            if self.QUIET:
                warnings.filterwarnings("ignore", category=PDBConstructionWarning)
You need to remove this new blank line; it is one of the reasons the style checks on TravisCI are failing:
```
$ flake8 Bio/
Bio/PDB/MMCIFParser.py:52:1: D202 No blank lines allowed after function docstring
...
```
- filename - name of mmCIF file, OR an open text mode file handle
        """
        with warnings.catch_warnings():
            if self.QUIET:
                warnings.filterwarnings("ignore", category=PDBConstructionWarning)
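For reference, a tiny sketch of what pydocstyle's D202 flags versus accepts:
```
def bad(filename):
    """Parse a file."""

    return filename  # D202: blank line after the docstring


def good(filename):
    """Parse a file."""
    return filename  # body follows the docstring directly
```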
codereview_python_data_10488
progress_queue.put('Running {}...'.format( scanner.__class__.__name__)) except Exception: # pylint: disable=broad-except - log_message = 'Error running scanner: {}'.format( - scanner.__class__.__name__) - progress_queue.put('{}: \'{}\''.format( - log_message, traceback.format_exc())) LOGGER.exception(log_message) failed.append(scanner.__class__.__name__) else: Instead of using `.format()` a second time, can you combine this into one? Something like this: ``` log_message = 'Error running scanner: {}: {}'.format( scanner.__class__.__name__, traceback.format_exc()) progress_queue.put(log_message) ``` progress_queue.put('Running {}...'.format( scanner.__class__.__name__)) except Exception: # pylint: disable=broad-except + log_message = 'Error running scanner: {}: \'{}\''.format( + scanner.__class__.__name__, traceback.format_exc()) + progress_queue.put(log_message) LOGGER.exception(log_message) failed.append(scanner.__class__.__name__) else:
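A runnable sketch of the single combined `.format()` call the reviewer suggests, with a stand-in scanner name:
```
import traceback

try:
    raise ValueError('scan failed')
except Exception:  # broad-except, mirroring the original handler
    log_message = 'Error running scanner: {}: {}'.format(
        'ExampleScanner', traceback.format_exc())
    print(log_message)
```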
codereview_python_data_10494
"--platform-name", "-p", default=MOLECULE_PLATFORM_NAME, - help=f"Name of the platform to target only. ({MOLECULE_PLATFORM_NAME} means all)", ) @click.option( "--driver-name", This will say "{whatever is in env} means all" which is probably not what's intended. Am I reading this correctly? Should this say something like "XXX by default"? "--platform-name", "-p", default=MOLECULE_PLATFORM_NAME, + help=f"Name of the platform to target only. Default is None", ) @click.option( "--driver-name",
codereview_python_data_10495
def serialize_pipeline(pipeline):
    try:
        return pipeline.serialize()
-    except:
-        print("Error during pipeline initialization. Note that some operators (e.g. Python Operators) "
-              "cannot be used with tensorflow data set API and DALIIterator.")
-        raise

def DALIIteratorWrapper(pipeline = None, serialized_pipeline = None, sparse = [], shapes = [], dtypes = [], batch_size = -1, prefetch_queue_depth = 2, **kwargs):
Here I would propose a modified exception message rather than a print. It should play nicer with various debuggers and stuff, I think.
def serialize_pipeline(pipeline):
    try:
        return pipeline.serialize()
+    except RuntimeError as e:
+        raise RuntimeError("Error during pipeline initialization. Note that some operators "
+                           "(e.g. Python Operators) cannot be used with "
+                           "tensorflow data set API and DALIIterator.") from e
+

def DALIIteratorWrapper(pipeline = None, serialized_pipeline = None, sparse = [], shapes = [], dtypes = [], batch_size = -1, prefetch_queue_depth = 2, **kwargs):
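A self-contained sketch of the `raise ... from e` pattern used in the fix; the chained traceback is what makes it friendlier to debuggers than a print:
```
def serialize():
    raise RuntimeError("low-level serialization failure")

try:
    serialize()
except RuntimeError as e:
    raise RuntimeError("Error during pipeline initialization.") from e
# The traceback shows both errors, joined by:
# "The above exception was the direct cause of the following exception:"
```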
codereview_python_data_10503
* The output of its forward function is the logits for the predicted
          node/graph classes.

-        See also the example below.

    num_hops : int
        The number of hops for GNN information aggregation.
    lr : float, optional
But `eweight` is also required, right? Does it have to be a keyword argument, or the third positional argument?
* The output of its forward function is the logits for the predicted
          node/graph classes.

+        See also the example in :func:`explain_node` and :func:`explain_graph`.

    num_hops : int
        The number of hops for GNN information aggregation.
    lr : float, optional
codereview_python_data_10507
def clear_keystring(self): """Clear the currently entered key sequence.""" if self._keystring: - self._debug_log("discarding keystring '{}'.".format(self._keystring)) self._keystring = '' self.keystring_updated.emit(self._keystring) I think this could cause `keystring_updated` to be emitted twice now in some situations, no? Once when it wasn't before in `_handle_single_key` or `_handle_ambiguous_match`, and then again in `handle` (which unconditionally emits it after calling `_handle_*`) def clear_keystring(self): """Clear the currently entered key sequence.""" if self._keystring: + self._debug_log("discarding keystring '{}'.".format( + self._keystring)) self._keystring = '' self.keystring_updated.emit(self._keystring)
codereview_python_data_10511
self, sampling_probability: TensorLike, time_major: bool = False, - seed: Optional[TensorLike] = None, - next_inputs_fn: Optional[Callable] = None ): """Initializer. ```suggestion seed: Optional[int] = None, ``` Maybe too? I'm not sure. self, sampling_probability: TensorLike, time_major: bool = False, + seed: Optional[int] = None, + next_inputs_fn: Optional[Callable] = None, ): """Initializer.
codereview_python_data_10513
records = []
        with openany(filename) as data:
            for line in data:
-                if line.startswith('#'):
-                    continue
                records.append(map(float, line.split()))
-        self.timeseries = np.array(records).T
-        try:
-            self.qavg = np.loadtxt(self.outarray)
-        except IOError as err:
-            if err.errno != errno.ENOENT:
-                raise

    def _single_frame(self):
        grA, grB, r0, mask = self.grA, self.grB, self.r0, self.mask
This method uses `self`; it cannot be a static method.
records = []
        with openany(filename) as data:
            for line in data:
+                if line.startswith('#'): continue
                records.append(map(float, line.split()))
+        return np.array(records)

    def _single_frame(self):
        grA, grB, r0, mask = self.grA, self.grB, self.r0, self.mask
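A minimal sketch of the reviewer's point: a method that touches `self` fails under `@staticmethod` at call time:
```
class Demo:
    def __init__(self):
        self.r0 = 4.5

    @staticmethod
    def broken():
        return self.r0  # NameError: name 'self' is not defined

    def working(self):
        return self.r0


print(Demo().working())  # 4.5
# Demo().broken()        # raises NameError when called
```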
codereview_python_data_10519
the matches list. Args: - key (dict): plist key. names (list[str]): names of the keys to match. matches (list[str]): keys with matching names. """ Do we know what is in the dict? e.g. `dict[str, object]` the matches list. Args: + key (dict[str, object]): plist key. names (list[str]): names of the keys to match. matches (list[str]): keys with matching names. """
codereview_python_data_10522
class Template: - def __init__(self, variables=None): - if dict: - self.variables = variables - else: - self.variables = {} self.tmpl = Apply("") def apply(self, template): This should probably have been `if variables` or `if variables is not None`. class Template: + def __init__(self, variables): + self.variables = variables self.tmpl = Apply("") def apply(self, template):
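A sketch of why the original check could never take the else branch: `dict` there names the built-in type, which is always truthy and unrelated to `variables`:
```
print(bool(dict))  # True -- the builtin type object, not the argument

variables = None
resolved = variables if variables is not None else {}
print(resolved)    # {} -- the fallback the original code presumably intended
```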
codereview_python_data_10529
query = """SELECT * FROM listens WHERE uid = %(uid)s AND """ + \ range_keys(len(date_range)) + \ """ AND id > %(from_id)s AND id < %(to_id)s - ORDER BY id """ + ORDER_TEXT[order] + """ LIMIT %(limit)s;""" fetched_rows = 0 # Total number of rows fetched for this range I'm pretty sure you don't need a `;` here query = """SELECT * FROM listens WHERE uid = %(uid)s AND """ + \ range_keys(len(date_range)) + \ """ AND id > %(from_id)s AND id < %(to_id)s + ORDER BY id """ + ORDER_TEXT[order] + """ LIMIT %(limit)s""" fetched_rows = 0 # Total number of rows fetched for this range
codereview_python_data_10536
</ItemGroup> </Project> """ - target_str = f""" <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <PropertyGroup> <EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck> ```suggestion target_str = r""" ``` This needs to be a raw string and does not rely on any Python template. Please change this back. </ItemGroup> </Project> """ + target_str = r""" <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <PropertyGroup> <EnableLightGBMUnsupportedPlatformTargetCheck Condition="'$(EnableLightGBMUnsupportedPlatformTargetCheck)' == ''">true</EnableLightGBMUnsupportedPlatformTargetCheck>
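A short sketch of why a raw/plain string is safer than an f-string for a template like this: f-strings treat every brace as a placeholder:
```
plain = "$(PropertyGroup) and literal {braces} survive as-is"
print(plain)

print(f"f-strings need doubled braces: {{braces}}")  # prints: ... {braces}
# f"literal {braces}"  # NameError: name 'braces' is not defined
```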
codereview_python_data_10538
pass
            else:
                return
-        echo(format.format(**vars(self)), replace=self.dynamic, newline=newline)
This will break a lot of progmeter code using the old formatting style that needs to be updated as well. It should also be mentioned in the changelog explicitly.
pass
            else:
                return
+        echo(self.format_handler(format, vars(self)), replace=self.dynamic, newline=newline)