Dataset Viewer (auto-converted to Parquet)

Columns: id (string, 24-28 chars), content (string, 121-2.08k chars)
codereview_python_data_0
self.redirect("/static/visualiser/index.html") def head(self): self.set_status(204) self.finish() Is the name "head" a convention for health checking? Regardless it caught me by surprise, maybe add some docs to this function on why it exist? It should also say what 204. self.redirect("/static/visualiser/index.html") def head(self): + """HEAD endpoint for health checking the scheduler""" self.set_status(204) self.finish()
codereview_python_data_1
Before:
    backward_time.append(t2 - t1)
    print("Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
          format(epoch, forward_time[-1], backward_time[-1]))
-   cross_entropy(logits[val_idx], labels[val_idx])
    val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx]).item() / len(val_idx)
    print("Train Accuracy: {:.4f} | Train Loss: {:.4f} | Validation Accuracy: {:.4f} | Validation loss: {:.4f}".
          format(train_acc, loss.item(), val_acc, val_loss.item()))

Review: `F.cross_entropy`? Also, isn't `train_acc` required in L123?

After:
    backward_time.append(t2 - t1)
    print("Epoch {:05d} | Train Forward Time(s) {:.4f} | Backward Time(s) {:.4f}".
          format(epoch, forward_time[-1], backward_time[-1]))
+   train_acc = torch.sum(logits[train_idx].argmax(dim=1) == labels[train_idx]).item() / len(train_idx)
+   val_loss = F.cross_entropy(logits[val_idx], labels[val_idx])
    val_acc = torch.sum(logits[val_idx].argmax(dim=1) == labels[val_idx]).item() / len(val_idx)
    print("Train Accuracy: {:.4f} | Train Loss: {:.4f} | Validation Accuracy: {:.4f} | Validation loss: {:.4f}".
          format(train_acc, loss.item(), val_acc, val_loss.item()))
codereview_python_data_4
Before:
    execute = pyqtSignal(str)

-   STYLESHEET = """
-       ConsoleLineEdit {
-           font: {{ conf.fonts.debug_console }};
-       }
-   """
-
    def __init__(self, _namespace, parent):
        """Constructor.

Review: Couldn't you move the stylesheet to `ConsoleWidget` with a `ConsoleLineEdit, ConsoleTextEdit` selector so that it applies to both without needing to duplicate it?

After:
    execute = pyqtSignal(str)

    def __init__(self, _namespace, parent):
        """Constructor.
codereview_python_data_20
Before:
        'this API client')

    def iter_kubernetes_nodes(self, project_id, zone, cluster):
-       """Iterate k8s nodes in an organization from GCP API.

        Args:
            project_id (str): id of the project to query.
            zone (str): The zone the cluster is in.

Review: please update all the docstrings below in the other methods to reflect the proper parent, instead of organization.

After:
        'this API client')

    def iter_kubernetes_nodes(self, project_id, zone, cluster):
+       """Iterate k8s nodes in a cluster from GCP API.

        Args:
            project_id (str): id of the project to query.
            zone (str): The zone the cluster is in.
codereview_python_data_21
Before:
              eps=1e-3,
              reduction='mean',
              avg_factor=None):
-   """Calculate dice loss, the coefficient in the denominator is the first
-   power instead of the second power.

    Args:
        pred (torch.Tensor): The prediction, has a shape (n, *)

Review: use_second_power=True -> naive_dice=False

After:
              eps=1e-3,
              reduction='mean',
              avg_factor=None):
+   """Calculate naive dice loss, the coefficient in the denominator is the
+   first power instead of the second power.

    Args:
        pred (torch.Tensor): The prediction, has a shape (n, *)
codereview_python_data_25
Before:
            The :class:`~MDAnalysis.core.groups.AtomGroup` or
            :class:`~MDAnalysis.core.universe.Universe` to write.
        """
-       warnings.warn("Using the last letter of the segid for the chainID "
-                     "is now deprecated and will be changed in 2.0. "
-                     "In 2.0, the chainID attribute will be used if it "
-                     "exists, or a placeholder value.", DeprecationWarning)
        self._update_frame(obj)
        self._write_pdb_header()

Review:
```suggestion
```
I can't find the versionchanged for this, has this been fixed @lilyminium @jbarnoud?

After:
            The :class:`~MDAnalysis.core.groups.AtomGroup` or
            :class:`~MDAnalysis.core.universe.Universe` to write.
        """
        self._update_frame(obj)
        self._write_pdb_header()
codereview_python_data_30
Before:
        :return:
        """
-       self._compact_times()
-
        if self[self.SAMPLE_COUNT]:
            self[self.AVG_CONN_TIME] = self.sum_cn / self[self.SAMPLE_COUNT]
            self[self.AVG_LATENCY] = self.sum_lt / self[self.SAMPLE_COUNT]

Review: We don't need to compact times for each KPIset. We only need to do it for cumulative KPIsets.

After:
        :return:
        """
        if self[self.SAMPLE_COUNT]:
            self[self.AVG_CONN_TIME] = self.sum_cn / self[self.SAMPLE_COUNT]
            self[self.AVG_LATENCY] = self.sum_lt / self[self.SAMPLE_COUNT]
codereview_python_data_42
Before:
        if not self._r.headers.get('Host'):
            return self._r.url

        # If they did set it, retrieve it and reconstruct the expected domain
-       host = self._r.headers['Host']
        parsed = urlparse(self._r.url)
-
-       # If parsed url is str type, ensure that host is also str type
-       if isinstance(parsed.scheme, str) and not isinstance(host, str)\
-               and isinstance(host, bytes):
-           host = host.decode('ascii')
-
        # Reconstruct the URL as we expect it
        return urlunparse([
            parsed.scheme, host, parsed.path, parsed.params, parsed.query,

Review: A quick dive into the CPython codebase shows that the current behaviour of the cookiejar module uses native strings (`str`). The same is true on Python 2. That means that what we really want to do is just to call `to_native_str` on `host`. That is a Requests-specific function that lives in `utils`. That can skip a lot of this if statement. While we're here, I should also note that `ascii` is probably a bad default encoding choice: we should at least consider defaulting to something a bit more sensible, like UTF-8.

After:
        if not self._r.headers.get('Host'):
            return self._r.url

        # If they did set it, retrieve it and reconstruct the expected domain
+       host = utils.to_native_string(self._r.headers['Host'], encoding='utf-8')
        parsed = urlparse(self._r.url)

        # Reconstruct the URL as we expect it
        return urlunparse([
            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
codereview_python_data_59
""" cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) - if getattr(cls, "task_namespace", None) is None: cls.task_namespace = metacls._default_namespace metacls._reg.append(cls) instead of a `getattr` call here, can you just set `task_namespace = None` on the base class? thanks """ cls = super(Register, metacls).__new__(metacls, classname, bases, classdict) + if cls.task_namespace is None: cls.task_namespace = metacls._default_namespace metacls._reg.append(cls)
codereview_python_data_60
self.assertEqual(record["AC"], "CVCL_2260") self.assertEqual( record["SY"], - "Clone 1-5c-4; Clone 1-5c-4 WKD of " - "Chang Conjunctiva; " - "Wong-Kilbourne derivative of " - "Chang conjunctiva; ChWK", ) self.assertEqual(len(record["DR"]), 10) self.assertEqual(record["DR"][0], ("CLO", "CLO_0002500")) I would suggest splitting that long string into just two lines (at the semi colon), rather than four lines? self.assertEqual(record["AC"], "CVCL_2260") self.assertEqual( record["SY"], + "Clone 1-5c-4; Clone 1-5c-4 WKD of Chang Conjunctiva; " + "Wong-Kilbourne derivative of Chang conjunctiva; ChWK", ) self.assertEqual(len(record["DR"]), 10) self.assertEqual(record["DR"][0], ("CLO", "CLO_0002500"))
codereview_python_data_62
Before:
# package.
"""Simple protein analysis.

-Example:

>>> from Bio.SeqUtils.ProtParam import ProteinAnalysis
>>> X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGT"
...                     "RDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEEC"

Review: Does this render correctly? Do we need something like:
```rst
Examples
--------
>>> print(123)
123
```
Note this is deliberately with an "s" and no colon, as in many other places in Biopython, to match the numpydoc standard, which seems the most likely standard we might want to adopt.

After:
# package.
"""Simple protein analysis.

+Examples
+--------
>>> from Bio.SeqUtils.ProtParam import ProteinAnalysis
>>> X = ProteinAnalysis("MAEGEITTFTALTEKFNLPPGNYKKPKLLYCSNGGHFLRILPDGTVDGT"
...                     "RDRSDQHIQLQLSAESVGEVYIKSTETGQYLAMDTSGLLYGSQTPSEEC"
codereview_python_data_63
""" result = reply.result if result[TXN_TYPE] in (SCHEMA, ISSUER_KEY): - result = deepcopy(result) result[DATA] = jsonSerz.serialize(result[DATA], toBytes=False) txnWithMerkleInfo = self.storeTxnInLedger(result) Why this line is necessary? """ result = reply.result if result[TXN_TYPE] in (SCHEMA, ISSUER_KEY): result[DATA] = jsonSerz.serialize(result[DATA], toBytes=False) txnWithMerkleInfo = self.storeTxnInLedger(result)
codereview_python_data_68
Before:
        return tasks_host.do_corpus_pruning(context, last_execution_failed, revision)

-   build_setup_result = build_manager.setup_build(revision=revision)
-   build_directory = environment.get_value('BUILD_DIR')
-   if not build_setup_result or not build_directory:
        raise CorpusPruningException('Failed to setup build.')

    start_time = datetime.datetime.utcnow()
    runner = Runner(build_directory, context)
    pruner = CorpusPruner(runner)

Review: do we even need to check build_directory?

After:
        return tasks_host.do_corpus_pruning(context, last_execution_failed, revision)

+   if not build_manager.setup_build(revision=revision):
        raise CorpusPruningException('Failed to setup build.')

+   build_directory = environment.get_value('BUILD_DIR')
    start_time = datetime.datetime.utcnow()
    runner = Runner(build_directory, context)
    pruner = CorpusPruner(runner)
codereview_python_data_69
Before:
    amount = self.parent.format_amount(utxo.value_sats(), whitespaces=True)
    labels = [name_short, address, label, amount, '%d'%height]
    utxo_item = [QStandardItem(x) for x in labels]
-   # Storing the full outpoint as the data payload of this item
-   utxo_item[0].setData(name)
    self.set_editability(utxo_item)
    utxo_item[self.Columns.ADDRESS].setFont(QFont(MONOSPACE_FONT))
    utxo_item[self.Columns.AMOUNT].setFont(QFont(MONOSPACE_FONT))
    utxo_item[self.Columns.OUTPOINT].setFont(QFont(MONOSPACE_FONT))

Review: There are already a few similar lines below, in particular `utxo_item[self.Columns.ADDRESS].setData(name, Qt.UserRole)`. Let's move this below that one. Also, like those lines below, please do not use a magic number here (`0`). Along with the suggestion for the other file, we could have:
```suggestion
utxo_item[self.Columns.OUTPOINT].setData(name, self.ROLE_CLIPBOARD_DATA)
```

After:
    amount = self.parent.format_amount(utxo.value_sats(), whitespaces=True)
    labels = [name_short, address, label, amount, '%d'%height]
    utxo_item = [QStandardItem(x) for x in labels]
    self.set_editability(utxo_item)
+   utxo_item[self.Columns.OUTPOINT].setData(name, self.ROLE_CLIPBOARD_DATA)
    utxo_item[self.Columns.ADDRESS].setFont(QFont(MONOSPACE_FONT))
    utxo_item[self.Columns.AMOUNT].setFont(QFont(MONOSPACE_FONT))
    utxo_item[self.Columns.OUTPOINT].setFont(QFont(MONOSPACE_FONT))
codereview_python_data_71
Before:
        code.putln(
            "};")

-   def generate_dict_getter(self, scope, code):
-       #if scope.name == 'QApplication':
-       #    import ipdb;ipdb.set_trace()
        func_name = scope.mangle_internal("__dict__getter")
        dict_attr = scope.lookup_here("__dict__")
        dict_name = dict_attr.cname

Review: No need for an "else" after "continue". And: did you consider merging this feature into the property support to avoid this special casing? (Just asking - I don't say it's a good idea, but it might be.)

After:
        code.putln(
            "};")

+   def generate_dict_getter_function(self, scope, code):
        func_name = scope.mangle_internal("__dict__getter")
        dict_attr = scope.lookup_here("__dict__")
        dict_name = dict_attr.cname
codereview_python_data_82
Before:
                content_type='text/html')
            LOGGER.debug('Inventory summary sent successfully by email.')
        except util_errors.EmailSendError:
-           LOGGER.warn('Unable to send Violations email')

    @staticmethod
    def transform_to_template(data):

Review: Keep this at exception level so that the stacktrace is captured.

After:
                content_type='text/html')
            LOGGER.debug('Inventory summary sent successfully by email.')
        except util_errors.EmailSendError:
+           LOGGER.exception('Unable to send Violations email')

    @staticmethod
    def transform_to_template(data):
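What `exception`-level logging buys over `warn`: it logs at ERROR level and appends the active traceback automatically. A runnable sketch with the stdlib logger (the error message is illustrative):

```python
import logging

logging.basicConfig(level=logging.DEBUG)

try:
    raise RuntimeError("smtp unreachable")
except RuntimeError:
    # logging.exception == logging.error plus the current traceback
    logging.exception("Unable to send Violations email")
```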
codereview_python_data_84
Before:
    raise NotImplementedError

-def _is_spmv_supported_node_feat(g, field):
-   """Return whether the node feature shape supports SPMV optimization."""
-   return True
-
def _is_spmv_supported_edge_feat(g, field):
    """Return whether the edge feature shape supports SPMV optimization.

Review: Since it's always returning True, we can remove it...

After:
    raise NotImplementedError

def _is_spmv_supported_edge_feat(g, field):
    """Return whether the edge feature shape supports SPMV optimization.
codereview_python_data_86
Before:
    '25',
    '--dict',
    os.path.join(DATA_DIR, 'test_fuzzer.dict'),
-   '-f',
    os.path.join(TEMP_DIR, 'corpus'),
-   '-W',
    TEMP_DIR,
    '--run_time',
    '10',

Review: need to add check to make sure that crash.input_input exists on disk.

After:
    '25',
    '--dict',
    os.path.join(DATA_DIR, 'test_fuzzer.dict'),
+   '--input',
    os.path.join(TEMP_DIR, 'corpus'),
+   '--workspace',
    TEMP_DIR,
    '--run_time',
    '10',
codereview_python_data_87
Before:
    from allauth.account import views as account_views
    from allauth.socialaccount import views as socialaccount_views, providers

-if settings.AUTHENTICATION_METHOD == 'django' or settings.HEROKU_DEMO:
    urlpatterns = [
        url(r'^standalone-login/$', login, name='standalone_login'),
        url(r'^standalone-logout/$', logout, name='standalone_logout', kwargs={'next_page': '/'}),

Review: We don't need `or settings.HEROKU_DEMO` here or anywhere else, because `django` is now the default authentication method. Previously it wasn't, so we had to set it to `django` if `HEROKU_DEMO` was `True`.

After:
    from allauth.account import views as account_views
    from allauth.socialaccount import views as socialaccount_views, providers

+if settings.AUTHENTICATION_METHOD == 'django':
    urlpatterns = [
        url(r'^standalone-login/$', login, name='standalone_login'),
        url(r'^standalone-logout/$', logout, name='standalone_logout', kwargs={'next_page': '/'}),
codereview_python_data_90
Before:
            emitters.commit()
        except dnf.exceptions.ExitOnLock as exc:
            logger.info(ucd(exc))
-           return 0
        except dnf.exceptions.Error as exc:
            logger.error(_('Error: %s'), ucd(exc))
            return 1

Review: I suppose that 1 would be better here. Please take into account also YUM behavior.

After:
            emitters.commit()
        except dnf.exceptions.ExitOnLock as exc:
            logger.info(ucd(exc))
+           return 1
        except dnf.exceptions.Error as exc:
            logger.error(_('Error: %s'), ucd(exc))
            return 1
codereview_python_data_91
Before:
            dist.broadcast(module.running_mean, 0)

    def after_train_epoch(self, runner):
-       if self.broadcast_bn_buffer:
-           self._broadcast_bn_buffer(runner)
-
        if not self.by_epoch or not self.evaluation_flag(runner):
            return
        from mmdet.apis import multi_gpu_test
        tmpdir = self.tmpdir
        if tmpdir is None:

Review: We can move `broadcast_bn_buffer` after
```
if not self.by_epoch or not self.evaluation_flag(runner):
    return
```
like `after_train_iter`

After:
            dist.broadcast(module.running_mean, 0)

    def after_train_epoch(self, runner):
        if not self.by_epoch or not self.evaluation_flag(runner):
            return

+       if self.broadcast_bn_buffer:
+           self._broadcast_bn_buffer(runner)
+
        from mmdet.apis import multi_gpu_test
        tmpdir = self.tmpdir
        if tmpdir is None:
codereview_python_data_92
Before:
    icon_changed = pyqtSignal(QIcon)
    #: Signal emitted when a page's title changed (new title as str)
    title_changed = pyqtSignal(str)
-   #: Signal emitted when a page's currently active search match changed (match as current/total)
-   search_match_changed = pyqtSignal(int, int)
    #: Signal emitted when this tab was pinned/unpinned (new pinned state as bool)
    pinned_changed = pyqtSignal(bool)
    #: Signal emitted when a new tab should be opened (url as QUrl)

Review: This signal should be on the `AbstractSearch` rather than here, no?

After:
    icon_changed = pyqtSignal(QIcon)
    #: Signal emitted when a page's title changed (new title as str)
    title_changed = pyqtSignal(str)
    #: Signal emitted when this tab was pinned/unpinned (new pinned state as bool)
    pinned_changed = pyqtSignal(bool)
    #: Signal emitted when a new tab should be opened (url as QUrl)
codereview_python_data_96
Before:
        try:
            self.base.conf._configure_from_options(opts)
            self._read_conf_file(opts.releasever)
-           self.base.conf.read_dropin()
            self.base.conf._adjust_conf_options()
        except (dnf.exceptions.ConfigError, ValueError) as e:
            logger.critical(_('Config error: %s'), e)

Review: Please rename to `self.base.conf.read_dropin_dir()`

After:
        try:
            self.base.conf._configure_from_options(opts)
            self._read_conf_file(opts.releasever)
+           self.base.conf.read_dropin_dir()
            self.base.conf._adjust_conf_options()
        except (dnf.exceptions.ConfigError, ValueError) as e:
            logger.critical(_('Config error: %s'), e)
codereview_python_data_99
""" self.log("serverdisconnect", "debug", [repr(self.server_conn.address)]) address = self.server_conn.address - source_address = self.server_conn.source_address()[0] self.server_conn.finish() self.server_conn.close() self.channel.tell("serverdisconnect", self.server_conn) Very minor nitpick: please use `source_address.host` instead. """ self.log("serverdisconnect", "debug", [repr(self.server_conn.address)]) address = self.server_conn.address + source_address = self.server_conn.source_address self.server_conn.finish() self.server_conn.close() self.channel.tell("serverdisconnect", self.server_conn)
codereview_python_data_113
Before:
    # log encounter
    self.emit_event(
        'pokemon_appeared',
-       formatted='A wild {} appeared! (CP: {}) (IV: {}) (A/D/S {}) (NCP: {})'.format(pokemon.name, pokemon.cp, pokemon.iv, pokemon.iv_display, round(pokemon.cp_percent, 2),),
        data={
            'pokemon': pokemon.name,
            'ncp': round(pokemon.cp_percent, 2),

Review: Doesn't need all those parentheses. Change to (CP: {} IV: {} A/D/S {} NCP: {})

After:
    # log encounter
    self.emit_event(
        'pokemon_appeared',
+       formatted='A wild {} appeared! (CP: {} IV: {} A/D/S {} NCP: {}'.format(pokemon.name, pokemon.cp, pokemon.iv, pokemon.iv_display, round(pokemon.cp_percent, 2),),
        data={
            'pokemon': pokemon.name,
            'ncp': round(pokemon.cp_percent, 2),
codereview_python_data_115
Before:
    def crontab(self, *args, **kwargs):
        return crontab(*args, **dict(kwargs, app=self.app))

-   def next_occurrence(self, crontab, now):
        crontab.nowfun = lambda: now
        return now + crontab.remaining_estimate(now)

Review: make sure this won't regress

After:
    def crontab(self, *args, **kwargs):
        return crontab(*args, **dict(kwargs, app=self.app))

+   def next_ocurrance(self, crontab, now):
        crontab.nowfun = lambda: now
        return now + crontab.remaining_estimate(now)
codereview_python_data_117
Before:
    # data that needs to be added to them.
    # We postpone processing of this in order to do type inference/generalization.
    # See self.attr_type
-   for (xml_obj, data) in self.attributes.iteritems():
        for (k, v, scope, default) in data:
            xml_obj.append(self.add_data(make_str(k),
                                         self.attr_type(k, scope, v),
                                         make_str(v), scope, default))

Review: use `.items()` for both `py2` and `py3` compatibility.

After:
    # data that needs to be added to them.
    # We postpone processing of this in order to do type inference/generalization.
    # See self.attr_type
+   for (xml_obj, data) in self.attributes.items():
        for (k, v, scope, default) in data:
            xml_obj.append(self.add_data(make_str(k),
                                         self.attr_type(k, scope, v),
                                         make_str(v), scope, default))
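For context, the py2/py3 difference the reviewer is pointing at, in a tiny runnable form (the dict contents are made up):

```python
attributes = {"color": "red", "weight": 3}

# dict.iteritems() exists only on Python 2; .items() works on both
# (a lazy view on py3, a list copy on py2, either is fine for iteration).
for key, value in attributes.items():
    print(key, value)
```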
codereview_python_data_121
Before:
    if ann.get('ignore', False):
        continue
    x1, y1, w, h = ann['bbox']
-   x2 = x1 + w
-   y2 = y1 + h
-   if ((x1 < 0 or x1 >= img_info['width'] or y1 < 0
-        or y1 >= img_info['height'])
-       and (x2 < 0 or x2 >= img_info['width'] or y2 < 0
-            or y2 >= img_info['height'])):
        continue
    if ann['area'] <= 0 or w < 1 or h < 1:
        continue

Review: I have some questions about the condition statement:
1. The conditions seem redundant. For example, if `x1 >= img_info['width']` then x2 must be `>= img_info['width']`, so we only need to check `x2 >= img_info['width']`.
2. Why do you use `and` between the first condition and the second condition? In my opinion, if one of the two conditions is satisfied then the image is out-of-frame.

After:
    if ann.get('ignore', False):
        continue
    x1, y1, w, h = ann['bbox']
+   inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
+   inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
+   if inter_w * inter_h == 0:
        continue
    if ann['area'] <= 0 or w < 1 or h < 1:
        continue
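A self-contained sketch of the intersection test the patch adopts (the function name and the asserts are mine, not mmdetection's): clip the box against the image rectangle; a zero-area intersection means the annotation lies entirely outside the frame.

```python
def box_visible(x1, y1, w, h, img_w, img_h):
    # Overlap of the box [x1, x1+w] x [y1, y1+h] with the image [0, img_w] x [0, img_h].
    inter_w = max(0, min(x1 + w, img_w) - max(x1, 0))
    inter_h = max(0, min(y1 + h, img_h) - max(y1, 0))
    return inter_w * inter_h > 0

assert box_visible(10, 10, 5, 5, 100, 100)        # fully inside
assert not box_visible(-20, 10, 5, 5, 100, 100)   # entirely left of the frame
assert box_visible(-2, -2, 5, 5, 100, 100)        # partially inside still counts
```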
codereview_python_data_126
Before:
-def hey(incoming_words):
    pass

Review: Maybe just `words` or even `phrase` is a better choice?

After:
+def hey(phrase):
    pass
codereview_python_data_127
Before:
    # Fork mode is disabled on ephemeral bots due to a bug on the platform.
    is_ephemeral = environment.is_ephemeral()

    if (not is_fuchsia and not is_android and not is_ephemeral and
            not use_dataflow_tracing and
            strategy_pool.do_strategy(strategy.FORK_STRATEGY)):

Review: So, you don't want fork mode to be used in dataflow tracing anymore?

After:
    # Fork mode is disabled on ephemeral bots due to a bug on the platform.
    is_ephemeral = environment.is_ephemeral()

+   # Do not use fork mode for DFT-based fuzzing. This is needed in order to
+   # collect readable and actionable logs from fuzz targets running with DFT.
    if (not is_fuchsia and not is_android and not is_ephemeral and
            not use_dataflow_tracing and
            strategy_pool.do_strategy(strategy.FORK_STRATEGY)):
codereview_python_data_131
Before:
    import os
    import os.path
-
-   from itertools import chain, dropwhile, takewhile

    import sip
    from PyQt5.QtCore import QUrl, QObject, QPoint, QTimer

Review: Please import modules (`import itertools`) and then use `itertools.chain`, etc.

After:
    import os
    import os.path
+   import itertools

    import sip
    from PyQt5.QtCore import QUrl, QObject, QPoint, QTimer
codereview_python_data_132
Before:
    Chem = pytest.importorskip("rdkit.Chem")
    mol = Chem.Mol()
    u = mda.Universe(mol, format="RDKIT")

class TestUniverseFromSmiles(object):
    def setup_class(self):

Review: I suppose you could also check that the universe is empty--has `0` atoms for example.

After:
    Chem = pytest.importorskip("rdkit.Chem")
    mol = Chem.Mol()
    u = mda.Universe(mol, format="RDKIT")
+   assert len(u.atoms) == 0


class TestUniverseFromSmiles(object):
    def setup_class(self):
codereview_python_data_134
Before:
        if not pat.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
-       raise InvalidHeader("Header %s value %s must be of type str or bytes, "
-                           "not %s" % (name, value, type(value)))

def urldefragauth(url):

Review: This may read a little clearer if the name and/or value are wrapped in quotes. Perhaps even adjusting the wording to something like "Header {name}: {value} must have a value of string or bytes, not {type}".

After:
        if not pat.match(value):
            raise InvalidHeader("Invalid return character or leading space in header: %s" % name)
    except TypeError:
+       raise InvalidHeader("Value for header {%s:%s} must be of type str or "
+                           "bytes, not %s" % (name, value, type(value)))

def urldefragauth(url):
codereview_python_data_139
Before:
class RekognitionStubber(ExampleStubber):
    """
-   A class that implements a variety of stub functions that are used by the
-   Amazon Rekognition unit tests.
-   The stubbed functions all expect certain parameters to be passed to them as
-   part of the tests, and will raise errors when the actual parameters differ from
-   the expected.
    """
    def __init__(self, client, use_stubs=True):
        """

Review: Suggestion: "A class that implements stub functions used by Amazon Rekognition unit tests. The stubbed functions expect certain parameters to be passed to them as part of the tests, and raise errors if the parameters are not as expected."

After:
class RekognitionStubber(ExampleStubber):
    """
+   A class that implements stub functions used by Amazon Rekognition unit tests.
+   The stubbed functions expect certain parameters to be passed to them as
+   part of the tests, and raise errors if the parameters are not as expected.
    """
    def __init__(self, client, use_stubs=True):
        """
codereview_python_data_143
Before:
        objreg.register('message-bridge', message_bridge, scope='window',
                        window=self.win_id)

-       if config.val.window.hide_decoration:
-           window_flags = Qt.CustomizeWindowHint | Qt.NoDropShadowWindowHint
-           self.setWindowFlags(Qt.Window | window_flags)

        self.setWindowTitle('qutebrowser')
        self._vbox = QVBoxLayout(self)
        self._vbox.setContentsMargins(0, 0, 0, 0)

Review: This should be handled in `_on_config_changed` too if possible so it can be set without requiring a restart. Not sure if it's possible to just change the flags after the window has been shown though - but maybe a `self.hide()` and `self.show()` helps.

After:
        objreg.register('message-bridge', message_bridge, scope='window',
                        window=self.win_id)

        self.setWindowTitle('qutebrowser')
        self._vbox = QVBoxLayout(self)
        self._vbox.setContentsMargins(0, 0, 0, 0)
codereview_python_data_146
Before:
        return dict(name=self.name, module=self.module, params=self.params, description=self.description,
                    stopping_conditions=[fact.display for fact in self.stopping_conditions])

-   def __init__(self, id, name, module, params, stopping_conditions=None, description=None):
        super().__init__()
-       self.id = id
        self.name = name
        self.module = module
        self.params = params

Review: send this parameter in as identifier instead of id, which shadows the Python builtin `id`

After:
        return dict(name=self.name, module=self.module, params=self.params, description=self.description,
                    stopping_conditions=[fact.display for fact in self.stopping_conditions])

+   def __init__(self, planner_id, name, module, params, stopping_conditions=None, description=None):
        super().__init__()
+       self.planner_id = planner_id
        self.name = name
        self.module = module
        self.params = params
codereview_python_data_147
Before:
        return data

    def readline(self):
-       """Read single line for the BGZF module."""
        i = self._buffer.find(self._newline, self._within_block_offset)
        # Three cases to consider,
        if i == -1:

Review: This isn't for the module, it's for the class. How about just:
```python
"""Read a single line from the BGZF file."""
```

After:
        return data

    def readline(self):
+       """Read a single line for the BGZF file."""
        i = self._buffer.find(self._newline, self._within_block_offset)
        # Three cases to consider,
        if i == -1:
codereview_python_data_148
print("\nSetting up cluster\n") redis_address = cluster.setup_cluster(config) print("\nLaunching notebook\n") - print("*" * 68) - print(("To connect to the cluster, run the following commands in the " - "notebook:\n" - "\t\timport ray\n" - "\t\tray.init(redis_address=\"{0}\")\n" - "\t\timport modin").format(redis_address)) - print("*" * 68) - - cluster.launch_notebook(config, port) cli.add_command(notebook) Ideally this would go in our `__init__` so we should be able to detect if the cli was used. print("\nSetting up cluster\n") redis_address = cluster.setup_cluster(config) print("\nLaunching notebook\n") + + cluster.launch_notebook(config, port, redis_address=redis_address) cli.add_command(notebook)
codereview_python_data_152
Before:
    wrap_fp16_model)

-class Depr_Fp16OptimizerHook(Fp16OptimizerHook):
    """A wrapper class for the FP16 optimizer hook.

    This class wraps :class:`Fp16OptimizerHook` in `mmcv.runner` and shows a
    warning that the :class:`Fp16OptimizerHook` from `mmdet.core` will be
    deprecated.

Review: Just use the complete word: DeprecatedFp16OptimizerHook

After:
    wrap_fp16_model)

+class DeprecatedFp16OptimizerHook(Fp16OptimizerHook):
    """A wrapper class for the FP16 optimizer hook.

    This class wraps :class:`Fp16OptimizerHook` in `mmcv.runner` and shows a
    warning that the :class:`Fp16OptimizerHook` from `mmdet.core` will be
    deprecated.
codereview_python_data_155
Before:
    for a in agents:
        executors = await self.dao.get('core_executor', criteria=dict(agent_id=a['id']))
        a['executors'] = [dict(executor=e['executor'], preferred=e['preferred']) for e in executors]
-       a['sleep'] = self.jitter('{}/{}'.format(a['sleep_min'], a['sleep_max']))
    return agents

async def explode_results(self, criteria=None):

Review: why this? this change is in explode_agents, which gets called as a read-only function. this change sets a temporary sleep value for agents (not persisted)... right?

After:
    for a in agents:
        executors = await self.dao.get('core_executor', criteria=dict(agent_id=a['id']))
        a['executors'] = [dict(executor=e['executor'], preferred=e['preferred']) for e in executors]
    return agents

async def explode_results(self, criteria=None):
codereview_python_data_159
Before:
        display_name (str): display name.

    Returns:
-       firefox__cache_config: namedtuple containing the block size and first record offset.

    Raises:

Review: `firefox__cache_config` => `firefox_cache_config` (remove the double underscore)

After:
        display_name (str): display name.

    Returns:
+       firefox_cache_config: namedtuple containing the block size and first record offset.

    Raises:
codereview_python_data_162
Before:
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

-"""Base Modin Dataframe class optimized for PyArrow on Ray execution."""

Review: again, nothing about "experimental" in here

After:
# ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.

+"""Experimental Base Modin Dataframe class optimized for PyArrow on Ray execution."""
codereview_python_data_164
Before:
    stream_params = stream_parameters(streams)
    inds, dims = zip(*[(ind, kdim) for ind, kdim in enumerate(kdims)
                       if kdim not in stream_params])
-   get = itemgetter(*inds)
    keys = (get(k) for k in keys)
    return dims, ([wrap_tuple(k) for k in keys] if len(inds) == 1 else list(keys))

Review: Don't often see a use for ``itemgetter`` and I'm trying to think whether you really need it here. I don't immediately have a better suggestion so it is probably ok...

After:
    stream_params = stream_parameters(streams)
    inds, dims = zip(*[(ind, kdim) for ind, kdim in enumerate(kdims)
                       if kdim not in stream_params])
+   get = operator.itemgetter(*inds)  # itemgetter used for performance
    keys = (get(k) for k in keys)
    return dims, ([wrap_tuple(k) for k in keys] if len(inds) == 1 else list(keys))
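An aside on `itemgetter`, since the reviewer finds it unusual: with one index it returns a bare item, with several it returns a tuple, which is exactly why the surrounding code special-cases `len(inds) == 1`. A quick demo:

```python
import operator

key = ("a", "b", "c")
get_one = operator.itemgetter(1)
get_two = operator.itemgetter(0, 2)

print(get_one(key))   # 'b'        (a scalar, not a 1-tuple)
print(get_two(key))   # ('a', 'c') (a tuple once there are multiple indices)
```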
codereview_python_data_170
Before:
        x.add_row([item.name, state, item.provider])
    print(x)
-   print
    self.molecule._print_valid_platforms()
-   print
    self.molecule._print_valid_providers()

Review: Just to be consistent, can we use the print function `print()` instead of the keyword? Same goes for line 437.

After:
        x.add_row([item.name, state, item.provider])
    print(x)
+   print()
    self.molecule._print_valid_platforms()
+   print()
    self.molecule._print_valid_providers()
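The py2/py3 wrinkle behind this nit, sketched below: once `print` is a function (any Python 3, or py2 under the `print_function` future import), a bare `print` is just a name reference that prints nothing, while `print()` emits the intended blank line.

```python
from __future__ import print_function  # no-op on py3, makes py2 behave the same

print("platforms:")
print    # evaluates the function object; prints nothing (the bug to avoid)
print()  # prints the intended blank separator line
```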
codereview_python_data_171
Before:
        original_parsed = urlparse(resp.request.url)
        redirect_parsed = urlparse(url)

-       if original_parsed.hostname != redirect_parsed.hostname:
-           try:
-               del headers['Authorization']
-           except KeyError:
-               pass

        # However, .netrc might have more auth for us. Let's get it if it
        # does.

Review: What about:
```python
if (original_parsed.hostname != redirect_parsed.hostname and
        'Authorization' in headers):
    del headers['Authorization']
```

After:
        original_parsed = urlparse(resp.request.url)
        redirect_parsed = urlparse(url)

+       if (original_parsed.hostname != redirect_parsed.hostname and
+               'Authorization' in headers):
+           del headers['Authorization']

        # However, .netrc might have more auth for us. Let's get it if it
        # does.
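A runnable sketch of the reviewer's condensed form (the function name is mine, and a plain dict stands in for requests' headers object): drop credentials only when a redirect crosses to a different host.

```python
from urllib.parse import urlparse

def strip_auth_on_cross_host_redirect(original_url, redirect_url, headers):
    # Leaking an Authorization header to a different host is a security bug,
    # so remove it whenever the hostname changes.
    if (urlparse(original_url).hostname != urlparse(redirect_url).hostname
            and 'Authorization' in headers):
        del headers['Authorization']

headers = {'Authorization': 'Bearer secret'}
strip_auth_on_cross_host_redirect('https://a.example/x', 'https://b.example/y', headers)
assert 'Authorization' not in headers
```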
codereview_python_data_172
"method_%s_%s" % (type_name, attr_name), kwargs) if method_handler is None: # in this case we don't need the real directives on the call to get_slot_table - if (attr_name in TypeSlots.get_slot_table({}).method_name_to_slot or attr_name in ['__new__', '__class__']): method_handler = self._find_handler( "slot%s" % attr_name, kwargs) I'd rather make this as fast as before again. This is called a _lot_. We can have a global set `TypeSlots.special_method_names` or just use the default `method_name_to_slot` dict for it. In fact, since `old_binops=False` should be the default (also for Cython utility code, for example), we'll most likely need that setup anyway, so it won't hurt much to instantiate it on import. "method_%s_%s" % (type_name, attr_name), kwargs) if method_handler is None: # in this case we don't need the real directives on the call to get_slot_table + if (attr_name in TypeSlots.special_method_names or attr_name in ['__new__', '__class__']): method_handler = self._find_handler( "slot%s" % attr_name, kwargs)
codereview_python_data_179
Before:
        elif self._config.batch_mode == 'family':
            return family
        elif self._config.batch_mode == 'unbatched_params':
-           param_str = ', '.join(f'{k}={v}' for k, v in six.iteritems(unbatched_args))
-           return f'{family}({param_str})'
        else:
            raise ValueError('Unknown batch mode for batch notifier: {}'.format(
                self._config.batch_mode))

Review: Please avoid reassigning to `lines`. They are not even the same type. I guess you forgot to change it to `lines_joined`.

After:
        elif self._config.batch_mode == 'family':
            return family
        elif self._config.batch_mode == 'unbatched_params':
+           param_str = six.u(', ').join(six.u('{}={}').format(*kv) for kv in six.iteritems(unbatched_args))
+           return six.u('{}({})').format(family, param_str)
        else:
            raise ValueError('Unknown batch mode for batch notifier: {}'.format(
                self._config.batch_mode))
codereview_python_data_183
Before:
    in_channels=256,
    fc_out_channels=1024,
    roi_feat_size=7,
-   num_classes=81,
    target_means=[0., 0., 0., 0.],
    target_stds=[0.1, 0.1, 0.2, 0.2],
    reg_class_agnostic=False,

Review: The num of classes you used still is 81.

After:
    in_channels=256,
    fc_out_channels=1024,
    roi_feat_size=7,
+   num_classes=4,
    target_means=[0., 0., 0., 0.],
    target_stds=[0.1, 0.1, 0.2, 0.2],
    reg_class_agnostic=False,
codereview_python_data_184
Before:
    async def test_edgeql_insert_unless_conflict_20a(self):
        # currently we reject ELSE in these cases
        with self.assertRaisesRegex(
-           edgedb.errors.QueryError,
            "UNLESS CONFLICT can not use ELSE when constraint is from a "
            "parent type",
        ):

Review: It's a little unintuitive, but semantically it makes sense that the conflicting object is restricted to the `Person` interface, i.e. the origin of the `name` constraint. Consequently, writing `ELSE (UPDATE DerivedPerson ...` should be an error here.

After:
    async def test_edgeql_insert_unless_conflict_20a(self):
        # currently we reject ELSE in these cases
        with self.assertRaisesRegex(
+           edgedb.errors.UnsupportedFeatureError,
            "UNLESS CONFLICT can not use ELSE when constraint is from a "
            "parent type",
        ):
codereview_python_data_194
Before:
def test_that_backprop_runs():
-   """Run optimization to ensure that gradients can be computed."""
    batch_size = 1
    image_height = 9
    image_width = 12

Review: Probably need to re-word this doc string now that we're directly checking backprop gradients instead of running an optimization.

After:
def test_that_backprop_runs():
+   """Making sure the gradients can be computed."""
    batch_size = 1
    image_height = 9
    image_width = 12
codereview_python_data_196
Before:
    >>> obj.unwrap(expires)
    1
    >>> obj.unwrap(expires + Timedelta('1 minute'))
    Traceback (most recent call last):
        ...
    Expired: 2014-01-01 00:00:00+00:00

Review: We still need this line, this is what is causing the tests to fail in py3.

After:
    >>> obj.unwrap(expires)
    1
    >>> obj.unwrap(expires + Timedelta('1 minute'))
+   # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    Expired: 2014-01-01 00:00:00+00:00
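For reference, the doctest directive the review is about, in a self-contained example (the function and message are mine): `+IGNORE_EXCEPTION_DETAIL` lets one doctest pass on interpreters that render the exception's module path differently, which is the py2/py3 failure mode above.

```python
def unwrap(value):
    """
    >>> unwrap(1)
    1
    >>> unwrap(None)  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
        ...
    ValueError: expired
    """
    if value is None:
        raise ValueError("expired")
    return value

if __name__ == "__main__":
    import doctest
    doctest.testmod()
```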
codereview_python_data_203
Before:
    from __future__ import with_statement, unicode_literals

    from os import environ  # a mapping (like a dict)
-   from sys import exit as sys_exit

    from fabric.api import sudo, env, hosts
    from fabric.api import task, parallel

Review: you can just do:
```python
import sys
```
and then in the code:
```python
sys.exit('...')
```
it's simpler

After:
    from __future__ import with_statement, unicode_literals

    from os import environ  # a mapping (like a dict)
+   import sys

    from fabric.api import sudo, env, hosts
    from fabric.api import task, parallel
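Worth noting why the alias adds nothing: `sys.exit` with a string argument prints the message to stderr and exits with status 1. A minimal sketch (the message is made up):

```python
import sys

if __name__ == "__main__":
    # Prints the message to stderr and exits with status code 1.
    sys.exit("fatal: no hosts configured")
```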
codereview_python_data_206
Before:
    def _setup_pipe_pool_dependency(self):
        if self._py_pool_started:
-           # Pipeline backend doesn't really do anything with the pool, sole point of this call
-           # is to ensure lifetime of the pool exceeds the lifetime of the pipeline's backend
-           # so that shared memory managed by the pool is not freed before pipline is garbage collected.
-           # Otherwise pipline may try to access freed memory which leads to crashes at the Python teardown
            self._pipe.SetPyObjDependency(self._py_pool)

    def _start_py_workers(self):

Review:
```suggestion
# The sole point of this call is to ensure lifetime of the pool exceeds the lifetime of the pipeline's backend,
# which runs external source operator that may access memory owned by the pool,
# so that shared memory managed by the pool is not freed before pipline is garbage collected.
# Otherwise pipline may try to access freed memory which leads to crashes at the Python teardown
```

After:
    def _setup_pipe_pool_dependency(self):
        if self._py_pool_started:
+           # The sole point of this call is to ensure the lifetime of the pool exceeds the lifetime
+           # of the pipeline's backend, so that shared memory managed by the pool is not freed
+           # before pipline's backend is garbage collected.
+           # Otherwise the backend may try to access unmmaped memory which leads to crashes at the Python teardown.
            self._pipe.SetPyObjDependency(self._py_pool)

    def _start_py_workers(self):
codereview_python_data_211
Before:
        g_data = json.load(open(graph_file))
        self._labels = np.load(label_file)
        self._feats = np.load(feat_file)
-       self._graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
        graph_id = np.load(graph_id_file)

        # lo, hi means the range of graph ids for different portion of the dataset,

Review: if `graph` is a property, why not keep using it?

After:
        g_data = json.load(open(graph_file))
        self._labels = np.load(label_file)
        self._feats = np.load(feat_file)
+       self.graph = DGLGraph(nx.DiGraph(json_graph.node_link_graph(g_data)))
        graph_id = np.load(graph_id_file)

        # lo, hi means the range of graph ids for different portion of the dataset,
codereview_python_data_213
Before:
    status = map_objects.get('status', None)

    cells = map_objects['map_cells']

-   # insert detail info about gym to fort
    for cell in cells:
        if 'forts' in cell:
            for fort in cell['forts']:

Review: It's unfortunate that you have so many changes in this file. It makes it very difficult to read the diff here and likely that it will cause someone else merge conflicts.

After:
    status = map_objects.get('status', None)

    cells = map_objects['map_cells']

+   #insert detail info about gym to fort
    for cell in cells:
        if 'forts' in cell:
            for fort in cell['forts']:
codereview_python_data_218
Before:
        if self.focus is None:
            self.set_focus(0)
        elif self.follow_focus:
-           self.set_focus(self.flow_count())
        return f

    def set_limit(self, limit):

Review: Shouldn't we do the same we do in `add_flow` here? (Maybe we can even factor that out into its own `update_focus` method.)

After:
        if self.focus is None:
            self.set_focus(0)
        elif self.follow_focus:
+           self.update_focus()
        return f

    def set_limit(self, limit):
codereview_python_data_221
("float64", np.float64)) def test_gelu(self, dtype): x = np.random.rand(2, 3, 4).astype(dtype) - self.assertAllCloseAccordingToType(gelu.gelu(x), _ref_gelu(x)) - self.assertAllCloseAccordingToType( - gelu.gelu(x, False), _ref_gelu(x, False)) @parameterized.named_parameters(("float16", np.float16), ("float32", np.float32), What about testing integer and quantization types? For integer types, I believe that Gelu will simply behave as Relu. ("float64", np.float64)) def test_gelu(self, dtype): x = np.random.rand(2, 3, 4).astype(dtype) + self.assertAllCloseAccordingToType(gelu(x), _ref_gelu(x)) + self.assertAllCloseAccordingToType(gelu(x, False), _ref_gelu(x, False)) @parameterized.named_parameters(("float16", np.float16), ("float32", np.float32),
codereview_python_data_229
Before:
    def setUp(self):
        AgentTestCase.setUp(self)
-       prefix = "ExtensionTelemetryUnitTest"
-       logger.DEFAULT_LOGGER = Logger(prefix=prefix)
        clear_singleton_instances(ProtocolUtil)
        # Create the log directory if not exists

Review: this should be reverted to the original value on cleanup

After:
    def setUp(self):
        AgentTestCase.setUp(self)
        clear_singleton_instances(ProtocolUtil)
        # Create the log directory if not exists
codereview_python_data_232
Before:
        if not params.has_objects(schema):
            return None

        diff_param = -1
        overloads = []
        sn = self.get_shortname(schema)

Review: You can only check against the first overload and then break the top loop.

After:
        if not params.has_objects(schema):
            return None

+       new_params = params.objects(schema)
+       new_pt = tuple(p.get_type(schema) for p in new_params)
+
        diff_param = -1
        overloads = []
        sn = self.get_shortname(schema)
codereview_python_data_233
Before:
INSTALL_DIR_ES = '%s/elasticsearch' % INSTALL_DIR_INFRA
TMP_ARCHIVE_ES = '/tmp/localstack.es.zip'

-ERROR_PROBABILITY = 0.05
-
# set up logger
LOGGER = logging.getLogger(__name__)

Review: Couldn't we be using `ERROR_PROBABILITY` in constants.py (or config.py) instead of `KINESIS_RETURN_ERRORS`? `ERROR_PROBABILITY == 1` would have the same effect as `KINESIS_RETURN_ERRORS == True`

After:
INSTALL_DIR_ES = '%s/elasticsearch' % INSTALL_DIR_INFRA
TMP_ARCHIVE_ES = '/tmp/localstack.es.zip'

# set up logger
LOGGER = logging.getLogger(__name__)
codereview_python_data_239
Before:
    self.CheckTimestamp(event.timestamp, '2015-11-22 17:53:29.305000')

-   expected_message = ('Setup Plug and Play Device Install')
-   expected_short_message = ('Setup Plug and Play Device Install')

    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)

Review: remove bounding parentheses

After:
    self.CheckTimestamp(event.timestamp, '2015-11-22 17:53:29.305000')

+   expected_message = 'Setup Plug and Play Device Install'
+   expected_short_message = 'Setup Plug and Play Device Install'

    self._TestGetMessageStrings(
        event_data, expected_message, expected_short_message)
codereview_python_data_244
Before:
        return self.get_urls(), self.label, self.namespace

-class OscarConfig(AutoLoadURLsConfigMixin, OscarConfigMixin, AppConfig):
    """
    Base Oscar app configuration.

Review: It is better to apply `AutoLoadURLsConfigMixin` to only the app config classes that use it (`oscar.apps.catalogue.apps.CatalogueReviewsOnlyConfig`, `oscar.apps.dashboard.apps.DashboardConfig`, and `oscar.config.Shop`), instead of to all of them.

After:
        return self.get_urls(), self.label, self.namespace

+class OscarConfig(OscarConfigMixin, AppConfig):
    """
    Base Oscar app configuration.
codereview_python_data_245
Before:
        assert_eq(s1, s2, atol=0.01)
        assert_eq(p1_proba, p2_proba, atol=0.8)
    else:
        assert_eq(p1, p2)
        assert_eq(p1_proba, p2_proba, atol=0.03)
-       assert_eq(y, p1)
-       assert_eq(y, p2)
        assert_eq(p1_local, p2)
-       assert_eq(y, p1_local)

    # pref_leaf values should have the right shape
    # and values that look like valid tree nodes

Review: Why are we dropping `assert_eq(s1, s2)` from the `else` branch?

After:
        assert_eq(s1, s2, atol=0.01)
        assert_eq(p1_proba, p2_proba, atol=0.8)
    else:
+       assert_eq(s1, s2)
        assert_eq(p1, p2)
+       assert_eq(p1, y)
+       assert_eq(p2, y)
        assert_eq(p1_proba, p2_proba, atol=0.03)
        assert_eq(p1_local, p2)
+       assert_eq(p1_local, y)

    # pref_leaf values should have the right shape
    # and values that look like valid tree nodes
codereview_python_data_246
Before:
    cols = ['Column_' + str(i) for i in range(X.shape[1])]
    imptcs = []
    for col in cols:
-       try:
-           imptcs.append(impcts_dict[col])
-       except KeyError:
-           imptcs.append(0.)
    return np.array(imptcs)

    X, y = load_breast_cancer(True)

Review: I think it can be replaced with a one-line solution: `imptcs.append(impcts_dict.get(col, 0.))`

After:
    cols = ['Column_' + str(i) for i in range(X.shape[1])]
    imptcs = []
    for col in cols:
+       imptcs.append(impcts_dict.get(col, 0.))
    return np.array(imptcs)

    X, y = load_breast_cancer(True)
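The idiom the reviewer suggests, isolated and runnable (sample data is made up): `dict.get` with a default replaces the try/except KeyError dance, and the whole loop can even collapse into a list comprehension.

```python
impcts_dict = {"Column_0": 12.5}
cols = ["Column_0", "Column_1"]

# dict.get(key, default) returns the default instead of raising KeyError.
imptcs = [impcts_dict.get(col, 0.) for col in cols]
assert imptcs == [12.5, 0.]
```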
codereview_python_data_250
Before:
    vrange = vrange or (
        0 if len(v) == 0 else (int(F.asnumpy(F.max(v.tousertensor(), dim=0))) + 1))

-   if len(u) > 0:
-       assert urange > int(F.asnumpy(F.max(u.tousertensor(), dim=0))), \
-           "The urange from card should be larger than max u node_id"
-   if len(v) > 0:
-       assert vrange > int(F.asnumpy(F.max(v.tousertensor(), dim=0))), \
-           "The vrange from card should be larger than max v node_id"
-
    if utype == vtype:
        urange = vrange = max(urange, vrange)
        num_ntypes = 1

Review: Are these checks the same as L738-743? Can we merge them?

After:
    vrange = vrange or (
        0 if len(v) == 0 else (int(F.asnumpy(F.max(v.tousertensor(), dim=0))) + 1))

    if utype == vtype:
        urange = vrange = max(urange, vrange)
        num_ntypes = 1
codereview_python_data_251
Before:
        parent_group = group_name(parent)
-       if parent_group not in self.membership_cache:
-           self.membership_cache[parent_group] = set()
-       if member not in self.membership_cache[parent_group]:
-           self.membership_cache[parent_group].add(member)
-           self.membership.append(
                (group_name(parent), member))

    def _store_iam_policy_pre(self):

Review: what is the purpose of membership_cache? Also to avoid duplication?

After:
        parent_group = group_name(parent)
+       if parent_group not in self.membership_map:
+           self.membership_map[parent_group] = set()
+       if member not in self.membership_map[parent_group]:
+           self.membership_map[parent_group].add(member)
+           self.membership_items.append(
                (group_name(parent), member))

    def _store_iam_policy_pre(self):
codereview_python_data_254
Before:
        sampling_probability: TensorLike,
        embedding_fn: Union[TensorLike, Callable] = None,
        time_major: bool = False,
-       seed: Optional[TensorLike] = None,
        scheduling_seed: Optional[TensorLike] = None,
    ):
        """Initializer.

Review:
```suggestion
seed: Optional[int] = None,
```
Maybe?

After:
        sampling_probability: TensorLike,
        embedding_fn: Union[TensorLike, Callable] = None,
        time_major: bool = False,
+       seed: Optional[int] = None,
        scheduling_seed: Optional[TensorLike] = None,
    ):
        """Initializer.
codereview_python_data_262
Before:
class BigqueryDaoTest(ForsetiTestCase):
    """Tests for the BigqueryDao."""

-   #FAKE_PROJECT_NUMBERS = ['11111']
-
    @mock.patch.object(_db_connector.DbConnector, '__init__', autospec=True)
    def setUp(self, mock_db_connector):
        mock_db_connector.return_value = None

Review: nit: Remove if it's not needed.

After:
class BigqueryDaoTest(ForsetiTestCase):
    """Tests for the BigqueryDao."""

    @mock.patch.object(_db_connector.DbConnector, '__init__', autospec=True)
    def setUp(self, mock_db_connector):
        mock_db_connector.return_value = None
codereview_python_data_271
root.debug("Already set up logging") -# setup_test_logging() logging.info("Bootstrapped test") This is needed for us to work in IDE UTs root.debug("Already set up logging") +setup_test_logging() logging.info("Bootstrapped test")
codereview_python_data_273
Before:
-# -*- coding: utf-8 -*-
-"""
-Created on Wed Jan 16 14:20:24 2019
-
-"""
-
"`Learner` support for computer vision"
from ..torch_core import *
from ..basic_train import *

Review: I think this may have been added automatically by your text editor; please remove lines 1 to 6 (the file should begin with "`Learner` support for computer vision").

After:
"`Learner` support for computer vision"
from ..torch_core import *
from ..basic_train import *
codereview_python_data_276
'"algorithm":"GOOGLE_SYMMETRIC_ENCRYPTION",' '"protectionLevel":"SOFTWARE"}}') -NON_ROTATED_CRYPTO_KEY_STATE_DATA = ('{"createTime":"2019-01-22T23:30:18.939244464Z",' '"name":"projects/red2k18-224817/locations/global/keyRings/' 'red_key_ring/cryptoKeys/red_key1",' '"nextRotationTime":"2018-07-21T07:00:00Z",' The naming should be NON_ROTATED_CRYPTO_KEY_DESTROYED_DATA instead '"algorithm":"GOOGLE_SYMMETRIC_ENCRYPTION",' '"protectionLevel":"SOFTWARE"}}') +NON_ROTATED_CRYPTO_KEY_DESTROYED_STATE_DATA = ('{"createTime":"2019-01-22T23:30:18.939244464Z",' '"name":"projects/red2k18-224817/locations/global/keyRings/' 'red_key_ring/cryptoKeys/red_key1",' '"nextRotationTime":"2018-07-21T07:00:00Z",'
codereview_python_data_277
Before:
    latest_listen = db_conn.fetch_listens(
        user_name,
        limit=1,
-       to_ts=int(time.time()),
    )
    latest_listen_ts = latest_listen[0].ts_since_epoch if len(latest_listen) > 0 else 0

Review: I think it'd make sense to only calculate time.time() once (it's also used if max_ts and min_ts aren't set)

After:
    latest_listen = db_conn.fetch_listens(
        user_name,
        limit=1,
+       to_ts=current_time,
    )
    latest_listen_ts = latest_listen[0].ts_since_epoch if len(latest_listen) > 0 else 0
codereview_python_data_289
Before:
    Also cleans up the temp file if close is not invoked
    """

-   def __init__(self, path, mode):
-       super(AtomicLocalFile, self).__init__(path, mode)
-
    def move_to_final_destination(self):
        os.rename(self.tmp_path, self.path)

Review: What happens if you remove this? Can't you still initialize this atomic_file class, since it's just AtomicLocalFile with an additional method (move_to_final_destination)?

After:
    Also cleans up the temp file if close is not invoked
    """

    def move_to_final_destination(self):
        os.rename(self.tmp_path, self.path)
codereview_python_data_293
Before:
from isbn_verifier import verify

-# Tests adapted from `problem-specifications//canonical-data.json` @ v2.0.0

class IsbnVerifierTests(unittest.TestCase):

Review: Apologies for the delay in reviewing, but since this was written the canonical data has been updated. Can you please update the tests to v2.2.0?

After:
from isbn_verifier import verify

+# Tests adapted from `problem-specifications//canonical-data.json` @ v2.2.0

class IsbnVerifierTests(unittest.TestCase):
codereview_python_data_294
Before:
    for dst_type in <list>dest_sig:
        found_matches = []
        found_candidates = []
-       # Make two seperate lists: One for for signature sub-trees with at least one definite match, and another for signature sub-trees with only ambiguous matches (where `dest_sig[i] is None`).
        if dst_type is None:
            for sn in <list>sigindex_matches:
                found_matches.extend((<dict>sn).values())

Review: And this line too.

After:
    for dst_type in <list>dest_sig:
        found_matches = []
        found_candidates = []
+       # Make two seperate lists: One for for signature sub-trees
+       # with at least one definite match, and another for
+       # signature sub-trees with only ambiguous matches
+       # (where `dest_sig[i] is None`).
        if dst_type is None:
            for sn in <list>sigindex_matches:
                found_matches.extend((<dict>sn).values())
codereview_python_data_297
"""Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: - num_classes (int): Number of categories not including the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes. not including -> excluding """Anchor-based head (RPN, RetinaNet, SSD, etc.). Args: + num_classes (int): Number of categories excluding the background category. in_channels (int): Number of channels in the input feature map. feat_channels (int): Number of hidden channels. Used in child classes.
codereview_python_data_314
"-Wno-switch-enum", "-Wno-weak-template-vtables", "-Wno-weak-vtables", - "-Wno-implicit-int-float-conversion" ) else: ext.compiler.add_compiler_flag( This option is not recognized by `Apple clang version 11.0.0 (clang-1100.0.33.8)`, started to get zillions of similar messages ``` warning: unknown warning option '-Wno-implicit-int-float-conversion'; did you mean '-Wno-implicit-float-conversion'? [-Wunknown-warning-option] ``` "-Wno-switch-enum", "-Wno-weak-template-vtables", "-Wno-weak-vtables", ) else: ext.compiler.add_compiler_flag(
codereview_python_data_317
Before:
    Returns:
        list[Tensor]: Refined bboxes of each image in a mini-batch.
    """
    bboxes_list = []
    for i in range(len(img_metas)):
        inds = torch.nonzero(rois[:, 0] == i).squeeze()

Review: Why delete these two statements?

After:
    Returns:
        list[Tensor]: Refined bboxes of each image in a mini-batch.
    """
+   img_ids = rois[:, 0].long().unique(sorted=True)
+   assert img_ids.numel() == len(img_metas)
+
    bboxes_list = []
    for i in range(len(img_metas)):
        inds = torch.nonzero(rois[:, 0] == i).squeeze()
codereview_python_data_324
Before:
        logs.log_error('Unable to determine build parameters.')
        return

-   build_params_check_path = os.path.join(symbols_directory,
-                                          '.cached_build_params')
-   # Check if we already have the symbols locally.
-   cached_build_params = utils.read_data_from_file(
-       build_params_check_path, eval_data=True)
-   if cached_build_params and cached_build_params == build_params:
-       # No work to do, same system symbols already in local.
        return

    build_id = build_params.get('build_id')

Review: Can you add a helper for these rather than duplicate code? i.e. roughly:
```python
def check_symbols_cached(symbols_directory, build_params):
    build_params_check_path = os.path.join(symbols_directory, ...)
    cached_build_params = ...
    return cached_build_params and cached_build_params == build_params
```

After:
        logs.log_error('Unable to determine build parameters.')
        return

+   if check_symbols_cached(symbols_directory, build_params):
        return

    build_id = build_params.get('build_id')
codereview_python_data_326
Before:
        super().__init__(database_manager, txn_type, CONFIG_LEDGER_ID)
        self.write_req_validator = write_req_validator
        self.constraint_serializer = ConstraintsSerializer(domain_state_serializer)
-       self.config = getConfig()
-       self._update_state_by_versions = self._get_update_state_by_versions()

    def _static_validation_for_rule(self, operation, identifier, req_id):
        try:

Review: Although this version processing can be fine for the fix, I think it would be great to eventually do it in a cleaner way using the strategy pattern more explicitly:
1) Create AuthRule handler classes for version 1.9.1 (the only thing they do differently is the `update_state` method).
2) Add a possibility to register req handlers for the given state.
3) Register AuthRule1.9.1.
4) Apply a strategy in WriteRegManager's `restore_state`: get a version for the txn; if there is a specific req handler registered for this version, call it; otherwise call the common (current) handler.
5) In future: extend the versioning pattern to other or all WriteRegManager methods.

After:
        super().__init__(database_manager, txn_type, CONFIG_LEDGER_ID)
        self.write_req_validator = write_req_validator
        self.constraint_serializer = ConstraintsSerializer(domain_state_serializer)

    def _static_validation_for_rule(self, operation, identifier, req_id):
        try:
codereview_python_data_330
Before:
    agent_details = {
        "agent_name": AGENT_NAME,
-       "goal_state_version": str(GOAL_STATE_AGENT_VERSION),
        "python_version": "Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO),
        "crp_supported_features": [name for name, _ in get_agent_supported_features_list_for_crp().items()],
        "extension_supported_features": [name for name, _ in

Review: In case 'vmAgent' is not there, the default would return None. If the value is None, then 'None'.get('version') would throw too, right? Why not have an if condition at the top and go like:
```python
if 'vmAgent' in data:
    # Fetch other stuff
```
And maybe do this for all nested properties?

After:
    agent_details = {
        "agent_name": AGENT_NAME,
+       "daemon_version": str(version.get_daemon_version()),
        "python_version": "Python: {0}.{1}.{2}".format(PY_VERSION_MAJOR, PY_VERSION_MINOR, PY_VERSION_MICRO),
        "crp_supported_features": [name for name, _ in get_agent_supported_features_list_for_crp().items()],
        "extension_supported_features": [name for name, _ in
codereview_python_data_331
Before:
    nose_command_line += [self.script]
    self._start_subprocess(nose_command_line)

-   if self.__is_verbose() and is_linux():
        self._tailer = FileTailer(self.stdout_file)

    def check(self):

Review: It should work cross-platform, not linux-only.

After:
    nose_command_line += [self.script]
    self._start_subprocess(nose_command_line)

+   if self.__is_verbose():
        self._tailer = FileTailer(self.stdout_file)

    def check(self):
codereview_python_data_335
Before:
    ::

        ANSIBLE_ROLES_PATH:
-           $ephemeral_directory/roles/:$project_directory/../:$HOME/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
        ANSIBLE_LIBRARY:
-           $ephemeral_directory/modules/:$project_directory/library/:$HOME/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
        ANSIBLE_FILTER_PLUGINS:
-           $ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:$HOME/.ansible/plugins/filter:/usr/share/ansible/plugins/modules

    Environment variables can be passed to the provisioner. Variables in this
    section which match the names above will be appended to the above defaults,

Review: The library -> modules and filters -> filter path changes are breaking changes, or?

After:
    ::

        ANSIBLE_ROLES_PATH:
+           $ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
        ANSIBLE_LIBRARY:
+           $ephemeral_directory/modules/:$project_directory/library/:~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
        ANSIBLE_FILTER_PLUGINS:
+           $ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:~/.ansible/plugins/filter:/usr/share/ansible/plugins/modules

    Environment variables can be passed to the provisioner. Variables in this
    section which match the names above will be appended to the above defaults,
codereview_python_data_347
Before:
    logs.log_error('CSP violation: {}'.format(self.request.get('csp-report')))

    @handler.get(handler.JSON)
    def get(self):
        """Handle a GET request."""
        self.log_csp_violation()

    @handler.post(handler.JSON, handler.JSON)
    def post(self):
        """Handle a POST request."""
        self.log_csp_violation()

Review: We can't do this without auth; this would just mean arbitrary log injection.

After:
    logs.log_error('CSP violation: {}'.format(self.request.get('csp-report')))

    @handler.get(handler.JSON)
+   @handler.check_user_access(need_privileged_access=False)
    def get(self):
        """Handle a GET request."""
        self.log_csp_violation()

    @handler.post(handler.JSON, handler.JSON)
+   @handler.check_user_access(need_privileged_access=False)
    def post(self):
        """Handle a POST request."""
        self.log_csp_violation()
codereview_python_data_349
Before:
    # Testing xtc striding: Check for resolution of Issue #188
    with tmpdir.as_cwd():
-       with pytest.raises(FinishTimeException):
            MDAnalysis.analysis.helanal.helanal_trajectory(
                u, selection="name CA", finish=5
            )

    with tmpdir.as_cwd():
-       with pytest.raises(FinishTimeException):
            MDAnalysis.analysis.helanal.helanal_trajectory(
                u, selection="name CA", begin=1, finish=0
            )

Review: Let's use a ValueError, more in line with standard Python usage.

After:
    # Testing xtc striding: Check for resolution of Issue #188
    with tmpdir.as_cwd():
+       with pytest.raises(ValueError):
            MDAnalysis.analysis.helanal.helanal_trajectory(
                u, selection="name CA", finish=5
            )

    with tmpdir.as_cwd():
+       with pytest.raises(ValueError):
            MDAnalysis.analysis.helanal.helanal_trajectory(
                u, selection="name CA", begin=1, finish=0
            )
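The `pytest.raises` idiom in isolation, for reference (the validating function is a stand-in I made up, not MDAnalysis code; requires pytest):

```python
import pytest

def set_finish(finish):
    """Reject nonsensical frame bounds with a standard ValueError."""
    if finish is not None and finish < 1:
        raise ValueError("finish must be >= 1")

def test_bad_finish():
    # The with-block passes only if the expected exception type is raised.
    with pytest.raises(ValueError):
        set_finish(0)
```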
codereview_python_data_353
""" self.special_operation_modifiers[name] = func """ PRIVATE """ async def _build_operation_object(self, data): interesting idea. I like this. I'm wondering if this can be combined with the add_special_payload function on the file_svc. they do the same functionality, just different applications (one adds special functionality on a download, another when an operation starts). seems like they can be consolidated. """ self.special_operation_modifiers[name] = func + async def update_config(self, data): + self.set_config(data.get('prop'), data.get('value')) + self.log.debug('Configuration update: %s set to %s' % (data.get('prop'), data.get('value'))) + """ PRIVATE """ async def _build_operation_object(self, data):
codereview_python_data_362
if replacement_is_path:
path = Path(replacement)
try:
- replacement = path.expanduser().resolve(strict=True)
except FileNotFoundError as e:
raise ValueError(f"Invalid file path: {replacement} ({e})")
This doesn't fly - we say `replacement` is a str, so we can't just make it a `Path` here. I think this needs to happen on-demand in the maplocal addon.
if replacement_is_path:
path = Path(replacement)
try:
+ replacement = str(path.expanduser().resolve(strict=True))
except FileNotFoundError as e:
raise ValueError(f"Invalid file path: {replacement} ({e})")
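A rough sketch of the on-demand resolution the reviewer asks for, keeping the stored option a plain str and only touching the filesystem at lookup time (the helper name is hypothetical):

``` python
from pathlib import Path

def resolve_replacement(replacement: str) -> str:
    # Expand ~ and resolve symlinks only when the mapping is actually used;
    # strict=True still raises FileNotFoundError for missing files.
    return str(Path(replacement).expanduser().resolve(strict=True))
```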
codereview_python_data_383
def __len__(self):
if self.fold != -1:
- if self.cross_valid:
- print("WIP")
- else:
- return self.fold_start_idx[self.fold + 1] - self.fold_start_idx[self.fold]
return len(self.graph_lists)
def _download(self):
Will we implement this?
def __len__(self):
if self.fold != -1:
+ return self.fold_start_idx[self.fold + 1] - self.fold_start_idx[self.fold]
return len(self.graph_lists)
def _download(self):
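The accepted fix computes a fold's length from cumulative start offsets; with made-up boundaries the arithmetic looks like this:

``` python
fold_start_idx = [0, 40, 80, 120]  # illustrative offsets for three folds
fold = 1
# The length of fold 1 is the distance between consecutive start offsets.
print(fold_start_idx[fold + 1] - fold_start_idx[fold])  # 40
```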
codereview_python_data_384
tab.openurl(url)
if background is None:
background = config.get('tabs', 'background-tabs')
- if not background:
- self.setCurrentWidget(tab)
- elif background:
self.tab_index_changed.emit(self.currentIndex(), self.count())
tab.show()
self.new_tab.emit(tab, idx)
return tab
Since there is `if not background` above, this could be cleaned up like this:
``` python
if background:
    self.tab_index_changed.emit(self.currentIndex(), self.count())
else:
    self.setCurrentWidget(tab)
```
tab.openurl(url)
if background is None:
background = config.get('tabs', 'background-tabs')
+ if background:
self.tab_index_changed.emit(self.currentIndex(), self.count())
+ else:
+ self.setCurrentWidget(tab)
tab.show()
self.new_tab.emit(tab, idx)
return tab
codereview_python_data_387
avg(t.average_exec_per_sec) as avg_exec_per_sec,
avg(t.fuzzing_time_percent) as fuzzing_time_percent,
sum(t.new_units_added) as new_tests_added,
-sum(t.new_cov_features) as new_cov_features,
avg(t.crash_count*100) as regular_crash_percent,
avg(t.oom_count*100) as oom_percent,
avg(t.leak_count*100) as leak_percent,
Maybe just "features"; "cov" is a bit redundant.
avg(t.average_exec_per_sec) as avg_exec_per_sec,
avg(t.fuzzing_time_percent) as fuzzing_time_percent,
sum(t.new_units_added) as new_tests_added,
+sum(t.new_features) as new_features,
avg(t.crash_count*100) as regular_crash_percent,
avg(t.oom_count*100) as oom_percent,
avg(t.leak_count*100) as leak_percent,
codereview_python_data_397
buckets = ab.pop('buckets', [tactic])
ab.pop('access', None)
plugin_path = pathlib.PurePath(filename).parts
- plugin = plugin_path[1] if 'plugins' in plugin_path else None
if tactic and tactic not in filename:
self.log.error('Ability=%s has wrong tactic' % id)
Might be worth pulling this out to its own function since the same code is used in `load_yaml_file()`
buckets = ab.pop('buckets', [tactic])
ab.pop('access', None)
plugin_path = pathlib.PurePath(filename).parts
+ plugin = plugin_path[1] if 'plugins' in plugin_path else ''
+ ab.pop('plugin', plugin)
if tactic and tactic not in filename:
self.log.error('Ability=%s has wrong tactic' % id)
codereview_python_data_401
def commit(self):
"""Commit the stored inventory."""
- if InventoryIndex.inventory_index_warnings:
status = IndexState.PARTIAL_SUCCESS
else:
status = IndexState.SUCCESS
Why are you referencing the class instead of the existing object?
def commit(self):
"""Commit the stored inventory."""
+ if self.inventory_index.inventory_index_warnings:
status = IndexState.PARTIAL_SUCCESS
else:
status = IndexState.SUCCESS
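The distinction the reviewer is pointing at: reading an attribute through the class bypasses any per-instance state, which is presumably where the warning count lives. A minimal illustration (the default value is assumed):

``` python
class InventoryIndex:
    inventory_index_warnings = 0  # class-level default

idx = InventoryIndex()
idx.inventory_index_warnings = 3                # per-instance update
print(InventoryIndex.inventory_index_warnings)  # 0 - the class attribute never saw it
print(idx.inventory_index_warnings)             # 3 - the instance holds the real state
```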
codereview_python_data_408
import torch
from mmcv.ops import batched_nms
-from mmdet.core import bbox2result
from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
from ..builder import HEADS
If `aug_test` is not supported yet, we should raise an exception.
import torch
from mmcv.ops import batched_nms
+from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
+ multiclass_nms)
from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
from ..builder import HEADS
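What "raise an exception" typically looks like for an unsupported test path — a sketch only, not the actual MMDetection code, and the method signature is assumed:

``` python
def aug_test(self, x, proposal_list, img_metas, rescale=False):
    # Augmented testing is not wired up for this head yet; fail loudly
    # instead of silently producing wrong results.
    raise NotImplementedError('aug_test is not supported yet')
```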
codereview_python_data_412
""" if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): module.float() - if (isinstance(module, nn.GroupNorm) or torch.__version__ < '1.3' - or torch.__version__ == 'parrots'): module.forward = patch_forward_method(module.forward, torch.half, torch.float) for child in module.children(): The condition `torch.__version__ < '1.3'` can be removed since we only support pytorch 1.3+. """ if isinstance(module, (nn.modules.batchnorm._BatchNorm, nn.GroupNorm)): module.float() + if isinstance(module, nn.GroupNorm) or torch.__version__ == 'parrots': module.forward = patch_forward_method(module.forward, torch.half, torch.float) for child in module.children():
codereview_python_data_413
np.ndarray: The image with bboxes drawn on it.
"""
if kwargs is not None:
- if 'score_thr' in kwargs:
- kwargs.pop('score_thr')
- if 'text_color' in kwargs:
- kwargs.pop('text_color')
- if 'bbox_color' in kwargs:
- kwargs['colors'] = kwargs.pop('bbox_color')
mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
Can we simply use `kwargs.pop('score_thr', None)`?
np.ndarray: The image with bboxes drawn on it.
"""
if kwargs is not None:
+ kwargs.pop('score_thr', None)
+ kwargs.pop('text_color', None)
+ kwargs['colors'] = kwargs.pop('bbox_color', 'green')
mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
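The suggestion relies on `dict.pop` accepting a default, which removes the key if present and otherwise returns the default instead of raising KeyError; a standalone sketch:

``` python
kwargs = {'bbox_color': 'red'}
kwargs.pop('score_thr', None)                         # absent key, no KeyError
kwargs['colors'] = kwargs.pop('bbox_color', 'green')  # 'red' here; 'green' if unset
print(kwargs)  # {'colors': 'red'}
```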
codereview_python_data_417
TIME_TO_CONSIDER_STATS_AS_OLD = 12 # hours
-def new_user_stats():
return datetime.now(timezone.utc) - db_stats.get_timestamp_for_last_user_stats_update() > timedelta(hours=TIME_TO_CONSIDER_STATS_AS_OLD)
This function name doesn't exactly make it clear what is going on. Stats for a new user? Or new stats about users?
TIME_TO_CONSIDER_STATS_AS_OLD = 12 # hours
+def is_new_user_stats_batch():
+ """ Returns True if this batch of user stats is new, False otherwise
+
+ User stats come in as multiple rabbitmq messages. We only wish to send an email once per batch.
+ So, we check the database and see if the difference between the last time stats were updated
+ and right now is greater than 12 hours.
+ """
return datetime.now(timezone.utc) - db_stats.get_timestamp_for_last_user_stats_update() > timedelta(hours=TIME_TO_CONSIDER_STATS_AS_OLD)
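The staleness check itself is a plain timedelta comparison; here is a standalone version with the database lookup replaced by an explicit timestamp argument (the helper name is illustrative, and last_update is assumed to be timezone-aware):

``` python
from datetime import datetime, timedelta, timezone

def is_stale(last_update, max_age_hours=12):
    # True once the last update is older than the threshold, i.e. a new batch.
    return datetime.now(timezone.utc) - last_update > timedelta(hours=max_age_hours)
```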
codereview_python_data_421
with call signature ``func(r, r0, **kwargs)`` (the "Contacts API").
pbc : bool (optional)
Uses periodic boundary conditions to calculate distances if set to ``True``; the
- default is ``False``.
kwargs : dict, optional
dictionary of additional kwargs passed to `method`. Check respective functions for reasonable values.
I would set the default to `True` instead, since this is probably what most users would expect the behaviour to be.
with call signature ``func(r, r0, **kwargs)`` (the "Contacts API").
pbc : bool (optional)
Uses periodic boundary conditions to calculate distances if set to ``True``; the
+ default is ``True``.
kwargs : dict, optional
dictionary of additional kwargs passed to `method`. Check respective functions for reasonable values.
codereview_python_data_427
#
# kirchhoffs_circuit_laws.py - using NetworkX for Kirchhoff's circuit laws
#
-# Copyright 2015 NetworkX developers.
#
# This file is part of NetworkX.
#
Should kirchhoffs_circuit_laws.py go with the examples?
#
# kirchhoffs_circuit_laws.py - using NetworkX for Kirchhoff's circuit laws
#
+# Copyright 2015 JuanPi Carbajal <juanpi+dev@gmail.com>
+# Copyright 2015–2016 NetworkX developers.
#
# This file is part of NetworkX.
#
codereview_python_data_446
out.append(('pac+https://example.com/proxy.pac',
'Proxy autoconfiguration file URL'))
return out
- def __repr__(self):
- return utils.get_repr(self, none_ok=self.none_ok,
- valid_values=self.valid_values)
class SearchEngineUrl(BaseType):
`valid_values` is fixed, so no need to include it.
out.append(('pac+https://example.com/proxy.pac',
'Proxy autoconfiguration file URL'))
return out
class SearchEngineUrl(BaseType):
codereview_python_data_451
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
-def test_random(dtype):
inp = np.asanyarray(
[[[1.0, 1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0, 3.0]]]
).astype(dtype)
NIT: this is not a test case about randomized input :smiley: Maybe we could change its name.
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
@pytest.mark.parametrize("dtype", [np.float16, np.float32, np.float64])
+def layer_test_esn(dtype):
inp = np.asanyarray(
[[[1.0, 1.0, 1.0, 1.0]], [[2.0, 2.0, 2.0, 2.0]], [[3.0, 3.0, 3.0, 3.0]]]
).astype(dtype)