codereview_python_data_3291
        orient='index',
    )

-    # XXX: This suite doesn't use the data in its HistoricDataPortal; it uses a
     # FakeDataPortal with different mock data.
     def init_instance_fixtures(self):
         super(TestOrderAfterDelist, self).init_instance_fixtures()

Is this talking specifically about the implementation, or the interface?

        orient='index',
    )

+    # XXX: This suite doesn't use the data in its DataPortal; it uses a
     # FakeDataPortal with different mock data.
     def init_instance_fixtures(self):
         super(TestOrderAfterDelist, self).init_instance_fixtures()
codereview_python_data_3299
        self.assertTrue(VK.can_handle_url("http://vk.com/video-24136539_456239830"))
        self.assertTrue(VK.can_handle_url("https://www.vk.com/video-34453259_456241787"))
        self.assertTrue(VK.can_handle_url("https://vk.com/videos-24136539?z=video-24136539_456241155%2Fpl_-24136539_-2"))

        # shouldn't match
        self.assertFalse(VK.can_handle_url("https://vk.com/"))

you don't need a valid video here, but you should add new urls for the updated regex such as `/video123`

        self.assertTrue(VK.can_handle_url("http://vk.com/video-24136539_456239830"))
        self.assertTrue(VK.can_handle_url("https://www.vk.com/video-34453259_456241787"))
        self.assertTrue(VK.can_handle_url("https://vk.com/videos-24136539?z=video-24136539_456241155%2Fpl_-24136539_-2"))
+        self.assertTrue(VK.can_handle_url("https://vk.com/video34453259_456241787"))
+        self.assertTrue(VK.can_handle_url("https://www.vk.com/video34453259_456241787"))
+        self.assertTrue(VK.can_handle_url("https://vk.com/video?z=video44031131_456239067"))
+        self.assertTrue(VK.can_handle_url("https://www.vk.com/video?z=video44031131_456239067"))

        # shouldn't match
        self.assertFalse(VK.can_handle_url("https://vk.com/"))
codereview_python_data_3303
        # First, get information on all blocks which contain this transaction
        response = self.connection.run(
                r.table('bigchain', read_mode=self.read_mode)
-                .get_all(value, index=index)\
                 .pluck('votes', 'id', {'block': ['voters']}))
        return list(response)

no need for a backslash

        # First, get information on all blocks which contain this transaction
        response = self.connection.run(
                r.table('bigchain', read_mode=self.read_mode)
+                .get_all(value, index=index)
                 .pluck('votes', 'id', {'block': ['voters']}))
        return list(response)
codereview_python_data_3306
        try:
            inventory_dict = inventory_req['responses'][
                'GET_INVENTORY']['inventory_delta']['inventory_items']
-        except:TypeError:
            return 0

        item_count = 0

In what case would we get this TypeError?

        try:
            inventory_dict = inventory_req['responses'][
                'GET_INVENTORY']['inventory_delta']['inventory_items']
+        except TypeError:
            return 0

        item_count = 0
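A minimal sketch of one way such a TypeError can arise (hypothetical data, not taken from the record): if an intermediate lookup in the chain returns None, subscripting it raises TypeError rather than KeyError.

```python
inventory_req = {'responses': {'GET_INVENTORY': None}}
try:
    items = inventory_req['responses']['GET_INVENTORY']['inventory_delta']['inventory_items']
except TypeError:
    # 'NoneType' object is not subscriptable
    items = []
print(items)  # []
```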
codereview_python_data_3308
    index = np.random.RandomState(seed).permutation(index)
    train_idx = index[:int(train_size * num_edges)]
-    val_idx = index[len(index) - int(val_size * num_edges):]
    test_idx = index[int(train_size * num_edges):num_edges - int(val_size * num_edges)]
    train_mask = np.zeros(num_edges, dtype=np.bool)
    val_mask = np.zeros(num_edges, dtype=np.bool)

For the sake of consistency, pls replace "len(index)" with "num_edges".

    index = np.random.RandomState(seed).permutation(index)
    train_idx = index[:int(train_size * num_edges)]
+    val_idx = index[num_edges - int(val_size * num_edges):]
    test_idx = index[int(train_size * num_edges):num_edges - int(val_size * num_edges)]
    train_mask = np.zeros(num_edges, dtype=np.bool)
    val_mask = np.zeros(num_edges, dtype=np.bool)
codereview_python_data_3314
            opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
        elif 'address=' in opts:
            if not cls.debug_java_port:
-                cls.debug_java_port = ops.split("address=")[1]
        return opts

extract the port and set to `debug_java_port`

            opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
        elif 'address=' in opts:
            if not cls.debug_java_port:
+                cls.debug_java_port = ops.split('address=')[1]
        return opts
codereview_python_data_3325
        else:
            pokemon_data = self._get_inventory_pokemon(inventory)
        for pokemon in pokemon_data:
-            if not(pokemon.get('favorite', 0) is 1 and self.config.get('dont_nickname_favorite','')):
                self._nickname_pokemon(pokemon)

    def _get_inventory_pokemon(self,inventory_dict):

since `dont_nickname_favorite` is a boolean, the `get` call should default to a boolean as well (`False`)

        else:
            pokemon_data = self._get_inventory_pokemon(inventory)
        for pokemon in pokemon_data:
+            if not(pokemon.get('favorite', 0) is 1 and self.config.get('dont_nickname_favorite',False)):
                self._nickname_pokemon(pokemon)

    def _get_inventory_pokemon(self,inventory_dict):
codereview_python_data_3339
                 frozen_stages=-1,
                 normalize=dict(
                     type='BN',
-                    eval=True,
                     frozen=False),
-                 with_cp=False):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))

`eval` is a built-in keyword of python, I suggest using `eval_mode`.

                 frozen_stages=-1,
                 normalize=dict(
                     type='BN',
+                    eval_mode=True,
                     frozen=False),
+                 with_cp=False,
+                 zero_init_residual=True):
        super(ResNet, self).__init__()
        if depth not in self.arch_settings:
            raise KeyError('invalid depth {} for resnet'.format(depth))
codereview_python_data_3348
    def __init__(self, graph_name, port):
        self._graph_name = graph_name
        self._pid = os.getpid()
-        self.proxy = xmlrpc.client.ServerProxy("http://localhost:" + str(port) + "/")
        self._worker_id, self._num_workers = self.proxy.register(graph_name)
        if self._worker_id < 0:
            raise Exception('fail to get graph ' + graph_name + ' from the graph store')

Please revert this.

    def __init__(self, graph_name, port):
        self._graph_name = graph_name
        self._pid = os.getpid()
+        self.proxy = xmlrpc.client.ServerProxy("http://127.0.0.1:" + str(port) + "/")
        self._worker_id, self._num_workers = self.proxy.register(graph_name)
        if self._worker_id < 0:
            raise Exception('fail to get graph ' + graph_name + ' from the graph store')
codereview_python_data_3351
"""Custom astroid checker for config calls.""" import sys -import os -import os.path - import yaml import astroid from pylint import interfaces, checkers from pylint.checkers import utils -from pathlib import Path OPTIONS = None Please... - Move this up to the other Python stdlib imports - Remove the unused `os` and `os.path` imports (not sure why `os` was imported in the first place - Do `import pathlib` and use `pathlib.Path` instead (I prefer importing modules and sub-modules instead of importing things from modules) """Custom astroid checker for config calls.""" import sys +import pathlib import yaml import astroid from pylint import interfaces, checkers from pylint.checkers import utils OPTIONS = None
codereview_python_data_3354
        ('http://localhost:1', ConnectionError),
        # Inputing a URL that cannot be parsed should raise an InvalidURL error
        ('http://fe80::5054:ff:fe5a:fc0', InvalidURL),
-        # Inputing a URL that cannot be parsed by urllib3 should raise a ConnectionError, as it closes pool
-        ('http://1234567890123456789012345678901234567890123456789012345678901234.com', ConnectionError)
    ))
    def test_errors(self, url, exception):
        with pytest.raises(exception):

This test seems to contradict the behaviour we seem to be changing. At least it seems to indicate that we're not allowing a 3rd party exception to be raised

        ('http://localhost:1', ConnectionError),
        # Inputing a URL that cannot be parsed should raise an InvalidURL error
        ('http://fe80::5054:ff:fe5a:fc0', InvalidURL),
+        ('http://1234567890123456789012345678901234567890123456789012345678901234.com', InvalidURL)
    ))
    def test_errors(self, url, exception):
        with pytest.raises(exception):
codereview_python_data_3355
    @pyqtSlot(bool)
    def _on_fullscreen_requested(self, on):
        if on:
-            self._state_before_fullscreen = self.windowState()
            self.showFullScreen()
-        else:
-            self.setWindowState(self._state_before_fullscreen)
-        log.misc.debug('on: {}, state before fullscreen: {}'
-                       .format(on, self._state_before_fullscreen))

    @cmdutils.register(instance='main-window', scope='window')
    @pyqtSlot()

Replacing this with an unconditioned `setWindowState` has the consequence that if I go to fullscreen, enter fullscreen video, go out of fullscreen with `<F11>` (or `<mod+f>` in case of my WM) -- note that the video is still "fullscreen" inside the qutebrowser window -- and finally exit the video fullscreen, then qutebrowser goes into fullscreen again. That seems weird, so I originally called `showNormal` only when fullscreen was not going to be restored.

    @pyqtSlot(bool)
    def _on_fullscreen_requested(self, on):
        if on:
+            self.state_before_fullscreen = self.windowState()
            self.showFullScreen()
+        elif self.isFullScreen():
+            self.setWindowState(self.state_before_fullscreen)
+        log.misc.debug('on: {}, state before fullscreen: {}'.format(
+            on, debug.qflags_key(Qt, self.state_before_fullscreen)))

    @cmdutils.register(instance='main-window', scope='window')
    @pyqtSlot()
codereview_python_data_3363
                   verify=False,
                   normalize_cfg=None):

-    model = _build_model_from_cfg(config_path, checkpoint_path)

    input_config = {
        'input_shape': input_shape,
        'input_path': input_img,
        'normalize_cfg': normalize_cfg
    }

-    one_img, one_meta = _preprocess_example_input(input_config)

    origin_forward = model.forward
    model.forward = partial(

I think we'd better implement this function using ```generate_inputs_and_wrap_model```

                   verify=False,
                   normalize_cfg=None):

+    model = build_model_from_cfg(config_path, checkpoint_path)

    input_config = {
        'input_shape': input_shape,
        'input_path': input_img,
        'normalize_cfg': normalize_cfg
    }

+    one_img, one_meta = preprocess_example_input(input_config)

    origin_forward = model.forward
    model.forward = partial(
codereview_python_data_3364
                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
-            tuple[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES

We may add a conversion from list to tuple before `return` since there are some in list.

                a tuple or list, override the CLASSES defined by the dataset.

        Returns:
+            tuple[str] or list[str]: Names of categories of the dataset.
        """
        if classes is None:
            return cls.CLASSES
codereview_python_data_3368
    def Start(self):
        """Starts the profiler."""
-        filename = '{0!s}-{1:s}.csv.gz'.format(
            self._FILENAME_PREFIX, self._identifier)
        if self._path:
            filename = os.path.join(self._path, filename)

        self._sample_file = gzip.open(filename, 'wb')
-        self._Write(self._FILE_HEADER)

        self._start_time = time.time()

Why support anything else than a string here?

    def Start(self):
        """Starts the profiler."""
+        filename = '{0:s}-{1:s}.csv.gz'.format(
            self._FILENAME_PREFIX, self._identifier)
        if self._path:
            filename = os.path.join(self._path, filename)

        self._sample_file = gzip.open(filename, 'wb')
+        self._WritesString(self._FILE_HEADER)

        self._start_time = time.time()
codereview_python_data_3373
    @property
    def status_verbose_name(self):
        status_verbose_names = getattr(settings, 'OSCAR_ORDER_STATUS_VERBOSE_NAMES', {})
-        if self.status and status_verbose_names and self.status in status_verbose_names:
            return status_verbose_names[self.status]
        return self.status

Wrong setting I think

    @property
    def status_verbose_name(self):
        status_verbose_names = getattr(settings, 'OSCAR_ORDER_STATUS_VERBOSE_NAMES', {})
+        if self.status and self.status in status_verbose_names:
            return status_verbose_names[self.status]
        return self.status
codereview_python_data_3377
        # [ v[list_map[i]] for i in range( nmodes) ] ))
        warnings.warn(
-            "This structure of the `results` list will change in "
            "MDAnalysis version 2.0.",
            category=DeprecationWarning
        )

I think it would be best to have some kind of docstring entries to let users know what's going on. Thoughts?

        # [ v[list_map[i]] for i in range( nmodes) ] ))
        warnings.warn(
+            "The structure of the `results` list will change in "
            "MDAnalysis version 2.0.",
            category=DeprecationWarning
        )
codereview_python_data_3385
""" state = {} - status, output = shellutil.run_get_output("/sbin/ip -a -d -o link") """ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64 I don't think we use full path. I would be leery of hard coding a path because it prevents customers from overriding I would either disable this test in Travis, or modify Travis' environment to include ip. You may have to do the former. """ state = {} + status, output = shellutil.run_get_output("ip -a -d -o link") """ 1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN mode DEFAULT group default qlen 1000\ link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00 promiscuity 0 addrgenmode eui64 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000\ link/ether 00:0d:3a:30:c3:5a brd ff:ff:ff:ff:ff:ff promiscuity 0 addrgenmode eui64
codereview_python_data_3387
    def __init__(self, config):
        self.config = config
-        try:
-            self.gmap_client = googlemaps.Client(config.gmapkey)
-        except:
-            self.gmap_client = None
        self.fort_timeouts = dict()
        self.pokemon_list = json.load(
            open(os.path.join('data', 'pokemon.json'))

The initial should better be inside Task.

    def __init__(self, config):
        self.config = config
        self.fort_timeouts = dict()
        self.pokemon_list = json.load(
            open(os.path.join('data', 'pokemon.json'))
codereview_python_data_3389
code.putln("/* --- Runtime support code --- */") def initialize_main_h_code(self): - # h code can include utility code in a few cases so it needs to be insertable rootwriter = self.rootwriter for part in self.code_layout: - if part.find("utility_code") != -1: self.parts[part] = rootwriter.insertion_point() def finalize_main_c_code(self): It took me a while to recognise this as a verbose spelling of `"utility_code" in part`. I'd rather use ```suggestion if part.startswith("utility_code"): ``` However, DRY aside, I wonder if we should move the implementation parts further down in the header file, as we do in the C file. In fact, I wonder if we should allow an implementation in a header file at all There is a `proto_before_types` for a reason, and the same applies to the non-`before_types` section. Could we only allow the two `utility_code_proto` sections for now, and move them before and after the type declarations? code.putln("/* --- Runtime support code --- */") def initialize_main_h_code(self): + # h files can include only include a much smaller list of sections + # that are manually created here rootwriter = self.rootwriter for part in self.code_layout: + if part in ('h_code', 'utility_code_proto_before_types', 'type_declarations', + 'utility_code_proto', 'end'): self.parts[part] = rootwriter.insertion_point() def finalize_main_c_code(self):
codereview_python_data_3395
configStateDbName = 'config_state'
attrDbName = 'attr_db'
idrCacheDbName = 'idr_cache_db'
-tsRevocationDbName = "timestamp_revoc_db"

PluginsToLoad = []

We need to store timestamps of all state updates (not only revocation) to reduce the risk of malicious nodes sending incorrect old results. So, it's better to rename it to `state_ts_db`

configStateDbName = 'config_state'
attrDbName = 'attr_db'
idrCacheDbName = 'idr_cache_db'
+stateTsDbName = "state_ts_db"

PluginsToLoad = []
codereview_python_data_3397
        self.assertAllClose(compare_total, res)

    def testAmbiguousOrder(self):
-        with self.assertRaises(ValueError):
            with self.cached_session(use_gpu=True):
                self.setup()
                seq_loss = loss.SequenceLoss(

```suggestion
        with self.assertRaisesRegexp(ValueError, 'because of ambiguous order'):
```

        self.assertAllClose(compare_total, res)

    def testAmbiguousOrder(self):
+        with self.assertRaisesRegexp(ValueError, 'because of ambiguous order'):
            with self.cached_session(use_gpu=True):
                self.setup()
                seq_loss = loss.SequenceLoss(
codereview_python_data_3404
axis = kwargs.get("axis", 0) if isinstance(other, type(self)): return self.__constructor__( - self._data_obj._binary_op( - lambda x, y: func(x, y, **kwargs), other._data_obj ) ) else: Do we still want to support adding with a `pandas.Series` when we have our own Series object? axis = kwargs.get("axis", 0) if isinstance(other, type(self)): return self.__constructor__( + self._modin_frame._binary_op( + lambda x, y: func(x, y, **kwargs), other._modin_frame ) ) else:
codereview_python_data_3405
            status (str): Final inventory_status.
        """
        self.completed_at_datetime = date_time.get_utc_now_datetime()
-        if self.inventory_index_warnings:
-            self.inventory_status = IndexState.PARTIAL_SUCCESS
-        else:
-            self.inventory_status = status

    def add_warning(self, session, warning):
        """Add a warning to the inventory.

You should set the status when you are calling the complete method instead of updating the status inside the method

            status (str): Final inventory_status.
        """
        self.completed_at_datetime = date_time.get_utc_now_datetime()
+        self.inventory_status = status

    def add_warning(self, session, warning):
        """Add a warning to the inventory.
codereview_python_data_3409
# Requires Python 2.6+ and Openssl 1.0+
#

import mock

import azurelinuxagent.common.dhcp as dhcp
import azurelinuxagent.common.osutil.default as osutil
-from tests.tools import AgentTestCase, open_patch, patch

class TestDHCP(AgentTestCase):

Why do you need `\n`? It wasn't there when the string was inline.

# Requires Python 2.6+ and Openssl 1.0+
#

+import os
import mock

import azurelinuxagent.common.dhcp as dhcp
import azurelinuxagent.common.osutil.default as osutil
+from tests.tools import AgentTestCase, open_patch, patch, skip_if_predicate_true

class TestDHCP(AgentTestCase):
codereview_python_data_3410
            self.flag[i] = 1

    def _rand_another(self, idx):
-        """Get another random index"""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

Get another random index from the same group as the given index

            self.flag[i] = 1

    def _rand_another(self, idx):
+        """Get another random index from the same group as the given index"""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)
codereview_python_data_3417
    def test_save_file(self, loop, file_svc, tmp_path):
        filename = "test_file.txt"
        payload = b'These are the file contents.'
-        # save temporary test file
        loop.run_until_complete(file_svc.save_file(filename, payload, tmp_path, encrypt=False))
        file_location = tmp_path / filename
        assert os.path.isfile(file_location)

    def test_create_exfil_sub_directory(self, loop, file_svc):
        exfil_dir_name = 'unit-testing-Rocks'

This is good, I would also verify that the contents of the file match what the input payload is.

    def test_save_file(self, loop, file_svc, tmp_path):
        filename = "test_file.txt"
        payload = b'These are the file contents.'
+        # Save temporary test file
        loop.run_until_complete(file_svc.save_file(filename, payload, tmp_path, encrypt=False))
        file_location = tmp_path / filename
+        # Read file contents from saved file
+        file_contents = open(file_location, "r")
        assert os.path.isfile(file_location)
+        assert 'These are the file contents.' == file_contents.read()

    def test_create_exfil_sub_directory(self, loop, file_svc):
        exfil_dir_name = 'unit-testing-Rocks'
codereview_python_data_3421
    .. deprecated:: 1.0.0
    """
    return np.sqrt(3. * B / 8.) / np.pi

Here you will need to provide more details for the deprecation notice. See the deprecation notice underneath `density_from_PDB` for an idea of what to write (possibly mention that it's going away as part of the scheduled removal of `density_from_PDB`)?

    .. deprecated:: 1.0.0
+       :func:`Bfactor2RMSF` is no longer supported and will be removed in 2.0.0.
+       as part of the removal of the :func:`density_from_PDB` function.
    """
    return np.sqrt(3. * B / 8.) / np.pi
codereview_python_data_3425
        return node

    def visit_FuncDefNode(self, node):
-        import Nodes
-        if not node.doc or (isinstance(node, Nodes.DefNode) and
-                            node.fused_py_func):
            return node
        if not self.cdef_docstrings:
            if isinstance(node, CFuncDefNode) and not node.py_func:

You may as well just import this at the top along with the rest of the items from Node.

        return node

    def visit_FuncDefNode(self, node):
+        if not node.doc or (isinstance(node, DefNode) and node.fused_py_func):
            return node
        if not self.cdef_docstrings:
            if isinstance(node, CFuncDefNode) and not node.py_func:
codereview_python_data_3426
def back_transcribe(rna):
-    """Return the RNA back-transcribed sequence into DNA.

    If given a string, returns a new string object.

How about ``Return the RNA sequence back-transcribed into DNA.``

def back_transcribe(rna):
+    """Return the RNA sequence back-transcribed into DNA.

    If given a string, returns a new string object.
codereview_python_data_3427
def p_c_class_options(s):
    objstruct_name = None
    typeobj_name = None
-    check_size = b'min'
    s.expect('[')
    while 1:
        if s.sy != 'IDENT':

I'd like to have this a normal text string rather than a bytes string. Comparison to bytes might fail then parsed from Py3 code with unicode literals. One way to do that might be to use the identifier `min` instead of the string literal `"min"`. Or check the literal node type and decode at need.

def p_c_class_options(s):
    objstruct_name = None
    typeobj_name = None
+    check_size = 'min'
    s.expect('[')
    while 1:
        if s.sy != 'IDENT':
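A short standalone sketch of the bytes-vs-str pitfall the reviewer describes, assuming Python 3 semantics:

```python
# Under Python 3 (or Python 2 with unicode_literals), parsed string
# literals are str, so a bytes default never compares equal.
check_size = b'min'
parsed = 'min'                 # what the Py3 parser hands back
print(parsed == check_size)    # False: str != bytes in Python 3
check_size = 'min'             # the fix: keep everything as text
print(parsed == check_size)    # True
```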
codereview_python_data_3432
#print "rc: "+str(rc) def mqtt_on_message(self, mqttc, obj, msg): #msg.topic+" "+str(msg.qos)+" "+str(msg.payload) - print msg.topic pokemon = json.loads(msg.payload) if pokemon and 'encounter_id' in pokemon: new_list = [x for x in self.bot.mqtt_pokemon_list if x['encounter_id'] is pokemon['encounter_id']] if not (new_list and len(new_list) > 0): - print "[new] " + str(pokemon['encounter_id']) self.bot.mqtt_pokemon_list.append(pokemon) - else: - print "[duplicate] " + str(pokemon['encounter_id']) #def mqtt_on_publish(self, mqttc, obj, mid): #print "mid: "+str(mid) #def mqtt_on_subscribe(self, mqttc, obj, mid, granted_qos): Can you remove this print? #print "rc: "+str(rc) def mqtt_on_message(self, mqttc, obj, msg): #msg.topic+" "+str(msg.qos)+" "+str(msg.payload) pokemon = json.loads(msg.payload) if pokemon and 'encounter_id' in pokemon: new_list = [x for x in self.bot.mqtt_pokemon_list if x['encounter_id'] is pokemon['encounter_id']] if not (new_list and len(new_list) > 0): self.bot.mqtt_pokemon_list.append(pokemon) #def mqtt_on_publish(self, mqttc, obj, mid): #print "mid: "+str(mid) #def mqtt_on_subscribe(self, mqttc, obj, mid, granted_qos):
codereview_python_data_3433
def hausdorff_avg(P, Q):
-    r"""Calculate the average (undirected) Hausdorff distance between two paths.

    *P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
    steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,

~~symmetric~~ - omit "(undirected)"

Btw, thanks for updating the docs.

def hausdorff_avg(P, Q):
+    r"""Calculate the average Hausdorff distance between two paths.

    *P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time
    steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
codereview_python_data_3434
                anchors.contiguous(), bbox_preds, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
-            mlvl_confids.append(confidences)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
-        mlvl_confids = torch.cat(mlvl_confids)
        if self.use_sigmoid_cls:
            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)

do we also need to change `mlvl_confid` -> `mlvl_confidences`>

                anchors.contiguous(), bbox_preds, max_shape=img_shape)
            mlvl_bboxes.append(bboxes)
            mlvl_scores.append(scores)
+            mlvl_confidences.append(confidences)
        mlvl_bboxes = torch.cat(mlvl_bboxes)
        if rescale:
            mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)
        mlvl_scores = torch.cat(mlvl_scores)
+        mlvl_confidences = torch.cat(mlvl_confidences)
        if self.use_sigmoid_cls:
            padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)
            mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)
codereview_python_data_3445
    def __repr__(self):
        return "SIDData({0})".format(self.__dict__)

-    def _get_buffer(self, bars, field='price', raw=False):
        """
        Gets the result of history for the given number of bars and field.

when do we call this with `raw=False` or the default?

    def __repr__(self):
        return "SIDData({0})".format(self.__dict__)

+    def _get_buffer(self, bars, field='price'):
        """
        Gets the result of history for the given number of bars and field.
codereview_python_data_3447
        else:
            try:
                data = type(data)(values, dtype=old_dtype)
-            except Exception:
                data = values
    elif is_dict_like(data) and not isinstance(
        data, (pandas.Series, Series, pandas.DataFrame, DataFrame)

When would an Exception get thrown here?

        else:
            try:
                data = type(data)(values, dtype=old_dtype)
+            except TypeError:
                data = values
    elif is_dict_like(data) and not isinstance(
        data, (pandas.Series, Series, pandas.DataFrame, DataFrame)
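For illustration, one concrete way a TypeError (and only a TypeError) can surface from a constructor call like this — a hedged sketch using a stand-in type rather than the real pandas machinery:

```python
values = [1, 2, 3]
old_dtype = 'int64'
try:
    # list() accepts no dtype keyword, unlike e.g. numpy constructors,
    # so this raises TypeError: list() takes no keyword arguments
    data = list(values, dtype=old_dtype)
except TypeError:
    # the narrow except documents the expected failure mode;
    # genuinely unexpected errors still propagate
    data = values
print(data)  # [1, 2, 3]
```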
codereview_python_data_3449
-import bisect

import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset

Should this be `__getitem__()`? I guess there is no need to overwrite other methods than the `__init__()` method.

import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
codereview_python_data_3458
    return onnx_outputs

-def flat(outputs):
    """Convert the torch forward outputs containing tuple or list to a list
    only containing torch.Tensor.

Could change to a more self-explaining function instead of `flat`.

    return onnx_outputs

+def convert_list(outputs):
    """Convert the torch forward outputs containing tuple or list to a list
    only containing torch.Tensor.
codereview_python_data_3460
            # Change the exchange with each mapping period. We don't
            # currently support point in time exchange information,
            # so we just take the most recent by end date.
-            'exchange': 'EXCHANGE-%d' % n,
        }
        for n, date in enumerate(dates)
        for sid in sids

Out of paranoia, should we give each (or at least one of) the assets a different exchange to make sure that we're correctly deduplicating only within each asset?

            # Change the exchange with each mapping period. We don't
            # currently support point in time exchange information,
            # so we just take the most recent by end date.
+            'exchange': 'EXCHANGE-%d-%d' % (sid, n),
        }
        for n, date in enumerate(dates)
        for sid in sids
codereview_python_data_3465
# whether the in_docker check should always return true
OVERRIDE_IN_DOCKER = is_env_true("OVERRIDE_IN_DOCKER")

-# number of times proxy will retry the request to the backend service. Set to 0 to disable retries (especially useful for debugging)
-_proxy_retries = os.environ.get("PROXY_MAX_RETRIES")
-PROXY_MAX_RETRIES = None if _proxy_retries is None else int(_proxy_retries)
-

def has_docker():
    try:

nit: Let's better hedge for non-`None` values like empty strings (we may see values like `PROXY_MAX_RETRIES=` getting passed in, causing the `int` cast to fail):
```
_proxy_retries = str(os.environ.get("PROXY_MAX_RETRIES") or "").strip()
PROXY_MAX_RETRIES = None if not _proxy_retries else int(_proxy_retries)
```

# whether the in_docker check should always return true
OVERRIDE_IN_DOCKER = is_env_true("OVERRIDE_IN_DOCKER")


def has_docker():
    try:
codereview_python_data_3475
        self.privilege = privilege
        self.timeout = timeout
        self.repeatable = repeatable
-        if variations:
-            self.variations = [Variation(description=v['description'], command=v['command']) for v in variations]
-        else:
-            self.variations = []
        if access:
            self.access = self.Access(access)

You can follow the same format as above:
```Python
self.variations = [Variation(description=v['description'], command=v['command']) for v in variations] if variations else []
```

        self.privilege = privilege
        self.timeout = timeout
        self.repeatable = repeatable
+        self.variations = [Variation(description=v['description'], command=v['command']) for v in variations] if variations else []
        if access:
            self.access = self.Access(access)
codereview_python_data_3486
        full = self.adaptor.get_subseq_as_string(
            self.primary_id, self.start + start, self.start + end
        )
-        return full[:: step].encode("ASCII")

    def __bytes__(self):
        """Return the full sequence as bytes."""

Is this bytes or unicode?

        full = self.adaptor.get_subseq_as_string(
            self.primary_id, self.start + start, self.start + end
        )
+        return full[::step].encode("ASCII")

    def __bytes__(self):
        """Return the full sequence as bytes."""
codereview_python_data_3487
        launcher.set_sanitizer_options(target_path)

        cleanse_tmp_dir = self._create_temp_corpus_dir('cleanse-workdir')
-        artifact_prefix = constants.ARTIFACT_PREFIX_FLAG + cleanse_tmp_dir + '/'
        result = runner.cleanse_crash(
            input_path,
            output_path,

nit: can create a helper for _get_artifact_prefix(output_dir)

        launcher.set_sanitizer_options(target_path)

        cleanse_tmp_dir = self._create_temp_corpus_dir('cleanse-workdir')
+        artifact_prefix = self._artifact_prefix(cleanse_tmp_dir)
        result = runner.cleanse_crash(
            input_path,
            output_path,
codereview_python_data_3498
            If there are no project_ids enabled for bigquery an empty list
            will be returned.
        """
        bigquery_projects_api = self.service.projects()
        request = bigquery_projects_api.list()

this is super nitpicky, and I think we are probably not consistent about it, but does it make sense to make this (and the other for loop) a list comprehension? you don't have to, if you don't want to.

            If there are no project_ids enabled for bigquery an empty list
            will be returned.
        """
+        key='projects'
        bigquery_projects_api = self.service.projects()
        request = bigquery_projects_api.list()
codereview_python_data_3500
from . import carafe_cuda, carafe_naive_cuda

-class CARAFENAIVEFunction(Function):

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):

Better to name it `CARAFENaive`.

from . import carafe_cuda, carafe_naive_cuda

+class CARAFENaiveFunction(Function):

    @staticmethod
    def forward(ctx, features, masks, kernel_size, group_size, scale_factor):
codereview_python_data_3510
from config import local_config
from flask import request
-from flask import Response

from handlers import base_handler_flask
from libs import auth
from libs import handler_flask

Can we just set_cookie on the return value from render_json, then return it?

from config import local_config
from flask import request

from handlers import base_handler_flask
from libs import auth
from libs import handler_flask
codereview_python_data_3515
class LogLevelInt(Enum):
-    """Represents different logging levels by their short codes"""

    TRACE = 0
    DEBUG = 1

```suggestion
    """Represents different logging levels by their short codes."""
```

class LogLevelInt(Enum):
+    """Represents different logging levels by their short codes."""

    TRACE = 0
    DEBUG = 1
codereview_python_data_3517
        x_seg = x_size[0] * [x_size[1]]
    else:
        x_seg = [F.shape(x)[0]]
-    if algorithm == 'nn-descent':
-        return nndescent_knn_graph(x, k, x_seg, dist=dist)
-    else:
-        out = knn(x, x_seg, x, x_seg, k, algorithm=algorithm, dist=dist)
-        row, col = out[1], out[0]
-        return convert.graph((row, col))

def _knn_graph_blas(x, k, dist='euclidean'):
    r"""Construct a graph from a set of points according to k-nearest-neighbor (KNN).

Why not wrap in `knn` as well?

        x_seg = x_size[0] * [x_size[1]]
    else:
        x_seg = [F.shape(x)[0]]
+    out = knn(k, x, x_seg, algorithm=algorithm, dist=dist)
+    row, col = out[1], out[0]
+    return convert.graph((row, col))

def _knn_graph_blas(x, k, dist='euclidean'):
    r"""Construct a graph from a set of points according to k-nearest-neighbor (KNN).
codereview_python_data_3518
Shows how to use the AWS SDK for Python (Boto3) with Amazon Simple Storage Service
(Amazon S3) to generate a presigned URL that can perform an action for a limited
-time with the generator's credentials. Also shows how to use the Requests package
to make a request with the URL.
"""

the generator's -> your?

Shows how to use the AWS SDK for Python (Boto3) with Amazon Simple Storage Service
(Amazon S3) to generate a presigned URL that can perform an action for a limited
+time with your credentials. Also shows how to use the Requests package
to make a request with the URL.
"""
codereview_python_data_3519
        # extensions on incarnation change, we need to maintain its state.
        # Setting the status as Initialize here. This would be overridden as soon as the first GoalState is processed
        # (once self._extension_processing_allowed() is True).
-        self.__gs_aggregate_status = GoalStateAggregateStatus(status=GoalStateState.Initialize, seq_no="-1",
                                                              code=GoalStateAggregateStatusCodes.Success,
                                                              message="Initializing new GoalState")

should we save this to disk and capture in inspect disk for debugging?

        # extensions on incarnation change, we need to maintain its state.
        # Setting the status as Initialize here. This would be overridden as soon as the first GoalState is processed
        # (once self._extension_processing_allowed() is True).
+        self.__gs_aggregate_status = GoalStateAggregateStatus(status=GoalStateStatus.Initialize, seq_no="-1",
                                                              code=GoalStateAggregateStatusCodes.Success,
                                                              message="Initializing new GoalState")
codereview_python_data_3521
}

def test_duplicate_vote_throws_critical_error(b):
-    class TestVoting(Voting):
-        @classmethod
-        def verify_vote_signature(cls, vote):
-            return True
    keyring = 'abc'
    block = {'id': 'xyz', 'block': {'voters': 'ab'}}
    votes = [{

it's common practice to just mock this kind of behaviour

}

+@patch('bigchaindb.voting.Voting.verify_vote_signature', return_value=True)
def test_duplicate_vote_throws_critical_error(b):
    keyring = 'abc'
    block = {'id': 'xyz', 'block': {'voters': 'ab'}}
    votes = [{
codereview_python_data_3526
                token1_deserialized = deserialize_ethereum_address(swap_token1['id'])
                from_address_deserialized = deserialize_ethereum_address(swap['sender'])
                to_address_deserialized = deserialize_ethereum_address(swap['to'])
-            except DeserializationError:
-                log.error('Failed to deserialize address in swap')

            token0 = get_ethereum_token(
                symbol=swap_token0['symbol'],

as above raise remote error. here you also forgot a `continue` which would have resulted in a BOOM

                token1_deserialized = deserialize_ethereum_address(swap_token1['id'])
                from_address_deserialized = deserialize_ethereum_address(swap['sender'])
                to_address_deserialized = deserialize_ethereum_address(swap['to'])
+            except DeserializationError as e:
+                msg = (
+                    f'Failed to deserialize addresses in trade from uniswap graph with'
+                    f'token 0: {swap_token0["id"]}, token 1: {swap_token1["id"]}, '
+                    f'swap sender: {swap["sender"]}, swap receiver {swap["to"]}'
+                )
+                log.error(msg)
+                raise RemoteError(msg) from e

            token0 = get_ethereum_token(
                symbol=swap_token0['symbol'],
codereview_python_data_3527
        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of classes.
        weight (torch.Tensor, optional): The weight of loss for each
-            prediction. Defaults to None.
-        alpha (float, optional): A balance factor for Varifocal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.

Better to clarify this alpha is used to balance the negative part which is different from the convention of the normal FL.

        target (torch.Tensor): The learning target of the iou-aware
            classification score with shape (N, C), C is the number of classes.
        weight (torch.Tensor, optional): The weight of loss for each
+            prediction. Defaults to None.
+        alpha (float, optional): A balance factor for the negative part of
+            Varifocal Loss, which is different from the alpha of Focal Loss.
            Defaults to 0.75.
        gamma (float, optional): The gamma for calculating the modulating
            factor. Defaults to 2.0.
codereview_python_data_3536
            >> reply()
            >> ConnectionClosed(tctx.client)
    )

def test_close_error(ws_testdata):

Can we please assert the close code here?

            >> reply()
            >> ConnectionClosed(tctx.client)
    )
+    # The \x03\xe8 above is code 1000 (normal closure).
+    # But 1006 (ABNORMAL_CLOSURE) is expected, because the connection was already closed.
+    assert flow.websocket.close_code == 1006

def test_close_error(ws_testdata):
codereview_python_data_3538
                cls_score)

            # Empty proposal.
-            if cls_score.shape[0] == 0:
                break

            roi_labels = torch.where(

Simply break here might cause different graph during training?

                cls_score)

            # Empty proposal.
+            if cls_score.numel() == 0:
                break

            roi_labels = torch.where(
codereview_python_data_3546
    def _append_list_of_managers(self, others, axis, **kwargs):
        if not isinstance(others, list):
            others = [others]
-        if self._is_transposed:
            return (
                self.transpose()
                ._append_list_of_managers(

Do we need to check if the others are already transposed or not?

    def _append_list_of_managers(self, others, axis, **kwargs):
        if not isinstance(others, list):
            others = [others]
        if self._is_transposed:
+            # If others are transposed, we handle that behavior correctly in
+            # `copartition`, but it is not handled correctly in the case that `self` is
+            # transposed.
            return (
                self.transpose()
                ._append_list_of_managers(
codereview_python_data_3551
    where `d(v, u)` is the shortest-path distance between `v` and `u`.

    If 'sources' is given as an argument, the returned harmonic centrality
-    values are calculated as the reciprocal of the shortest path distances
-    from the nodes specified in 'sources' to 'u' instead of from all nodes
-    to 'u'.

    Notice that higher values indicate higher centrality.

```suggestion
    values are calculated as the sum of the reciprocals of the shortest
    path distances from the nodes specified in 'sources' to 'u' instead
    of from all nodes to 'u'.
```

    where `d(v, u)` is the shortest-path distance between `v` and `u`.

    If 'sources' is given as an argument, the returned harmonic centrality
+    values are calculated as the sum of the reciprocals of the shortest
+    path distances from the nodes specified in 'sources' to 'u' instead
+    of from all nodes to 'u'.

    Notice that higher values indicate higher centrality.
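A small worked example of the clarified wording, assuming the `sources` parameter behaves as this docstring describes:

```python
import networkx as nx

G = nx.path_graph(3)               # 0 - 1 - 2
print(nx.harmonic_centrality(G))   # {0: 1.5, 1: 2.0, 2: 1.5}
# Restricted to sources={0}, node 2 sums the reciprocals of distances
# from node 0 only: 1/d(0, 2) = 1/2
print(nx.harmonic_centrality(G, sources={0})[2])   # 0.5
```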
codereview_python_data_3552
    if not header:
        raise ValueError("Empty file.")
    if len(header) < 112:
-        raise ValueError("Improper header, cannot read 112 bytes from handle" % length)

    (version, type, topology, length, neg_length, com_length) = unpack(
        ">BBB25xII60xI12x", header
    )

```
$ flake8
./Bio/SeqIO/XdnaIO.py:172:30: BLK100 Black would make changes.
./Bio/SeqIO/XdnaIO.py:172:85: F821 undefined name 'length'
```
This line is currently 91 characters long, so black would break that - but we don't need the ``% length`` bit which also solves the long line.

    if not header:
        raise ValueError("Empty file.")
    if len(header) < 112:
+        raise ValueError("Improper header, cannot read 112 bytes from handle")

    (version, type, topology, length, neg_length, com_length) = unpack(
        ">BBB25xII60xI12x", header
    )
codereview_python_data_3557
                                    has_fixed_positional_count, has_kw_only_args,
                                    all_args, argtuple_error_label, code):
        # First we count how many arguments must be passed as positional
-        num_required_posonly_args = num_posonly_args = 0
        for i, arg in enumerate(all_args):
            if arg.pos_only:
-                num_posonly_args += 1
                if not arg.default:
                    num_required_posonly_args += 1

I think we know this already, right? There's an instance attribute for it.

                                    has_fixed_positional_count, has_kw_only_args,
                                    all_args, argtuple_error_label, code):
        # First we count how many arguments must be passed as positional
+        num_required_posonly_args = num_pos_only_args = 0
        for i, arg in enumerate(all_args):
            if arg.pos_only:
+                num_pos_only_args += 1
                if not arg.default:
                    num_required_posonly_args += 1
codereview_python_data_3566
class StateTsDbStorage():
-    def __init__(self, name, db_dir, db_name):
        logger.debug("Initializing timestamp-root_hash storage for revocation")
-        self._storage = KeyValueStorageLeveldbIntKeys(db_dir, db_name)
        self._name = name

    def __repr__(self):

I think this needs to be moved to KeyValueStorage Interface level, so that we can easily replace leveldb by rockdsb in this class.

class StateTsDbStorage():
+    def __init__(self, name, storage):
        logger.debug("Initializing timestamp-root_hash storage for revocation")
+        self._storage = storage
        self._name = name

    def __repr__(self):
codereview_python_data_3571
        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
-        l2_dim=512,
        l2_norm_scale=20),
    neck=None,
    bbox_head=dict(

This param can be obtained from previous layers.

        ceil_mode=True,
        out_indices=(3, 4),
        out_feature_indices=(22, 34),
        l2_norm_scale=20),
    neck=None,
    bbox_head=dict(
codereview_python_data_3579
    def parse(self, manager: "CommandManager", t: type, s: str) -> flow.Flow:
        try:
-            flows = manager.execute("view.flows.resolve '%s'" % (s))
        except exceptions.CommandError as e:
            raise exceptions.TypeError(str(e)) from e
        if len(flows) != 1:

This looks better than before, but we'll now likely run into issues with `'` characters in the spec. Maybe we can just use `manager.call_strings` instead?

    def parse(self, manager: "CommandManager", t: type, s: str) -> flow.Flow:
        try:
+            flows = manager.call_strings("view.flows.resolve", [s])
        except exceptions.CommandError as e:
            raise exceptions.TypeError(str(e)) from e
        if len(flows) != 1:
codereview_python_data_3580
from space_age import SpaceAge

-# test cases adapted from `x-common//canonical-data.json` @ version: 1.0.0

class SpaceAgeTest(unittest.TestCase):

This should reference `problem-specifications`:
```
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0
```

from space_age import SpaceAge

+# Tests adapted from `problem-specifications//canonical-data.json` @ v1.0.0

class SpaceAgeTest(unittest.TestCase):
codereview_python_data_3582
from torch import nn

-class Swish(nn.Module):
-    """Swish activation function: x * sigmoid(x)."""
-
-    def __init__(self):
-        super(Swish, self).__init__()
-
-    def forward(self, x):
-        return x * torch.sigmoid(x)
-
-
class MemoryEfficientSwishImplementation(torch.autograd.Function):

    @staticmethod

Is it possible to keep just `MemoryEfficientSwish` , set it as `Swish` and delete the naive implementation?

from torch import nn

class MemoryEfficientSwishImplementation(torch.autograd.Function):

    @staticmethod
codereview_python_data_3583
def _get_redis_ip(project):
    """Get the redis IP address."""
    _, ip = common.execute('gcloud redis instances describe redis-instance '
-                           '--project={project} --region=us-central1 '
-                           '--format="value(host)"'.format(project=project))
    return ip.strip()

some way to not hardcode us-central1

def _get_redis_ip(project):
    """Get the redis IP address."""
+    region = appengine.region(project)
    _, ip = common.execute('gcloud redis instances describe redis-instance '
+                           '--project={project} --region={region} '
+                           '--format="value(host)"'.format(
+                               project=project, region=region))
    return ip.strip()
codereview_python_data_3587
if cls == "NTP": if isinstance(self, NTP): return True - elif issubclass(globals()[cls], NTP): if isinstance(self, cls): return True return super(NTP, self).haslayer(cls) The best way to fix that is ```python elif not isinstance(cls, str) and issubclass(cls, NTP): ``` if cls == "NTP": if isinstance(self, NTP): return True + elif not isinstance(cls, str) and issubclass(cls, NTP): if isinstance(self, cls): return True return super(NTP, self).haslayer(cls)
codereview_python_data_3589
    job.description = description
    job.environment_string = environment_string
    job.templates = templates
-    job.project = project

    blob_info = self.get_upload()
    if blob_info:

How about setting this as a `_pre_put_hook` in data_types.Job? That will be a bit more reliable, and can prevent issues in the future when we write e.g. migration scripts.

    job.description = description
    job.environment_string = environment_string
    job.templates = templates

    blob_info = self.get_upload()
    if blob_info:
codereview_python_data_3591
def spiral_matrix(size):
-    matrix = [[0]*size for _ in range(size)]
    idx = 0
    jdx = -1
    element = 1

```suggestion
    matrix = [[0]*size for row in range(size)]
```

def spiral_matrix(size):
+    matrix = [[0]*size for row in range(size)]
    idx = 0
    jdx = -1
    element = 1
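As an aside, a sketch of why a comprehension is used to build the rows at all — the shorter-looking `[[0]*size]*size` aliases a single row object:

```python
bad = [[0] * 3] * 3                   # three references to ONE row list
bad[0][0] = 1
print(bad)                            # [[1, 0, 0], [1, 0, 0], [1, 0, 0]]

good = [[0] * 3 for row in range(3)]  # a fresh row per iteration
good[0][0] = 1
print(good)                           # [[1, 0, 0], [0, 0, 0], [0, 0, 0]]
```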
codereview_python_data_3592
        self.uncompilable_modules = {}
        self.blocked_modules = ['Cython', 'pyxbuild', 'pyximport.pyxbuild', 'distutils']

    def find_module(self, fullname, package_path=None):
        if fullname in sys.modules:
            return None
-        if fullname.startswith('Cython.') or fullname.startswith('distutils.'):
            return None
        if fullname in self.blocked_modules:
            # prevent infinite recursion

I'd rather have this depend on `blocked_modules`.

        self.uncompilable_modules = {}
        self.blocked_modules = ['Cython', 'pyxbuild', 'pyximport.pyxbuild', 'distutils']
+        self.blocked_packages = ['Cython.', 'distutils.']

    def find_module(self, fullname, package_path=None):
        if fullname in sys.modules:
            return None
+        if any([fullname.startswith(pkg) for pkg in self.blocked_packages]):
            return None
        if fullname in self.blocked_modules:
            # prevent infinite recursion
codereview_python_data_3594
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
-                if self.relu_extra_convs:
                    caffe2_xavier_init(m)
                else:
                    xavier_init(m, distribution='uniform')

There is no logical relation between these two statements.

    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
+                if self.caffe2_xavier_initialize:
                    caffe2_xavier_init(m)
                else:
                    xavier_init(m, distribution='uniform')
codereview_python_data_3597
rows.append(u"%s%s" % (fill, json.dumps(line, sort_keys=True))) - return str('\n'.join(rows)) def _create_mock_service_config(self): mock_data_access = mock.MagicMock() nit: join method returns a string, the str() is not needed rows.append(u"%s%s" % (fill, json.dumps(line, sort_keys=True))) + return '\n'.join(rows) def _create_mock_service_config(self): mock_data_access = mock.MagicMock()
codereview_python_data_3605
class SffTrimedRandomAccess(SffRandomAccess):
-    """Random access to a SFF file with sequence records trimmed."""

    def get(self, offset):
-        """Return the Sequence records trimmed at the given offset."""
        handle = self._handle
        handle.seek(offset)
        return SeqIO.SffIO._sff_read_seq_record(handle,

How about:
```
"""Random access to an SFF file with defined trimming applied to each sequence."""
```

class SffTrimedRandomAccess(SffRandomAccess):
+    """Random access to an SFF file with defined trimming applied to each sequence."""

    def get(self, offset):
+        """Return the SeqRecord starting at the given offset."""
        handle = self._handle
        handle.seek(offset)
        return SeqIO.SffIO._sff_read_seq_record(handle,
codereview_python_data_3606
A "prefix tree" represents the prefix structure of the strings. Each node represents a prefix of some string. The root represents the empty prefix with children for the single letter prefixes which - in turn have children for each double letter prefixes starting with the single letter corresponding to the parent node, and so on. More generally the prefixes do not need to be strings. A prefix refers ```suggestion in turn have children for each double letter prefix starting with ``` A "prefix tree" represents the prefix structure of the strings. Each node represents a prefix of some string. The root represents the empty prefix with children for the single letter prefixes which + in turn have children for each double letter prefix starting with the single letter corresponding to the parent node, and so on. More generally the prefixes do not need to be strings. A prefix refers
codereview_python_data_3608
    SELECT tables.TABLE_NAME 'table'
    from information_schema.tables as tables
    inner join snapshot_cycles as snap
-    ON snap.complete_time < DATE_SUB(NOW(), INTERVAL %s DAY)
-    AND tables.TABLE_NAME LIKE CONCAT('%%', snap.cycle_timestamp);
"""

DROP_TABLE = "DROP TABLE {0}"

`complete_time` is not always populated; like when someone halts the inventory process in mid-stream. It might be better to change to `snap.start_time`. What happens if ```0``` is passed into ```INTERVAL %s DAY```? Will it work for Angelo's use case of deleting all previous tables?

    SELECT tables.TABLE_NAME 'table'
    from information_schema.tables as tables
    inner join snapshot_cycles as snap
+    ON snap.start_time < DATE_SUB(NOW(), INTERVAL %s DAY)
+    AND tables.TABLE_NAME LIKE CONCAT('%%', snap.cycle_timestamp)
+    WHERE tables.TABLE_SCHEMA = %s;
"""

DROP_TABLE = "DROP TABLE {0}"
codereview_python_data_3609
from ..lib.transformations import rotation_matrix
from ..core.groups import AtomGroup

-def rotateby(angle, direction, center="geometry", pbc=None, ag=None, position=[]):
    '''
    Rotates the trajectory by a given angle on a given axis. The axis is defined by
    the user, combining the direction vector and a position. This position can be the center

iirc there's some weird stuff that can happen with `position=[]` as a kwarg. Like I think you can append to that list and the default drifts over time... use `=None`

from ..lib.transformations import rotation_matrix
from ..core.groups import AtomGroup

+def rotateby(angle, direction, center="geometry", pbc=None, ag=None, position=None):
    '''
    Rotates the trajectory by a given angle on a given axis. The axis is defined by
    the user, combining the direction vector and a position. This position can be the center
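A minimal standalone sketch of the mutable-default drift the reviewer warns about (hypothetical function, not MDAnalysis code):

```python
def collect(item, bucket=[]):     # one list shared across ALL calls
    bucket.append(item)
    return bucket

print(collect(1))   # [1]
print(collect(2))   # [1, 2]  -- the default has "drifted"

def collect_fixed(item, bucket=None):
    if bucket is None:
        bucket = []               # fresh list on every call
    bucket.append(item)
    return bucket
```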
codereview_python_data_3613
        dilation=1,
        style='caffe',
        normalize=dict(type='BN', frozen=True),
-        norm_eval=True,
-        with_cp=False),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,

This field can be just omitted.

        dilation=1,
        style='caffe',
        normalize=dict(type='BN', frozen=True),
+        norm_eval=True),
    rpn_head=dict(
        type='RPNHead',
        in_channels=1024,
codereview_python_data_3616
    def handle_event(self, event, sender, level, formatted_msg, data):
        # Honour config settings if log level disabled
-        for event_level in ['info', 'warning', 'error', 'critical', 'debug']:
-            if event_level == level and hasattr(self.bot.config, event_level) and not getattr(self.bot.config, event_level):
-                self._last_event = event
-                return
-
        # Prepare message string
        message = None
        if formatted_msg:

Why a loop ?

    def handle_event(self, event, sender, level, formatted_msg, data):
        # Honour config settings if log level disabled
+        if hasattr(self.bot.config, level) and not getattr(self.bot.config, level):
+            self._last_event = event
+            return
+
        # Prepare message string
        message = None
        if formatted_msg:
codereview_python_data_3620
                message.error('Edited element vanished')
                ed.backup()
        except webelem.Error as e:
            ed.backup()
-            raise cmdexc.CommandError(str(e))

    @cmdutils.register(instance='command-dispatcher', maxsplit=0,
                       scope='window')

Probably would make sense to make those two consistent - i.e. do `ed.backup()` and then `raise cmdexc.CommandError` at both places, or use `message.error` and then `ed.backup()` at both places.

                message.error('Edited element vanished')
                ed.backup()
        except webelem.Error as e:
+            message.error(str(e))
            ed.backup()

    @cmdutils.register(instance='command-dispatcher', maxsplit=0,
                       scope='window')
codereview_python_data_3621
    _configure_logging(inventory_flags.get('loglevel'))

-    inventory_conf = inventory_flags.get('config_path')
-    if inventory_conf is None:
        LOGGER.error('Path to pipeline config needs to be specified.')
        sys.exit()

nit: Can we make ```inventory_conf``` to be ```config_path``` to be consistent? So, this line would be:
```python
config_path = inventory_flags.get('config_path')
```

    _configure_logging(inventory_flags.get('loglevel'))

+    config_path = inventory_flags.get('config_path')
+    if config_path is None:
        LOGGER.error('Path to pipeline config needs to be specified.')
        sys.exit()
codereview_python_data_3622
-from nose.tools import assert_equal

import networkx as nx
from networkx.algorithms.approximation.steinertree import metric_closure
from networkx.algorithms.approximation.steinertree import steiner_tree

class TestSteinerTree:

Perhaps you can use the `assert_edges_equal` helper function from `networkx.utils` here, since the order of the edges generated by `S.edges()` may not always be the same.

import networkx as nx
from networkx.algorithms.approximation.steinertree import metric_closure
from networkx.algorithms.approximation.steinertree import steiner_tree
+from networkx.testing.utils import assert_edges_equal

class TestSteinerTree:
codereview_python_data_3628
        ConditionalField(
            BitField("TSPCMI", 0, 1), lambda pkt: pkt.length > 6),
        ConditionalField(
-            BitField("Spare1", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
-            BitField("Spare2", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
-            BitField("Spare3", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
            BitField("N5GNMI", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(

Could you make this "SPARE1" for the sake of consistency?

        ConditionalField(
            BitField("TSPCMI", 0, 1), lambda pkt: pkt.length > 6),
        ConditionalField(
+            BitField("SPARE1", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
+            BitField("SPARE2", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
+            BitField("SPARE3", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
            BitField("N5GNMI", 0, 1), lambda pkt: pkt.length > 7),
        ConditionalField(
codereview_python_data_3642
    try:
        result = do_corpus_pruning(context, last_execution_failed, revision)
-        record_cross_pollination_stats(result.cross_pollination_stats)
        _save_coverage_information(context, result)
        _process_corpus_crashes(context, result)
    except CorpusPruningException:

nit: for consistency, rename function with _ prefix.

    try:
        result = do_corpus_pruning(context, last_execution_failed, revision)
+        _record_cross_pollination_stats(result.cross_pollination_stats)
        _save_coverage_information(context, result)
        _process_corpus_crashes(context, result)
    except CorpusPruningException:
codereview_python_data_3646
            return False
        target = getattr(node, key)
        if isinstance(pattern, str):
-            return isinstance(target, basestring) and re.match(
                pattern + "$", target
            )
        if isinstance(pattern, bool):

Missed one, spotted by flake8 and me though.

            return False
        target = getattr(node, key)
        if isinstance(pattern, str):
+            return isinstance(target, str) and re.match(
                pattern + "$", target
            )
        if isinstance(pattern, bool):
codereview_python_data_3650
                      Tensor from which to copy
            `ptr` : LoDTensor data pointer
                      Destination of the copy
-            `cuda_stream` : Any value that can be caste to cudaStream_t
                      CUDA stream to be used for the copy
                      (if not provided, an internal user stream will be selected)
            """

```suggestion
            `cuda_stream` : Any value that can be cast to cudaStream_t
```
But maybe we should call it `representing` cudaStream_t? Accessing some attributes is not exactly casting, right?

                      Tensor from which to copy
            `ptr` : LoDTensor data pointer
                      Destination of the copy
+            `cuda_stream` : cudaStream_t handle or any value that can be cast to cudaStream_t
                      CUDA stream to be used for the copy
                      (if not provided, an internal user stream will be selected)
            """
codereview_python_data_3651
temp_seq = "" # split the sequence into exons - exons = [seq[i:j] for i, j in zip(positions, positions[1:])] for exon in exons: - # convert exon (string) to seq in order to reverse and complement it - seq = Seq(exon).reverse_complement() # then concatenate it to the previous ones - temp_seq += str(seq) seq = Seq(temp_seq) result_multiseq.append(SeqRecord(seq, If you are working with strings, just use the ``reverse_complement()`` function instead or converting to ``Seq`` object and back again. i.e. ``from Bio.Seq import reverse_complement as _rc`` or similar. temp_seq = "" # split the sequence into exons + exons = [seq[exonstart:exonend] for exonstart, exonend in zip(positions, positions[1:])] for exon in exons: + # reverse and complement the exon + seq = _rc(exon) # then concatenate it to the previous ones + temp_seq += seq seq = Seq(temp_seq) result_multiseq.append(SeqRecord(seq,
codereview_python_data_3667
message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) except ExtensionUpdateError as e: - # Only setting the handler status here as the error has already been reported from the old version - ext_handler_i.set_handler_status(message=ustr(e), code=e.code) except ExtensionOperationError as e: self.handle_ext_handler_error(ext_handler_i, e, e.code) except ExtensionDownloadError as e: this works fine now, but if we ever add/change logic in handle_ext_handler_error (which currently is the central place to handle errors) we may miss this one... consider adding a flag to handle_ext_handler_error to do/not do the logging and call it from here as well. message = u"Unknown ext handler state:{0}".format(state) raise ExtensionError(message) except ExtensionUpdateError as e: + # Not reporting the error as it has already been reported from the old version + self.handle_ext_handler_error(ext_handler_i, e, e.code, report_telemetry_event=False) except ExtensionOperationError as e: self.handle_ext_handler_error(ext_handler_i, e, e.code) except ExtensionDownloadError as e:
codereview_python_data_3676
        operation[0].autonomous = 0 if operation[0].autonomous else 1
        self.log.debug('Toggled operation=%s autonomous to %s' % (op_id, bool(autonomous)))

-    async def retrieve_versions(self):
-        app_svc = self.get_service('app_svc')
-        plugins = await self.get_service('data_svc').locate('plugins')
-        plug_versions = {p.name: p.version for p in plugins}
-        return dict(core=app_svc.version, plugins=plug_versions)
-
    """ PRIVATE """

    async def _build_operation_object(self, access, data):

I think you can kill this function

        operation[0].autonomous = 0 if operation[0].autonomous else 1
        self.log.debug('Toggled operation=%s autonomous to %s' % (op_id, bool(autonomous)))

    """ PRIVATE """

    async def _build_operation_object(self, access, data):
codereview_python_data_3677
import ssl import sys import time -import googlemaps from datetime import timedelta from getpass import getpass from pgoapi.exceptions import NotLoggedInException We don't need import the googlemaps here, right? import ssl import sys import time from datetime import timedelta from getpass import getpass from pgoapi.exceptions import NotLoggedInException

codereview_python_data_3679
"""Return a filter to be used to select entities with translations with warnings. This filter will return an entity if at least one of its plural forms - has an *active* translation with a warning. :arg Locale locale: a Locale object to get translations for [nit] I find those methods (errors and warnings) very similar, maybe we could create a base method for them? """Return a filter to be used to select entities with translations with warnings. This filter will return an entity if at least one of its plural forms + has an approved or fuzzy translation with a warning. :arg Locale locale: a Locale object to get translations for
codereview_python_data_3681
trajectory: bool, optional if ``True``, attaches a :class:`MDAnalysis.coordinates.memory.MemoryReader` allowing - coordinates to be set and written. Default is ``False`` velocities: bool, optional include velocities in the :class:`MDAnalysis.coordinates.memory.MemoryReader` ```suggestion coordinates to be set and written. ``` Let's start removing default values from docstrings? trajectory: bool, optional if ``True``, attaches a :class:`MDAnalysis.coordinates.memory.MemoryReader` allowing + coordinates to be set and written. velocities: bool, optional include velocities in the :class:`MDAnalysis.coordinates.memory.MemoryReader`
codereview_python_data_3683
super(F1Score, self).__init__(name=name) self.num_classes = num_classes if beta <= 0: - raise ValueError("beta value should be greater tha zero") else: self.beta = beta if average not in (None, 'micro', 'macro', 'weighted'): Typo: tha -> than super(F1Score, self).__init__(name=name) self.num_classes = num_classes if beta <= 0: + raise ValueError("beta value should be greater than zero") else: self.beta = beta if average not in (None, 'micro', 'macro', 'weighted'):
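The fail-fast validation from this fix, isolated into a tiny runnable check (the standalone function name is illustrative; in the class it lives in the constructor so a bad hyperparameter fails at construction time):

```python
def validate_beta(beta):
    if beta <= 0:
        raise ValueError("beta value should be greater than zero")
    return beta

print(validate_beta(0.5))  # 0.5

try:
    validate_beta(0)
except ValueError as err:
    print(err)  # beta value should be greater than zero
```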
codereview_python_data_3688
slot_table = ( ConstructorSlot("tp_dealloc", '__dealloc__'), - EmptySlot("tp_print", ifdef="PY_VERSION_HEX <= 0x030400b4"), - EmptySlot("tp_vectorcall_offset", ifdef="PY_VERSION_HEX >= 0x030400b4"), EmptySlot("tp_getattr"), EmptySlot("tp_setattr"), Should this be 3.8 instead of 3.4? slot_table = ( ConstructorSlot("tp_dealloc", '__dealloc__'), + EmptySlot("tp_print", ifdef="PY_VERSION_HEX < 0x030800b4"), + EmptySlot("tp_vectorcall_offset", ifdef="PY_VERSION_HEX >= 0x030800b4"), EmptySlot("tp_getattr"), EmptySlot("tp_setattr"),
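For reference, `PY_VERSION_HEX` packs a CPython version as one byte each of major, minor, and micro, followed by a release-level nibble (0xA alpha, 0xB beta, 0xC release candidate, 0xF final) and a serial nibble. A small helper makes the two constants in this diff easy to read:

```python
def version_hex(major, minor, micro=0, level=0xF, serial=0):
    # mirrors how CPython builds PY_VERSION_HEX
    return (major << 24) | (minor << 16) | (micro << 8) | (level << 4) | serial

assert version_hex(3, 8, 0, level=0xB, serial=4) == 0x030800B4  # 3.8.0b4
assert version_hex(3, 4, 0, level=0xB, serial=4) == 0x030400B4  # 3.4.0b4
# 0x030800B4 is 3.8.0b4, matching the corrected guard in the fix above
```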
codereview_python_data_3694
""" @tb.must_fail(errors.EdgeQLSyntaxError, - "Unexpected token.*AT", line=6, col=24) def test_edgeql_syntax_shape_20(self): """ INSERT Foo{ bar: { @weight, # this syntax may be valid in the future - BarLink@special, } }; """ We decided that the form should always be `[IS BarLink]@special` """ @tb.must_fail(errors.EdgeQLSyntaxError, + "Unexpected token.*AT", line=6, col=29) def test_edgeql_syntax_shape_20(self): """ INSERT Foo{ bar: { @weight, # this syntax may be valid in the future + [IS BarLink]@special, } }; """
codereview_python_data_3696
class Matrix(object):
- def __init__(self, matrix):
 pass
I think `matrix` is a somewhat confusing name, since it is just a string of numbers divided by newlines. So, it would be more appropriate to name it `matrix_string` or something else. What do you think?
class Matrix(object):
+ def __init__(self, matrix_string):
 pass
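One plausible reading of what the renamed parameter carries, sketched as a working class; the `rows` and `columns` attributes are assumptions about the rest of the exercise, not given in the entry:

```python
class Matrix(object):
    def __init__(self, matrix_string):
        # e.g. "1 2\n3 4" -> [[1, 2], [3, 4]]
        self.rows = [[int(n) for n in line.split()]
                     for line in matrix_string.splitlines()]
        self.columns = [list(col) for col in zip(*self.rows)]

m = Matrix("1 2\n3 4")
print(m.rows)     # [[1, 2], [3, 4]]
print(m.columns)  # [[1, 3], [2, 4]]
```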
codereview_python_data_3725
elif role == "ALL" and self.need_to_be_owner and self.sig_count > 1:
 return "{} signatures of any role are required and needs to be owner".format(self.sig_count)
class AuthConstraintAnd(AbstractAuthConstraint):
 def __init__(self, auth_constraints: List[AbstractAuthConstraint]):
I would add a static `from_dict` method to every AuthConstraint class, so that `ConstraintCreator` can be simplified and does not access `AUTH_CONSTRAINTS` (which is actually internals).
elif role == "ALL" and self.need_to_be_owner and self.sig_count > 1:
 return "{} signatures of any role are required and needs to be owner".format(self.sig_count)

+ @staticmethod
+ def from_dict(as_dict):
+ return AuthConstraint(**as_dict)
+
class AuthConstraintAnd(AbstractAuthConstraint):
 def __init__(self, auth_constraints: List[AbstractAuthConstraint]):
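How the `from_dict` hook pays off on the creator side, as a hedged sketch: each class rebuilds itself from a dict, and the creator dispatches on a type tag instead of reaching into module internals. The `constraint_id` key and the simplified class bodies are illustrative assumptions.

```python
class AuthConstraint:
    def __init__(self, role, sig_count, need_to_be_owner=False):
        self.role = role
        self.sig_count = sig_count
        self.need_to_be_owner = need_to_be_owner

    @staticmethod
    def from_dict(as_dict):
        return AuthConstraint(**as_dict)

class AuthConstraintAnd:
    def __init__(self, auth_constraints):
        self.auth_constraints = auth_constraints

    @staticmethod
    def from_dict(as_dict):
        # recursively rebuild the nested constraints
        return AuthConstraintAnd([create_constraint(c)
                                  for c in as_dict["auth_constraints"]])

def create_constraint(as_dict):
    # dispatch on a type tag; no access to a private AUTH_CONSTRAINTS table
    cls = {"ROLE": AuthConstraint, "AND": AuthConstraintAnd}[as_dict.pop("constraint_id")]
    return cls.from_dict(as_dict)

c = create_constraint({"constraint_id": "AND", "auth_constraints": [
    {"constraint_id": "ROLE", "role": "TRUSTEE", "sig_count": 1}]})
print(c.auth_constraints[0].role)  # TRUSTEE
```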
codereview_python_data_3726
from indy_node.server.request_handlers.action_req_handlers.utils import generate_action_result
from indy_node.server.restarter import Restarter
-from indy_common.constants import POOL_RESTART
from plenum.common.request import Request
from plenum.common.txn_util import get_request_data
Why is this done in dynamic validation and not in processing?
+from indy_common.authorize.auth_actions import AuthActionAdd
+from indy_common.authorize.auth_request_validator import WriteRequestValidator
from indy_node.server.request_handlers.action_req_handlers.utils import generate_action_result
from indy_node.server.restarter import Restarter
+from indy_common.constants import POOL_RESTART, ACTION
from plenum.common.request import Request
from plenum.common.txn_util import get_request_data
codereview_python_data_3729
main_parser = argparse.ArgumentParser(dnf.const.PROGRAM_NAME, add_help=False)
 main_parser._optionals.title = _("Optional arguments")
- main_parser.add_argument("--legacy", "--rpm", dest="legacy",
- action="store_true", help="use legacy functionality of dnf")
 main_parser.add_argument("-c", "--config", dest="config_file_path",
 default=None, metavar='[config file]',
 help=_("config file location"))
The option is unused. What about removing it?
main_parser = argparse.ArgumentParser(dnf.const.PROGRAM_NAME, add_help=False)
 main_parser._optionals.title = _("Optional arguments")
 main_parser.add_argument("-c", "--config", dest="config_file_path",
 default=None, metavar='[config file]',
 help=_("config file location"))
codereview_python_data_3736
def forward(self, g):
 g.register_message_func(lambda src, dst, edge: src['h'])
- g.register_reduce_func(lambda msgs: reduce_sum(msgs))
 for layer in self.layers:
 # apply dropout
 if self.dropout is not None:
User should specify `g.register_reduce_func('sum')` in this case so we can optimize it using spmv. Also, please move the `register` to the `__init__` func so it won't be called multiple times the way the `forward` func is.
def forward(self, g):
 g.register_message_func(lambda src, dst, edge: src['h'])
+ g.register_reduce_func(sum)
 for layer in self.layers:
 # apply dropout
 if self.dropout is not None:
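A hedged sketch of both suggestions together, written against the early DGL-style API this entry uses (whether that API survives in current DGL is not assumed): registration happens once at construction time, and the reducer is a builtin the library can recognize and lower to sparse matrix-vector products instead of invoking a Python lambda per node.

```python
import torch.nn as nn

class GCN(nn.Module):
    def __init__(self, g, layers, dropout=None):
        super(GCN, self).__init__()
        self.g = g
        self.layers = nn.ModuleList(layers)
        self.dropout = dropout
        # register once here, not on every forward() call
        g.register_message_func(lambda src, dst, edge: src['h'])
        g.register_reduce_func(sum)  # builtin reducer, eligible for spmv

    def forward(self):
        for layer in self.layers:
            layer(self.g)
        return self.g
```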
codereview_python_data_3743
__all__ = [
 'HydrogenBondAnalysis'
]


from .hbond_analysis import HydrogenBondAnalysis
Also add to `__all__`?
```suggestion
 'WaterBridgeAnalysis'
]
```
__all__ = [
 'HydrogenBondAnalysis',
+ 'WaterBridgeAnalysis'
]


from .hbond_analysis import HydrogenBondAnalysis
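Worth noting why the comma after the first entry matters here: Python concatenates adjacent string literals, so omitting it silently merges the two names into one bogus entry.

```python
merged = ['HydrogenBondAnalysis' 'WaterBridgeAnalysis']  # missing comma
fixed = ['HydrogenBondAnalysis', 'WaterBridgeAnalysis']

print(merged)  # ['HydrogenBondAnalysisWaterBridgeAnalysis']
print(fixed)   # ['HydrogenBondAnalysis', 'WaterBridgeAnalysis']
```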
codereview_python_data_3746
# Skip the current resource if it's in the excluded_resources list. excluded_resources = visitor.config.variables.get( 'excluded_resources', {}) resource_name = '{}/{}'.format(self.type(), self.key()) - if resource_name in excluded_resources: return self._visitor = visitor I'm sure I missed something, but I'm not seeing how children resources are skipped. Can you please explain how that works? # Skip the current resource if it's in the excluded_resources list. excluded_resources = visitor.config.variables.get( 'excluded_resources', {}) + cur_resource_repr = set() resource_name = '{}/{}'.format(self.type(), self.key()) + cur_resource_repr.add(resource_name) + if self.type() == 'project': + # Supports matching on projectNumber. + project_number = '{}/{}'.format(self.type(), self['projectNumber']) + cur_resource_repr.add(project_number) + if cur_resource_repr.intersection(excluded_resources): return self._visitor = visitor
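A minimal, self-contained illustration of the matching scheme in the fix (resource names are made up): every known representation of the current resource goes into a set, so a project is skipped whether the exclusion list names its id or its number, and a non-empty intersection means skip.

```python
excluded_resources = {'project/1234567890', 'bucket/my-bucket'}

# a project can be referenced by id or by number, so collect both forms
cur_resource_repr = {'project/my-project-id', 'project/1234567890'}

if cur_resource_repr.intersection(excluded_resources):
    print('skipping excluded resource')  # matches on the project number
```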