id: string, length 24 to 28
content: string, length 121 to 2.08k
codereview_python_data_3747
tensor The destination node ID array. """ - return find_edges(self, edges) def get_partition_book(self): """Get the partition information. you can rename the one imported graph_services with another name? it looks like recursive call. tensor The destination node ID array. """ + return dist_find_edges(self, edges) def get_partition_book(self): """Get the partition information.
codereview_python_data_3750
file """ if "TTS" not in raw_audio_settings: - return [TextToSpeechSettings()] return [ TextToSpeechSettings(tts["Provider"], tts["Voice"], tts["Engine"]) ```suggestion text_to_speech_configs = _convert_text_to_speech_configs(raw_audio_settings) ``` I feel like it's a better naming, no? Maybe throughout the codebase. If I understand correctly, there can be multiple configs simultaneously file """ if "TTS" not in raw_audio_settings: + return AudioSettings().text_to_speech_settings return [ TextToSpeechSettings(tts["Provider"], tts["Voice"], tts["Engine"])
codereview_python_data_3752
inputs (list): list of {txid, output} Returns: - List of transactions that spend given inputs """ raise NotImplementedError Better to describe what is actually returned. In this case a tuple (block_id, transaction). And the same for `get_owned_ids` inputs (list): list of {txid, output} Returns: + Iterator of (block_ids, transaction) for transactions that + spend given inputs. """ raise NotImplementedError
codereview_python_data_3755
if member in self.member_cache and member not in db_members: db_members.append(self.member_cache[member]) continue - elif member in self.member_cache: - # member exists in member cache and already added - # to db_members. - continue - if member not in self.member_cache_policies: try: # This is the default case, e.g. 'group/foobar' m_type, name = member.split('/', 1) the if elif here is a little bit confusing. Can you clear the logic here a little bit? if member in self.member_cache and member not in db_members: db_members.append(self.member_cache[member]) continue + if (member not in self.member_cache and + member not in self.member_cache_policies): try: # This is the default case, e.g. 'group/foobar' m_type, name = member.split('/', 1)
codereview_python_data_3760
nodes = dgl.distributed.node_split(np.arange(g.number_of_nodes()), g.get_partition_book(), force_even=True) y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_hidden), th.float32, 'h', - reuse_if_exist=True) for l, layer in enumerate(self.layers): if l == len(self.layers) - 1: y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_classes), - th.float32, 'h_last', reuse_if_exist=True) sampler = NeighborSampler(g, [-1], dgl.distributed.sample_neighbors) print('|V|={}, eval batch size: {}'.format(g.number_of_nodes(), batch_size)) When will we free this tensor? nodes = dgl.distributed.node_split(np.arange(g.number_of_nodes()), g.get_partition_book(), force_even=True) y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_hidden), th.float32, 'h', + persistent=True) for l, layer in enumerate(self.layers): if l == len(self.layers) - 1: y = dgl.distributed.DistTensor(g, (g.number_of_nodes(), self.n_classes), + th.float32, 'h_last', persistent=True) sampler = NeighborSampler(g, [-1], dgl.distributed.sample_neighbors) print('|V|={}, eval batch size: {}'.format(g.number_of_nodes(), batch_size))
codereview_python_data_3761
-def primitive_triplets(nbr): pass Is it possible to give a more meaningful name for this parameter? And thanks for the PR! +def primitive_triplets(number_in_triplet): pass
codereview_python_data_3766
if tx['assignee'] == self.bigchain.me: tx.pop('assignee') tx.pop('assignment_timestamp') - # required for mongodb - tx.pop('_id', None) return tx def validate_tx(self, tx): Should this be handled by something on a `backend` level instead of here? I can help here if you want. if tx['assignee'] == self.bigchain.me: tx.pop('assignee') tx.pop('assignment_timestamp') return tx def validate_tx(self, tx):
codereview_python_data_3770
data = self.form.cleaned_data if data.get('upc'): - # Filter the queryset by upc - matches_upc = Product.objects.filter(upc__icontains=data['upc']) - queryset = queryset.filter( Q(id=matches_upc.values('id')) | Q(id=matches_upc.values('parent_id')) ) if data.get('title'): queryset = queryset.filter(title__icontains=data['title']) Instead of your changes, what do you think of just doing the following? `queryset = queryset.filter(Q(upc__icontains=data['upc']) | Q(parent__upc__icontains=data['upc']))` That way, we don't redefine the queryset, and I find it slightly more obvious what we're trying to achieve. One would have to adapt the `exists` logic as well (which I'd like to keep). data = self.form.cleaned_data if data.get('upc'): + # If there's an exact UPC match, it returns just the matched + # product. Otherwise does a broader icontains search. + qs_match = queryset.filter(Q(upc=data['upc']) | Q(parent__upc=data['upc'])) + + if qs_match.exists(): + queryset = qs_match + else: + queryset = queryset.filter(Q(upc__icontains=data['upc']) | Q(parent__upc__icontains=data['upc'])) if data.get('title'): queryset = queryset.filter(title__icontains=data['title'])
codereview_python_data_3773
def _write_pdb_header(self): """ .. versionchanged: 1.0.0 - Write CRYST1 only if :code:`u.trajectory.ts.dimensions` - is not :code:`None`. """ if self.first_frame_done == True: return Pendantic, but any chance I can convince you to add the extra docstring bits here? def _write_pdb_header(self): """ + Write PDB header. + + CRYST1 field is skipped if if :code:`u.dimensions` is :code:`None`. + .. versionchanged: 1.0.0 + Write CRYST1 only if :code:`u.dimensions` + is not :code:`None` (Issue #2679). """ + if self.first_frame_done == True: return
codereview_python_data_3774
import pytest from mmcv.utils import build_from_cfg -from mmdet.datasets.builder import PIPELINES def test_translate(): The script only tests img but miss bboxes, masks, segmentations. import pytest from mmcv.utils import build_from_cfg +from mmdet.datasets.builder import DATASETS, PIPELINES def test_translate():
codereview_python_data_3776
continue util.print_error('response', r) method = r.get('method') result = r.get('result') if result is None: continue if method == 'blockchain.scripthash.subscribe': - addr = r.get('params')[0] - scripthash = bitcoin.address_to_scripthash(addr) self.network.get_balance_for_scripthash( scripthash, self.response_queue.put) elif method == 'blockchain.scripthash.get_balance': - scripthash = r.get('params')[0] addr = self.hash2address.get(scripthash, None) if addr is None: util.print_error( As I said on IRC, I am fairly certain this will fail here now, as you no longer give the address to the network. `r.get('params')[0]` is now a scripthash continue util.print_error('response', r) method = r.get('method') + scripthash = r.get('params')[0] result = r.get('result') if result is None: continue if method == 'blockchain.scripthash.subscribe': self.network.get_balance_for_scripthash( scripthash, self.response_queue.put) elif method == 'blockchain.scripthash.get_balance': addr = self.hash2address.get(scripthash, None) if addr is None: util.print_error(
codereview_python_data_3782
def load(handle): - """Load(handle) -> MarkovModel().""" # Load the states. line = _readline_and_check_start(handle, "STATES:") states = line.split()[1:] This is written like a function signature, not human readable. How about: ``"""Parse a file handle into a MarkovModel object."""`` def load(handle): + """Parse a file handle into a MarkovModel object.""" # Load the states. line = _readline_and_check_start(handle, "STATES:") states = line.split()[1:]
codereview_python_data_3789
}) row = result.fetchone() if row: - id, userid, sid, api_key, timestamp = row - return Session(id, userid, sid, api_key, timestamp) return None @staticmethod Using sqlalchemy, `row` should also be accessible as a dictionary. It would make this line more explicit. What about `return Session(r["id"], r["userid"], ...)`? }) row = result.fetchone() if row: + return Session(row["id"], row["user_id"], row["sid"], row["api_key"], row["ts"]) return None @staticmethod
codereview_python_data_3799
@cached_method def cimported_files(self, filename): -<<<<<<< HEAD filename_root, filename_ext = os.path.splitext(filename) if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'): pxd_list = [filename_root + '.pxd'] -======= - if filename[-4:] == '.pyx' and path_exists(filename[:-4] + '.pxd'): - pxd_list = [filename[:-4] + '.pxd'] - elif filename[-3:] == '.py' and path_exists(filename[:-3] + '.pxd'): - pxd_list = [filename[:-3] + '.pxd'] ->>>>>>> 1f893a1a319dc3413c63aafe1e535ff1cff6d4d4 else: pxd_list = [] # Cimports generates all possible combinations package.module This looks look a good approach, but doesn't look to have merged right @cached_method def cimported_files(self, filename): filename_root, filename_ext = os.path.splitext(filename) if filename_ext in ('.pyx', '.py') and path_exists(filename_root + '.pxd'): pxd_list = [filename_root + '.pxd'] else: pxd_list = [] # Cimports generates all possible combinations package.module
codereview_python_data_3801
from . import NeighborSearch from . import formats from . import pkdtree -from . import nsgrid # pylint: disable=import-self Do not disable here, instead add an exception to the `package/.pylintrc` file from . import NeighborSearch from . import formats from . import pkdtree +from . import nsgrid
codereview_python_data_3803
def get_ogb_graph(name): data = DglNodePropPredDataset(name=name) return data[0][0] @VoVAllen Is this still needed? def get_ogb_graph(name): + os.symlink('/tmp/dataset/', os.path.join(os.getcwd(), 'dataset')) data = DglNodePropPredDataset(name=name) return data[0][0]
codereview_python_data_3807
class ConnectGame: def __init__(self, board): pass The solution template doesn't match its usage in `connect_test.py`. The solution template should only contain placeholders for all classes and methods referenced in the tests. `connect_test.py` does not reference `connect.ConnectGame`, but it does reference `connect.play()`, so that should be in the template. class ConnectGame: def __init__(self, board): pass + + def get_winner(self): + pass
codereview_python_data_3813
flow.response.content = flow.response.content.replace('https://', 'http://') # strip meta tag upgrade-insecure-requests in response body - csp_meta_tag_pattern = '<meta.*http-equiv=["\']Content-Security-Policy[\'"].*upgrade-insecure-requests.*?>' - flow.response.content = re.sub(csp_meta_tag_pattern, '', flow.response.content, flags=re.IGNORECASE) # strip links in 'Location' header if flow.response.headers.get('Location', '').startswith('https://'): This will not work on Python 3 (to which we are transitioning) because `.content` is bytes, not a str. Can you make the pattern a bytes object as well (like so: `b"pattern"`)? flow.response.content = flow.response.content.replace('https://', 'http://') # strip meta tag upgrade-insecure-requests in response body + csp_meta_tag_pattern = b'<meta.*http-equiv=["\']Content-Security-Policy[\'"].*upgrade-insecure-requests.*?>' + flow.response.content = re.sub(csp_meta_tag_pattern, b'', flow.response.content, flags=re.IGNORECASE) # strip links in 'Location' header if flow.response.headers.get('Location', '').startswith('https://'):
codereview_python_data_3814
self.parser = None if self.element is not None: # No more XML data, but there is still some unfinished business - raise CorruptedXMLError("Incomplete XML file?") # Send out the remaining records yield from records You don't know if it's a file. self.parser = None if self.element is not None: # No more XML data, but there is still some unfinished business + raise CorruptedXMLError("Premature end of data") # Send out the remaining records yield from records
codereview_python_data_3815
FAKE_ORG_IAM_POLICY_MAP = [{ - 'org_id': 660570133860, 'iam_policy': { 'bindings': [ {'role': 'roles/billing.creator', 'members': [ question: would it be possible not to include an actual org id or does that break the test? FAKE_ORG_IAM_POLICY_MAP = [{ + 'org_id': 666666, 'iam_policy': { 'bindings': [ {'role': 'roles/billing.creator', 'members': [
codereview_python_data_3818
from scapy.arch.consts import LOOPBACK_NAME from scapy.config import conf,ConfClass -from scapy.error import warning, Scapy_Exception, log_runtime -from scapy.data import * -from scapy.arch import pcapdnet -from scapy.arch.pcapdnet import * def sndrcv(pks, pkt, timeout = 2, inter = 0, verbose=None, chainCC=0, retry=0, multi=0): if not isinstance(pkt, Gen): Some of those are relevent, some using "*" are not from scapy.arch.consts import LOOPBACK_NAME from scapy.config import conf,ConfClass +from scapy.base_classes import Gen, SetGen +from scapy import plist as plist +from scapy.utils import PcapReader +from scapy.data import MTU, ETH_P_ARP def sndrcv(pks, pkt, timeout = 2, inter = 0, verbose=None, chainCC=0, retry=0, multi=0): if not isinstance(pkt, Gen):
codereview_python_data_3820
self.evaluate(tf.compat.v1.global_variables_initializer()) with tf.control_dependencies([wn_output]): wn_removed_layer = wn_layer.remove() - wn_removed_output = wn_removed_layer(sample_data) if wn_layer.is_rnn: kernel = wn_layer.layer.cell.recurrent_kernel Why remove this check? self.evaluate(tf.compat.v1.global_variables_initializer()) with tf.control_dependencies([wn_output]): wn_removed_layer = wn_layer.remove() if wn_layer.is_rnn: kernel = wn_layer.layer.cell.recurrent_kernel
codereview_python_data_3824
class _LGBMModel: def __init__(self): if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)): - raise LightGBMError('Dask, Pandas and Scikit-learn are required for this module') def _fit(self, model_factory, X, y=None, sample_weight=None, group=None, client=None, **kwargs): """Docstring is inherited from the LGBMModel.""" ```suggestion raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask') ``` Instead of "this module", could you use the specific name? I think that makes the log message a little more useful standalone. It can be helpful for cases where people don't have direct access to the stack trace, which is required to understand what "this module" refers to. For example, user code or other frameworks might write things like this ```python try: dask_reg = DaskLGBMClassifier() except LightGBMError as err: log.fatal(err) raise SomeOtherException("LightGBM training failed") ``` I also think packages should be referenced by their exact package names, not capitalized names. class _LGBMModel: def __init__(self): if not all((DASK_INSTALLED, PANDAS_INSTALLED, SKLEARN_INSTALLED)): + raise LightGBMError('dask, pandas and scikit-learn are required for lightgbm.dask') def _fit(self, model_factory, X, y=None, sample_weight=None, group=None, client=None, **kwargs): """Docstring is inherited from the LGBMModel."""
codereview_python_data_3825
elif n == 1: ret_new[f'{q}_alt'] = items[n] else: - ret_new[f'{q}_alt_{n}'] = items[n] return ret_new def open(self): Without a third `height=1080` entry in `test_parse_manifest_with_duplicated_resolutions`, the else-block is not covered by the tests. elif n == 1: ret_new[f'{q}_alt'] = items[n] else: + ret_new[f'{q}_alt{n}'] = items[n] return ret_new def open(self):
codereview_python_data_3826
CONTEXT_SIZE_LIMIT = 131072 JSON_LD_LIMIT = CONTEXT_SIZE_LIMIT -enableRichSchemas = False Suggest changing `enableRichSchemas` to `ENABLE_RICH_SCHEMAS` because it's some kind of constants. But in fact, looks like we don't have rules for config naming. So it's just my point of view and not necessary to change. CONTEXT_SIZE_LIMIT = 131072 JSON_LD_LIMIT = CONTEXT_SIZE_LIMIT +ENABLE_RICH_SCHEMAS = False
codereview_python_data_3828
group : array-like or None, optional (default=None) Group data of training data. eval_set : list or None, optional (default=None) - A list of (X, y) tuple pairs to use as a validation set. eval_names : list of strings or None, optional (default=None) Names of eval_set. eval_sample_weight : list of arrays or None, optional (default=None) LightGBM supports multiple validation sets, so please leave `sets`. group : array-like or None, optional (default=None) Group data of training data. eval_set : list or None, optional (default=None) + A list of (X, y) tuple pairs to use as validation sets. eval_names : list of strings or None, optional (default=None) Names of eval_set. eval_sample_weight : list of arrays or None, optional (default=None)
codereview_python_data_3829
def define_graph(self): self.jpegs, self.labels = self.input() images = self.decode(self.jpegs) - outputs = self.cmnp([images, images], - crop_pos_x = 0.5, - crop_pos_y = 0.5) outputs[1] = self.affine(outputs[1]) return [self.labels] + outputs I think we can remove and use the default. def define_graph(self): self.jpegs, self.labels = self.input() images = self.decode(self.jpegs) + outputs = self.cmnp([images, images]) outputs[1] = self.affine(outputs[1]) return [self.labels] + outputs
codereview_python_data_3832
# matches the common endpoints like # - '<bucket_name>.s3.<region>.*' - localstack_pattern = re.compile(r'^(.+)\.s3[.\-][a-z]{2}-[a-z]+-[0-9]{1,}.*') # matches the common endpoints like # - '<bucket_name>.s3.<region>.amazonaws.com' Let's use the same `S3_HOSTNAME_PATTERN` as above here. # matches the common endpoints like # - '<bucket_name>.s3.<region>.*' + localstack_pattern = re.compile(S3_HOSTNAME_PATTERN) # matches the common endpoints like # - '<bucket_name>.s3.<region>.amazonaws.com'
codereview_python_data_3844
How the component should size itself. - This is a high-level setting for maintaining width and height - of the component. To gain more fine grained control over - sizing, use ``width_policy``, ``height_policy`` and - ``aspect_ratio`` instead (those take precedence over - ``sizing_mode``). - * "fixed" : Component is not responsive. It will retain its original width and height regardless of any subsequent browser window I guess it is worth exposing the bokeh semantics even if users should rarely need to set this explicitly (in which case I would expect it to override everything else) How the component should size itself. * "fixed" : Component is not responsive. It will retain its original width and height regardless of any subsequent browser window
codereview_python_data_3847
HydrogenBondLifetimes ~~~~~~~~~~~~~~~~~~~~~ -The class Analyzing hydrogen bond lifetimes (HBL) -:class:`HydrogenBondLifetimes` has been removed. -Please use the newer module -:mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis`. WaterOrientationalRelaxation Instead of saying that it has been removed just say that there are other ways to analyze water dynamics is the ...lifetimes() method. Add a See Also section for the hbond_analysis module. HydrogenBondLifetimes ~~~~~~~~~~~~~~~~~~~~~ +To analyse hydrogen bond lifetime, use +:mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis` and +its method `.lifetime` to find the hydrogen bonds and their lifetime. +See Also + -------- + :mod:`MDAnalysis.analysis.hydrogenbonds.hbond_analysis` WaterOrientationalRelaxation
codereview_python_data_3851
""" Reads the SHA file and returns the string stripped of any whitespace and extra characters """ with open(filepath, "r") as f: - sha = f.read().replace('\n', '') # Get rid of new line characters - sha = sha.lstrip().rstrip() # Get rid of any leading or trailing whitespace - sha = sha.split(' ')[0] # Get rid of filenames if present return sha I think these three lines can be replaced with: sha = f.read().split(" ", 1)[0] no? """ Reads the SHA file and returns the string stripped of any whitespace and extra characters """ with open(filepath, "r") as f: + sha = f.read().lstrip().split(" ", 1)[0] return sha
codereview_python_data_3857
network_interfaces.append(ins.create_network_interfaces()) if not network_interfaces: - error_message = 'No VM network interfaces found. Exiting.' - LOGGER.warn(error_message) - raise util_errors.NoDataError(error_message) return network_interfaces nit: 'Exiting' can be a bit misleading as this isn't doing a fatal log or os.exit here. The exception in a normal situation is handled and ignored in the 'run' function so the binary didn't 'Exit'. network_interfaces.append(ins.create_network_interfaces()) if not network_interfaces: + LOGGER.warn('No VM network interfaces found.') + return [] return network_interfaces
codereview_python_data_3858
ir_set, xctx.rel, aspect='serialized', wrapper=wrapper, ctx=xctx) - if wrapper: - ctx.rel = stmt = wrapper - return new_stmt_set_rvar(ir_set, stmt, ctx=ctx) This seems fishy. `wrapper` may come from a `ctx.subrel()` so we're doing some weird inversion here. Why is this needed? ir_set, xctx.rel, aspect='serialized', wrapper=wrapper, ctx=xctx) return new_stmt_set_rvar(ir_set, stmt, ctx=ctx)
codereview_python_data_3860
# Map from calendar name to default domain for that calendar. -_DEFAULT_DOMAINS = {d.calendar_name: d for d in domain.BUILT_IN_DOMAINS} If we have a list of built-in domains defined in the domain module, then this can probably turn into something like: ```python _DEFAULT_DOMAINS = {d.calendar_name: d for d in domain.BUILT_IN_DOMAINS} ``` (We should make `calendar_name` public if we do ^). # Map from calendar name to default domain for that calendar. +_DEFAULT_DOMAINS = { + 'NYSE': domain.US_EQUITIES, + 'TSX': domain.CA_EQUITIES, + 'LSE': domain.GB_EQUITIES, +}
codereview_python_data_3876
def __str__(self): return ( - "Unexpected inputs types in {}. " "Inputs to Pipeline expressions must be Filters, Factors, " "Classifiers, or BoundColumns.\n" "Got the following type(s) instead: {}".format( (my) typo here: "types" shouldn't be plural. def __str__(self): return ( + "Unexpected input types in {}. " "Inputs to Pipeline expressions must be Filters, Factors, " "Classifiers, or BoundColumns.\n" "Got the following type(s) instead: {}".format(
codereview_python_data_3877
from zipline.utils.calendars.exchange_calendar_lse import LSEExchangeCalendar from zipline.utils.calendars.exchange_calendar_tsx import TSXExchangeCalendar -_static_calendars = {} - NYSE_CALENDAR_EXCHANGE_NAMES = frozenset([ "NYSE", Is this still used? from zipline.utils.calendars.exchange_calendar_lse import LSEExchangeCalendar from zipline.utils.calendars.exchange_calendar_tsx import TSXExchangeCalendar NYSE_CALENDAR_EXCHANGE_NAMES = frozenset([ "NYSE",
codereview_python_data_3878
def get_java_opts(port): opts = config.LAMBDA_JAVA_OPTS if opts.find('_debug_port_'): - java_opts = opts.replace('_debug_port_', ('address=%s' % port)) return java_opts return opts Sorry for being so picky on this PR, but can we change this such that `_debug_port_` only represents the actual port number? I.e., the user would specify `LAMBDA_JAVA_OPTS=...,address=_debug_port_` def get_java_opts(port): opts = config.LAMBDA_JAVA_OPTS if opts.find('_debug_port_'): + java_opts = opts.replace('_debug_port_', ('%s' % port)) return java_opts return opts
codereview_python_data_3881
response = self.client.post(url + '/tagSeries', {'path': 'test.a;hello=tiger;blah=blah'}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') - if sys.version_info[0] >= 3: - self.assertEqual(response.content, - json.dumps(expected, indent=2, sort_keys=True).encode('utf-8')) - else: - self.assertEqual(response.content, json.dumps(expected, indent=2, sort_keys=True)) ## list tags Instead of having these through the whole file, can we add a helper function to use throughout the file? ``` def json_dumps(obj, *args, **kwargs): if sys.version_info[0] >= 3: return json.dumps(obj, *args, **kwargs).encode('utf-8') return json.dumps(obj, *args, **kwargs) ``` response = self.client.post(url + '/tagSeries', {'path': 'test.a;hello=tiger;blah=blah'}) self.assertEqual(response.status_code, 200) self.assertEqual(response['Content-Type'], 'application/json') + self.assertEqual(response.content, json_bytes(expected, indent=2, sort_keys=True)) ## list tags
codereview_python_data_3886
INSERT OR IGNORE INTO asset_types(type, seq) VALUES ('\', 28); /* FANTOM TOKEN */ INSERT OR IGNORE INTO asset_types(type, seq) VALUES (']', 29); -/* ARBITRIUM TOKEN */ INSERT OR IGNORE INTO asset_types(type, seq) VALUES ('^', 30); /* OPTIMISM TOKEN */ INSERT OR IGNORE INTO asset_types(type, seq) VALUES ('_', 31); Isn't it ARBITRUM? INSERT OR IGNORE INTO asset_types(type, seq) VALUES ('\', 28); /* FANTOM TOKEN */ INSERT OR IGNORE INTO asset_types(type, seq) VALUES (']', 29); +/* ARBITRUM TOKEN */ INSERT OR IGNORE INTO asset_types(type, seq) VALUES ('^', 30); /* OPTIMISM TOKEN */ INSERT OR IGNORE INTO asset_types(type, seq) VALUES ('_', 31);
codereview_python_data_3888
else: raise NoDataError("Provide at least a position, velocity" " or force group in the h5md file.") - except IndexError: raise IOError from None self._frame = frame Let's make this 2.0 and 3.0 compatible: ```suggestion except (ValueError, IndexError): ``` else: raise NoDataError("Provide at least a position, velocity" " or force group in the h5md file.") + except (ValueError, IndexError): raise IOError from None self._frame = frame
codereview_python_data_3889
gym_latitude=fort.get('latitude'), gym_longitude=fort.get('longitude')) response_gym_details = self.api.call() - gym_details = response_gym_details.get('responses', {}).get('GET_GYM_DETAILS', None) - if gym_details is not None: - fort['gym_details'] = gym_details user_data_cells = "data/cells-%s.json" % (self.config.username) with open(user_data_cells, 'w') as outfile: you need need one line to fix this bug: `fort['gym_details'] = response_gym_details.get('responses', {}).get('GET_GYM_DETAILS', None)` gym_latitude=fort.get('latitude'), gym_longitude=fort.get('longitude')) response_gym_details = self.api.call() + fort['gym_details'] = response_gym_details.get('responses', {}).get('GET_GYM_DETAILS', None) user_data_cells = "data/cells-%s.json" % (self.config.username) with open(user_data_cells, 'w') as outfile:
codereview_python_data_3908
training. Defaults to 10. metrics_format: Custom format for how metrics are formatted. See https://github.com/tqdm/tqdm#parameters for more detail. - leave_epoch_progress (bool): True to leave epoch progress bars. - leave_overall_progress (bool): True to leave overall progress bar. - show_epoch_progress (bool): False to hide epoch progress bars. - show_overall_progress (bool): False to hide overall progress bar. """ def __init__(self, Any reason for keeping `bool` type info (in the parenthesis) for `leave_*` and `show_*`, while the others (`string format`, `int`, `string`) were removed? training. Defaults to 10. metrics_format: Custom format for how metrics are formatted. See https://github.com/tqdm/tqdm#parameters for more detail. + leave_epoch_progress: True to leave epoch progress bars. + leave_overall_progress: True to leave overall progress bar. + show_epoch_progress: False to hide epoch progress bars. + show_overall_progress: False to hide overall progress bar. """ def __init__(self,
codereview_python_data_3916
conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, - rfp_inplanes=None, - sac=None, plugins=None): super(BasicBlock, self).__init__() assert dcn is None, 'Not implemented yet.' - assert rfp_inplanes is None, 'Not implemented yet.' - assert sac is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1) If we make a new backbone class, we don't need to support `BasicBlock` conv_cfg=None, norm_cfg=dict(type='BN'), dcn=None, plugins=None): super(BasicBlock, self).__init__() assert dcn is None, 'Not implemented yet.' assert plugins is None, 'Not implemented yet.' self.norm1_name, norm1 = build_norm_layer(norm_cfg, planes, postfix=1)
codereview_python_data_3931
format = 'H5MD' multiframe = True _unit_translation_dict = { 'time': { 'ps': 'ps', Just passing by (strangely enough was thinking of #2698 and saw this pop up), going by line 622, wouldn't a no-dimensions case set `ts.dimensions` to `None` (i.e. this should cause a `TypeError`)? format = 'H5MD' multiframe = True + #: These variables are not written from :attr:`Timestep.data` + #: dictionary to the observables group in the H5MD file + data_blacklist = ['step', 'time', 'dt'] _unit_translation_dict = { 'time': { 'ps': 'ps',
codereview_python_data_3933
import typing from mitmproxy.contentviews import base -from mitmproxy.contentviews.json import parse_json - - -PARSE_ERROR = object() def format_graphql(data): This should be imported and not redefined here, or am I missing something? import typing from mitmproxy.contentviews import base +from mitmproxy.contentviews.json import parse_json, PARSE_ERROR def format_graphql(data):
codereview_python_data_3936
`y_pred`, and returns the loss. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then - be the sum of all individual losses. loss_weights: (Optional) a list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then ```suggestion be the sum of all individual losses, each weighted by `loss_weights`. ``` `y_pred`, and returns the loss. If the model has multiple outputs, you can use a different loss on each output by passing a dictionary or a list of losses. The loss value that will be minimized by the model will then + be the sum of all individual losses, each weighted by `loss_weights`. loss_weights: (Optional) a list or dictionary specifying scalar coefficients (Python floats) to weight the loss contributions of different model outputs. The loss value that will be minimized by the model will then
codereview_python_data_3941
import tempfile import unittest -import metrics import mock import parameterized import pyfakefs.fake_filesystem_unittest as fake_fs_unittest nit: move to the import group after this one, and do `from metrics import logs` instead of importing just `metrics`. import tempfile import unittest import mock import parameterized import pyfakefs.fake_filesystem_unittest as fake_fs_unittest
codereview_python_data_3947
if isinstance(item, list) and item: # check for empty list # hack to make lists into numpy arrays # important for boolean slicing - # TODO: what about tuples? item = np.array(item) return self.__class__(self._ix[item], self._u) WRT tuples, I've tried to make slicing a Group identical to slicing a numpy array. Slicing a np array with a tuple seems to do some sort of weird n-dimensional slice, so `a[(1, 2)] == a[1][2]`. Happy to change this over if it's a problem though if isinstance(item, list) and item: # check for empty list # hack to make lists into numpy arrays # important for boolean slicing item = np.array(item) return self.__class__(self._ix[item], self._u)
codereview_python_data_3962
import numpy as np import warnings from enum import Enum, unique def _iterator_deprecation_warning(): warnings.warn("Please set `reader_name` and don't set last_batch_padded and size manually " + - "whenever possible. This may lead, in some situations, to miss some " + - "samples or return duplicated ones. Check the Sharding section of the " "documentation for more details.", Warning, stacklevel=2) lead to _something_ ```suggestion "whenever possible. This may lead, in some situations, to missing some " + "samples or returning duplicated ones. Check the Sharding section of the " ``` import numpy as np import warnings from enum import Enum, unique +from collections import Iterable def _iterator_deprecation_warning(): warnings.warn("Please set `reader_name` and don't set last_batch_padded and size manually " + + "whenever possible. This may lead, in some situations, to missing some " + + "samples or returning duplicated ones. Check the Sharding section of the " "documentation for more details.", Warning, stacklevel=2)
codereview_python_data_3966
<h1>Error 503 Backend is unhealthy</h1> <p>Backend is unhealthy</p> <h3>Guru Mediation:</h3> - <p>Details: cache-sea4420-SEA 1645546101 2168331721</p> <hr> <p>Varnish cache server</p> </body> Is this a copy and paste error? (Couchbase vs. ArangoDB?) <h1>Error 503 Backend is unhealthy</h1> <p>Backend is unhealthy</p> <h3>Guru Mediation:</h3> + <p>Details: cache-sea4452-SEA 1645546101 2777032446</p> <hr> <p>Varnish cache server</p> </body>
codereview_python_data_3969
x_range=x_range, y_range=y_range) dfdata = PandasInterface.as_dframe(data) - # Suppress numpy warning emitted by datashader with warnings.catch_warnings(): warnings.filterwarnings( action='ignore', message='casting datetime64', Is there a corresponding issue/PR to fix this warning at the datashader level? Would be good to have a link to point to so this can be removed once fixed in datashader. x_range=x_range, y_range=y_range) dfdata = PandasInterface.as_dframe(data) + # Suppress numpy warning emitted by dask: + # https://github.com/dask/dask/issues/8439 with warnings.catch_warnings(): warnings.filterwarnings( action='ignore', message='casting datetime64',
codereview_python_data_3970
-from .affine_grid_generator import affine_grid from .context_block import ContextBlock from .dcn import (DeformConv, DeformConvPack, DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformConv, ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack, deform_conv, deform_roi_pooling, modulated_deform_conv) -from .grid_sampler import grid_sample from .masked_conv import MaskedConv2d from .nms import nms, soft_nms from .roi_align import RoIAlign, roi_align `affine_grid` and `grid_sample` are currently unused. We may remove it from `ops/__init__.py` to speedup the loading of mmdet. from .context_block import ContextBlock from .dcn import (DeformConv, DeformConvPack, DeformRoIPooling, DeformRoIPoolingPack, ModulatedDeformConv, ModulatedDeformConvPack, ModulatedDeformRoIPoolingPack, deform_conv, deform_roi_pooling, modulated_deform_conv) from .masked_conv import MaskedConv2d from .nms import nms, soft_nms from .roi_align import RoIAlign, roi_align
codereview_python_data_3971
try: atom = mol.GetAtomWithIdx(0) - except: top = Topology(n_atoms=0, n_res=0, n_seg=0, attrs=None, atom_resindex=None, As the auto-linter suggests, you may want to be more specific here so that you don't accidentally intercept other important exceptions. Looking at the matching issue, you may be able to use `except RuntimeError` to be more specific. try: atom = mol.GetAtomWithIdx(0) + except RuntimeError: top = Topology(n_atoms=0, n_res=0, n_seg=0, attrs=None, atom_resindex=None,
codereview_python_data_3977
try: dict['resolution'] = float(r) except: -<<<<<<< HEAD # print('nonstandard resolution %r' % r) dict['resolution'] = None -======= - #print('nonstandard resolution %r' % r) - dict['resolution']=None - elif key=="CRYST1": # get the symmetry & the cell parameters dict['cell'] = (float(hh[6:15]), float(hh[15:24]), It looks like you committed a merge without resolving it - you'll need to clean up this in order for the Python code to work. try: dict['resolution'] = float(r) except: # print('nonstandard resolution %r' % r) dict['resolution'] = None + elif key == "CRYST1": # get the symmetry & the cell parameters dict['cell'] = (float(hh[6:15]), float(hh[15:24]),
codereview_python_data_3981
if file: file.close() - def new_http_session(self): - self.http.close() - @property def version(self): return __version__ if needed it should be in `src/streamlink/plugins/orf_tvthek.py` if file: file.close() @property def version(self): return __version__
codereview_python_data_3982
except AttributeError: stdlib_dir = os.path.dirname(shutil.__file__) + os.sep module_path = getattr(module, '__file__', stdlib_dir) # no __file__? => builtin stdlib module - if (module_path # GraalPython seems to return None for some unknown reason - and module_path.startswith(stdlib_dir)): # stdlib module version = sys.version.partition(' ')[0] elif '.' in name: ```suggestion # GraalPython seems to return None for some unknown reason if module_path and module_path.startswith(stdlib_dir): ``` except AttributeError: stdlib_dir = os.path.dirname(shutil.__file__) + os.sep module_path = getattr(module, '__file__', stdlib_dir) # no __file__? => builtin stdlib module + # GraalPython seems to return None for some unknown reason + if module_path and module_path.startswith(stdlib_dir): # stdlib module version = sys.version.partition(' ')[0] elif '.' in name:
codereview_python_data_3993
regression_testcase_url = os.path.join( corpus.get_regressions_corpus_gcs_url(), os.path.basename(testcase_file_path)) - storage.copy_file_to(testcase_file_path, regression_testcase_url) - logs.log('Successfully stored testcase for regression testing: ' + - regression_testcase_url) def find_fixed_range(testcase_id, job_type): nit: move this under `if storage.copy_file_to(...):` regression_testcase_url = os.path.join( corpus.get_regressions_corpus_gcs_url(), os.path.basename(testcase_file_path)) + + if storage.copy_file_to(testcase_file_path, regression_testcase_url): + logs.log('Successfully stored testcase for regression testing: ' + + regression_testcase_url) + else: + logs.log_error('Failed to store testcase for regression testing: ' + + regression_testcase_url) def find_fixed_range(testcase_id, job_type):
codereview_python_data_4003
else: self._doc.setPlainText(self._opt.text) else: - self._doc.setHtml('{}'.format(html.escape(self._opt.text))) def _draw_focus_rect(self): """Draw the focus rectangle of an ItemViewItem.""" As you only have `{}` as the format string (without anything else in it), this is the same as doing `self._doc.setHtml(html.escape(self._opt.text))` else: self._doc.setPlainText(self._opt.text) else: + self._doc.setHtml(html.escape(self._opt.text)) def _draw_focus_rect(self): """Draw the focus rectangle of an ItemViewItem."""
codereview_python_data_4010
if self._multi_etype: assert isinstance(val, dict), \ 'Current HeteroEdgeDataView has multiple edge types, ' \ - 'please passing the edge type and the corresponding data through a dict.' for (etype, data) in val.items(): etid = self._graph.get_etype_id(etype) self._graph._set_e_repr(etid, self._edges, {key : data}) else: self._graph._set_e_repr(self._etid, self._edges, {key : val}) def __delitem__(self, key): I think we also need to check if val is a dictionary here and raise an error if so. if self._multi_etype: assert isinstance(val, dict), \ 'Current HeteroEdgeDataView has multiple edge types, ' \ + 'please pass the edge type and the corresponding data through a dict.' for (etype, data) in val.items(): etid = self._graph.get_etype_id(etype) self._graph._set_e_repr(etid, self._edges, {key : data}) else: + assert isinstance(val, dict) is False, \ + 'The HeteroEdgeDataView has only one edge type. ' \ + 'please pass a tensor directly' self._graph._set_e_repr(self._etid, self._edges, {key : val}) def __delitem__(self, key):
codereview_python_data_4013
answer += ", location_operator=%r" % self.location_operator if self.id and self.id != "<unknown id>": answer += ", id=%r" % self.id if self.ref: answer += ", ref=%r" % self.ref if self.ref_db: answer += ", ref_db=%r" % self.ref_db - if self.qualifiers: - answer += ", qualifiers=..." answer += ")" return answer Move before ref to match __init__ order answer += ", location_operator=%r" % self.location_operator if self.id and self.id != "<unknown id>": answer += ", id=%r" % self.id + if self.qualifiers: + answer += ", qualifiers=..." if self.ref: answer += ", ref=%r" % self.ref if self.ref_db: answer += ", ref_db=%r" % self.ref_db answer += ")" return answer
codereview_python_data_4014
# If string is passed in, execute and get reference to # functions. self.algoscript = kwargs.pop('script', None) - self._initialize = None - self._analyze = kwargs.pop('analyze', None) self._before_trading_start = None self._analyze = None From the two cases below, it looks like we should assign to `self._analyze` from `kwargs` only in the second case, where we assign all our functions from `kwargs`. (In the first case, we assign it from `self.namespace`.) What do you think about moving this assignment to the body of the `elif` below? Also, did you find this worked for you, since we re-assign `self._analyze = None` two lines below this? # If string is passed in, execute and get reference to # functions. self.algoscript = kwargs.pop('script', None) self._initialize = None self._before_trading_start = None self._analyze = None
codereview_python_data_4017
def simple_cycles(G): """Find simple cycles (elementary circuits) of a directed graph. - An simple cycle, or elementary circuit, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other. Typo here: An -> A. Also should italicize definitions _simple cycle_ and _elementary circuit_. def simple_cycles(G): """Find simple cycles (elementary circuits) of a directed graph. + A simple cycle, or elementary circuit, is a closed path where no node appears twice. Two elementary circuits are distinct if they are not cyclic permutations of each other.
codereview_python_data_4026
@property def data(self): warnings.warn( - "Accessing MutableSeq.data has been deprecated, as it is a private " "attribute. Please use indexing to access the sequence contents of " "a MutableSeq object.", BiopythonDeprecationWarning, Maybe a slight rewording, "is now a private attribute"? @property def data(self): warnings.warn( + "Accessing MutableSeq.data has been deprecated, as it is now a private " "attribute. Please use indexing to access the sequence contents of " "a MutableSeq object.", BiopythonDeprecationWarning,
codereview_python_data_4028
return MultiplexingClusterManager() raise ValueError( - "cannot manage clusters with ES_ENDPOINT_STRATEGY=off and ES_MULTI_CLUSTER=False" ) nit: should this be ``` "cannot manage clusters with ES_ENDPOINT_STRATEGY=off and ES_MULTI_CLUSTER=true" ``` ? return MultiplexingClusterManager() raise ValueError( + "cannot manage clusters with ES_ENDPOINT_STRATEGY=off and ES_MULTI_CLUSTER=True" )
codereview_python_data_4033
# Create a dummy invisible scatter trace for this image. # This serves two purposes # 1. The two points placed on the corners of the image are used by the - # autoscale logic to allow using the autoscale button to property center # the image. # 2. This trace will be given a UID, and this UID will make it possible to # associate callbacks with the image element. This is needed, in particular ```suggestion # autoscale logic to allow using the autoscale button to properly center ``` # Create a dummy invisible scatter trace for this image. # This serves two purposes # 1. The two points placed on the corners of the image are used by the + # autoscale logic to allow using the autoscale button to properly center # the image. # 2. This trace will be given a UID, and this UID will make it possible to # associate callbacks with the image element. This is needed, in particular
codereview_python_data_4036
return namespaces @property - def unfenced_descendants(self) -> Iterator['ScopeTreeNode']: """An iterator of node's unfenced descendants including self.""" yield self for child in tuple(self.children): We can probably also fix things like `Iterator['ScopeTreeNode']` to `Iterator[ScopeTreeNode]` if that's easy to handle in the script you have; and if not this can wait/be done manually. return namespaces @property + def unfenced_descendants(self) -> Iterator[ScopeTreeNode]: """An iterator of node's unfenced descendants including self.""" yield self for child in tuple(self.children):
codereview_python_data_4040
class ClassificationInterpretation(): "Interpretation methods for classification models." - def __init__(self, data:DataBunch, learn:Learner, probs:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid): - self.data,self.probs,self.y_true,self.losses,self.ds_type, self.learn= data,probs,y_true,losses,ds_type,learn self.pred_class = self.probs.argmax(dim=1) So we need to had the `Learner` for this to work, which is fine. Let's remove `DataBunch` then since we can access it with `learn.data`. This needs a bit of adjustment after (and in the `from_learner` method, don't forget to pass this `Learner`), so let me know if you'd rather have me doing it. class ClassificationInterpretation(): "Interpretation methods for classification models." + def __init__(self, learn:Learner, probs:Tensor, y_true:Tensor, losses:Tensor, ds_type:DatasetType=DatasetType.Valid): + self.data,self.probs,self.y_true,self.losses,self.ds_type, self.learn= learn.data,probs,y_true,losses,ds_type,learn self.pred_class = self.probs.argmax(dim=1)
codereview_python_data_4042
if issue_owners: metadata['issue_owners'] = ','.join(issue_owners) - additional_issue_fields = get_additional_fields(fuzz_target_path) - if additional_issue_fields: - metadata['additional_issue_fields'] = additional_issue_fields return metadata Let's do this in a way that's more extensible in the future. We can make the format of the ".issue_metadata" file be a json that looks like this: ``` { "additional_fields": ..., } ``` That way, if we need to add additional metadata, we can just add to this json object without breaking the format. if issue_owners: metadata['issue_owners'] = ','.join(issue_owners) + issue_metadata = get_issue_metadata(fuzz_target_path) + if issue_metadata: + metadata['issue_metadata'] = issue_metadata return metadata
codereview_python_data_4045
label : [None | string] Label for legend - margins : [None | list] (default=None) - Horizontal and vertical plot margins. List should contain exactly two elements. Returns ------- ```suggestion margins : Sequence of 2 numbers or None (default=None) The sequence contains horizontal and vertical axis margins. Adjust to avoid image being clipped. ``` label : [None | string] Label for legend + margins : Sequence of 2 numbers or None (default=None) + The sequence contains horizontal and vertical axis margins. Adjust to avoid image being clipped. Returns -------
codereview_python_data_4049
for _ in range(3): pipe.run() -def test_affine_transforms_cpu(): check_no_input(fn.translate_transform, offset=(2, 3)) check_no_input(fn.scale_transform, scale=(2, 3)) check_no_input(fn.rotate_transform, angle=30.0) check_no_input(fn.shear_transform, shear=(2., 1.)) # ToDo add tests for DLTensorPythonFunction if easily possible I would split it into separate test cases, but maybe it is an overkill... for _ in range(3): pipe.run() +def test_affine_translate_cpu(): check_no_input(fn.translate_transform, offset=(2, 3)) + +def test_affine_scale_cpu(): check_no_input(fn.scale_transform, scale=(2, 3)) + +def test_affine_rotate_cpu(): check_no_input(fn.rotate_transform, angle=30.0) + +def test_affine_shear_cpu(): check_no_input(fn.shear_transform, shear=(2., 1.)) # ToDo add tests for DLTensorPythonFunction if easily possible
codereview_python_data_4055
(np.array([[1, 2], [3, 4]], dtype=np.intp), np.array([1, 2, 3], dtype=np.intp)), ]) def test_in2d_VE(arr1, arr2): - with pytest.raises(ValueError): _in2d(arr1, arr2) ```suggestion with pytest.raises(ValueError, match="Both arrays must be (n, 2) arrays"): ``` (np.array([[1, 2], [3, 4]], dtype=np.intp), np.array([1, 2, 3], dtype=np.intp)), ]) def test_in2d_VE(arr1, arr2): + with pytest.raises(ValueError, match="Both arrays must be (n, 2) arrays"): _in2d(arr1, arr2)
codereview_python_data_4056
'name': 'id', 'schema': {'type': 'string'}, 'required': 'true', - 'description': 'Return UUID of the objective to be retrieved'}]) @aiohttp_apispec.querystring_schema(BaseGetOneQuerySchema) @aiohttp_apispec.response_schema(ObjectiveSchema(partial=True), description='Returns single objective in ObjectiveSchema format.') You can remove "Return" here - the URL parameter just specifies which objective to return 'name': 'id', 'schema': {'type': 'string'}, 'required': 'true', + 'description': 'UUID of the objective to be retrieved'}]) @aiohttp_apispec.querystring_schema(BaseGetOneQuerySchema) @aiohttp_apispec.response_schema(ObjectiveSchema(partial=True), description='Returns single objective in ObjectiveSchema format.')
codereview_python_data_4058
import re from requests.models import Request from localstack.utils.common import to_str from localstack.services.generic_proxy import ProxyListener -AWS_JSON_CONTENT_TYPE = 'application/x-amz-json-1.1' - class ProxyListenerCloudWatchLogs(ProxyListener): def forward_request(self, method, path, data, headers): nit: We could import `APPLICATION_AMZ_JSON_1_1` from `constants.py` here. import re from requests.models import Request from localstack.utils.common import to_str +from localstack.constants import APPLICATION_AMZ_JSON_1_1 from localstack.services.generic_proxy import ProxyListener class ProxyListenerCloudWatchLogs(ProxyListener): def forward_request(self, method, path, data, headers):
codereview_python_data_4061
# Pop these off first because they aren't expected by the function. manual_partition = kwargs.pop("manual_partition", False) lengths = kwargs.pop("_lengths", None) - kwargs.pop("_transposed", False) dataframe = pandas.concat(list(partitions), axis=axis, copy=False) result = func(dataframe, **kwargs) Can we not pass in `_transposed` since we do not use it? # Pop these off first because they aren't expected by the function. manual_partition = kwargs.pop("manual_partition", False) lengths = kwargs.pop("_lengths", None) dataframe = pandas.concat(list(partitions), axis=axis, copy=False) result = func(dataframe, **kwargs)
codereview_python_data_4063
self.last_feat_output_conv = ConvModule( in_channels[-1], feat_channels, - kernel_size=1, bias=self.use_bias, - norm_cfg=norm_cfg) self.mask_feature = Conv2d( feat_channels, out_channels, kernel_size=3, stride=1, padding=1) last_feat_output_conv -> last_feat_conv self.last_feat_output_conv = ConvModule( in_channels[-1], feat_channels, + kernel_size=3, + padding=1, + stride=1, bias=self.use_bias, + norm_cfg=norm_cfg, + act_cfg=act_cfg) self.mask_feature = Conv2d( feat_channels, out_channels, kernel_size=3, stride=1, padding=1)
codereview_python_data_4068
# (in case this is a specialization) # specialized_cpdefs [DefNode] list of specialized cpdef DefNodes # py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign - # - # decorator_call_tree StatList node containing LetNodes containing SimpleCallNodes - # Used to remove __Pyx_Method_ClassMethod for fused functions child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"] outer_attrs = ["decorators", "return_type_annotation"] I'm not so sure that the `DefNode` should know its decorator calls. They should be part of the name assignment, and not necessarily the `DefNode` itself. # (in case this is a specialization) # specialized_cpdefs [DefNode] list of specialized cpdef DefNodes # py_cfunc_node PyCFunctionNode/InnerFunctionNode The PyCFunction to create and assign child_attrs = ["args", "star_arg", "starstar_arg", "body", "decorators", "return_type_annotation"] outer_attrs = ["decorators", "return_type_annotation"]
codereview_python_data_4069
{"weight": 118, "value": 229}, {"weight": 120, "value": 240}]), 1458) - # Utility functions - def setUp(self): - try: - self.assertRaisesRegex - except AttributeError: - self.assertRaisesRegex = self.assertRaisesRegexp - - def assertRaisesWithMessage(self, exception): - return self.assertRaisesRegex(exception, r".+") - if __name__ == "__main__": unittest.main() You don't need to include the Utility functions unless they are used. In this case, they are not. {"weight": 118, "value": 229}, {"weight": 120, "value": 240}]), 1458) if __name__ == "__main__": unittest.main()
codereview_python_data_4071
code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME: these still need to get initialised even with the limited-API - for slot in TypeSlots.get_slot_table(code.globalstate.directives).slot_table: slot.generate_dynamic_init_code(scope, code) code.putln("#endif") I wonder if the `SlotTable` should just be iterable. code.putln("#if !CYTHON_COMPILING_IN_LIMITED_API") # FIXME: these still need to get initialised even with the limited-API + for slot in TypeSlots.get_slot_table(code.globalstate.directives): slot.generate_dynamic_init_code(scope, code) code.putln("#endif")
codereview_python_data_4075
# Even if the revoke failed, remove the cookie. logs.log_error('Failed to revoke session cookie.') - response = Response() response.delete_cookie('session') - return self.redirect(request.get('dest'), response=response) I'm a bit confused how this works here. redirect doesn't take in a response argument? And again, we can delete the cookie from after getting the return value from self.redirect instead of doiung this. # Even if the revoke failed, remove the cookie. logs.log_error('Failed to revoke session cookie.') + response = self.redirect(request.get('dest')) response.delete_cookie('session') + return response
codereview_python_data_4083
if config.get_cai_enabled(): # TODO: When CAI supports resource exclusion, update the following # method to handle resource exclusion during export time. - asset_count = cloudasset.load_cloudasset_data(storage.session, config, tracer) LOGGER.info('%s total assets loaded from Cloud Asset data.', asset_count) No need to pass tracer explicitly here, since this method call is in the same context as the calling method. The tracer will be inferred from context and set to the `tracer` argument of the method automatically. if config.get_cai_enabled(): # TODO: When CAI supports resource exclusion, update the following # method to handle resource exclusion during export time. + asset_count = cloudasset.load_cloudasset_data(storage.session, config) LOGGER.info('%s total assets loaded from Cloud Asset data.', asset_count)
codereview_python_data_4085
Unknown = "Unknown" Upgrade = "Upgrade" Update = "Update" - HandleRemoteAccess = "RemoteAccessHandling" SHOULD_ENCODE_MESSAGE_LEN = 80 > HandleRemoteAccess = "Remot [](start = 3, length = 28) Please follow the existing convention. 1. Alphabetize 2. Constant has the same value as the name. Unknown = "Unknown" Upgrade = "Upgrade" Update = "Update" SHOULD_ENCODE_MESSAGE_LEN = 80
codereview_python_data_4086
Within each run, the number of frames to analyse [50] pbc : bool, optional Whether to consider periodic boundaries in calculations [``True``] - - - .. versionchanged:: 0.20.0 - If donors are not given, an attempt to automatically find these is done """ def __init__(self, universe, Add text explaining that `find_hydrogen_donors()` might be used, link to its docs. Within each run, the number of frames to analyse [50] pbc : bool, optional Whether to consider periodic boundaries in calculations [``True``] """ def __init__(self, universe,
codereview_python_data_4087
Return: Pandas series with the sum of each numerical column or row. """ - return self._process_sum_prod(pandas.DataFrame.sum, **kwargs).fillna(0) # END Full Reduce operations Why do we need to add this? Return: Pandas series with the sum of each numerical column or row. """ + return self._process_sum_prod(pandas.DataFrame.sum, **kwargs) # END Full Reduce operations
codereview_python_data_4090
wmsg = "The `network` attribute was deprecated in MDAnalysis 2.0.0" with pytest.warns(DeprecationWarning, match=wmsg): assert_equal(wb.network, wb.results.network) - - wb.generate_table() - wmsg = "The `table` attribute was deprecated in MDAnalysis 2.0.0" - with pytest.warns(DeprecationWarning, match=wmsg): - assert_equal(wb.table, wb.results.table) remove (or test deprecation of `table`, in case we do that) wmsg = "The `network` attribute was deprecated in MDAnalysis 2.0.0" with pytest.warns(DeprecationWarning, match=wmsg): assert_equal(wb.network, wb.results.network)
codereview_python_data_4091
thread_delay = environment.get_value('THREAD_DELAY') thread_error_occurred = False - # TODO: remove environment variable once refactor is complete - # Set multi-armed bandit strategy selection distribution as an environment - # variable so we can access it in launcher. - if environment.get_value('USE_BANDIT_STRATEGY_SELECTION'): - distribution = get_fuzz_strategy_distribution() - environment.set_value('STRATEGY_SELECTION_DISTRIBUTION', distribution) # Reset memory tool options. environment.reset_current_memory_tool_options(redzone_size=redzone) Instead of having an if here, move everything into the get_fuzz_strategy_distribution function (and rename it to something more appropriate for that case). thread_delay = environment.get_value('THREAD_DELAY') thread_error_occurred = False + set_strategy_distribution_in_env() # Reset memory tool options. environment.reset_current_memory_tool_options(redzone_size=redzone)
codereview_python_data_4096
    return compare_versions(mod_version, version)

def check_program_version(command, version, **kwargs):
-    output = subprocess.check_output(command.split(' '), stderr=subprocess.STDOUT, shell=False)
    return compare_versions(output.decode('utf-8'), version)

def compare_versions(version_string, minimum_version):

I just want to check, what's the effect of this change? Does this mean stderr is redirected to stdout, which in turn gets saved into the output var (e.g. both stderr and stdout are mingled in output?)?

    return compare_versions(mod_version, version)

def check_program_version(command, version, **kwargs):
+    output = subprocess.check_output(command.split(' '), stderr=subprocess.STDOUT, shell=False, timeout=10)
    return compare_versions(output.decode('utf-8'), version)

def compare_versions(version_string, minimum_version):
codereview_python_data_4100
(renamed as appropriate) to be updated in an event.
"""
stream_params = set(util.stream_parameters(self.streams))
updated_streams = []
for stream in self.streams:
-    rkwargs = util.rename_stream_kwargs(stream, kwargs, reverse=True)
    stream.update(**dict(rkwargs, trigger=False))
    updated_streams.append(stream)

Maybe I'm not following the logic correctly in the utilities but won't this try to send the supplied kwargs to all the streams even though they may not apply? That's what the overlap bit was for I assume.

(renamed as appropriate) to be updated in an event.
"""
stream_params = set(util.stream_parameters(self.streams))
+    for k in stream_params - set(kwargs.keys()):
+        raise KeyError('Key %r does not correspond to any stream parameter')
+
updated_streams = []
for stream in self.streams:
+    applicable_kws = {k:v for k,v in kwargs.items()
+                      if k in set(stream.contents.keys())}
+    rkwargs = util.rename_stream_kwargs(stream, applicable_kws, reverse=True)
    stream.update(**dict(rkwargs, trigger=False))
    updated_streams.append(stream)
codereview_python_data_4108
def _assert_in(output, string):
-    assert_(string in output,
-            "Output '{0}' does not match required format '{1}'.".format(
-                output.replace('\r', '\\r'), string.replace('\r', '\\r')))

def test_default_ProgressMeter(buffer, n=101, interval=10):
    format = "Step {step:5d}/{numsteps} [{percentage:5.1f}%]"

use plain pytest `assert`:
```python
assert string in output, "Output '{0}' does not match required format '{1}'.".format(output.replace('\r', '\\r'), string.replace('\r', '\\r')))
```

def _assert_in(output, string):
    assert string in output

def test_default_ProgressMeter(buffer, n=101, interval=10):
    format = "Step {step:5d}/{numsteps} [{percentage:5.1f}%]"
codereview_python_data_4114
import torch as th
import backend as F
-from dgl.backend.pytorch.sparse_emb import NodeEmbedding
-from dgl.backend.pytorch.sparse_optim import SparseAdam, SparseAdagrad
import unittest, os

Should avoid this namespace even in unittest. Try use user-facing ns as much as possible.

import torch as th
import backend as F
+from dgl.nn import NodeEmbedding
+from dgl.optim import SparseAdam, SparseAdagrad
import unittest, os
codereview_python_data_4117
    'Updating code for Lambda "%s" from location: %s' % (props["FunctionName"], code)
)
-    code = self.get_deploy_templates()["create"]["parameters"].get("Code")(props)
client.update_function_code(FunctionName=props["FunctionName"], **code)
if "Environment" in update_props:
    environment_variables = update_props["Environment"].get("Variables", {})

nit: might be better to "extract" this code param from props generation in get_deploy_templates and use it here as well

    'Updating code for Lambda "%s" from location: %s' % (props["FunctionName"], code)
)
+    code = LambdaFunction.get_lambda_code_param(props)
client.update_function_code(FunctionName=props["FunctionName"], **code)
if "Environment" in update_props:
    environment_variables = update_props["Environment"].get("Variables", {})
codereview_python_data_4119
-from utils import FakeLanguages
from librelingo_utils import clean_word

this is confusing
1. why extract this value to a constant/class? I don't see the point in that as this function basically works on generic strings.
2. `FakeLanguages.LANG_1` sounds like this is a language. But it's actually a cleaned word. I would probably remove this `FakeLanguages` thing

from librelingo_utils import clean_word
codereview_python_data_4120
--1# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org

the `return` on line 0 is weird, but either way is there a reason for having `-1` on this line?

+# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- https://www.mdanalysis.org
codereview_python_data_4125
"""
path = url.path()
if path:
-    return 'text/html', utils.read_file("javascript" + path, binary=False)
else:
    raise QuteSchemeError("No file specified", ValueError())

You could do something like this:

```python
path = os.sep.join(url.path().split('/'))
..., utils.read_file(os.path.join("javascript", path), binary=False)
```

"""
path = url.path()
if path:
+    path = "javascript" + os.sep.join(path.split('/'))
+    return 'text/html', utils.read_file(path, binary=False)
else:
    raise QuteSchemeError("No file specified", ValueError())
codereview_python_data_4129
    g.edata[EID] = eid_tensor
    return g

def sample_neighbors(dist_graph, nodes, fanout, edge_dir='in', prob=None, replace=False):
    """Sample from the neighbors of the given nodes from a distributed graph.

Local sample can only work with single process?

    g.edata[EID] = eid_tensor
    return g

+LocalSampledGraph = namedtuple('LocalSampledGraph', 'global_src global_dst global_eids')

def sample_neighbors(dist_graph, nodes, fanout, edge_dir='in', prob=None, replace=False):
    """Sample from the neighbors of the given nodes from a distributed graph.
codereview_python_data_4132
try:
    command = shlex.split(' '.join(command))
-    result = subprocess.check_output(
-        command,
-        env=environ,
-        universal_newlines=True,
-        encoding="utf-8",
-        errors="ignore")
    return parse_checkers(result)
except (subprocess.CalledProcessError, OSError):
    return []

Here again, is it a good idea to join and split instead of keeping the command as the original list?

try:
    command = shlex.split(' '.join(command))
+    result = subprocess.check_output(command, env=environ,
+                                     universal_newlines=True)
    return parse_checkers(result)
except (subprocess.CalledProcessError, OSError):
    return []
codereview_python_data_4135
    list: indexes.
"""
-    indices = [random.randint(0, len(dataset)) for _ in range(3)]
-    return indices

def _mosaic_transform(self, results):
    """Mosaic transform function.

indexes or indices? seems using indexes

    list: indexes.
"""
+    indexes = [random.randint(0, len(dataset)) for _ in range(3)]
+    return indexes

def _mosaic_transform(self, results):
    """Mosaic transform function.
codereview_python_data_4143
    res += i
if in_list:
    res += '</ul>'
-    return res
\ No newline at end of file

There was an EOL removed from this file. This should be restored.

    res += i
if in_list:
    res += '</ul>'
\ No newline at end of file
+    return res
codereview_python_data_4147
# All the unique values in f are the fragments
frags = tuple(
    sorted(
-        [AtomGroup(list(a.ats)) for a in set(f.values())],
        key=lambda x: x[0].index
    )
)

This doesn't ensure that the contents of each fragment are sorted

# All the unique values in f are the fragments
frags = tuple(
    sorted(
+        [AtomGroup(list(sorted(a.ats))) for a in set(f.values())],
        key=lambda x: x[0].index
    )
)
codereview_python_data_4148
os.remove(jmeter_dist.name)
if self.check_if_installed():
-    cleaner = JarCleaner(self.log)
-    cleaner.clean(os.path.join(dest, 'lib'))
    return self.tool_path
else:
    raise RuntimeError("Unable to run %s after installation!" % self.tool_name)
-
-
class JMeterPlugins(RequiredTool):
    """ JMeter plugins

No need to clean anything for base JMeter. Only plugins need cleaning.

os.remove(jmeter_dist.name)
if self.check_if_installed():
    return self.tool_path
else:
    raise RuntimeError("Unable to run %s after installation!" % self.tool_name)

class JMeterPlugins(RequiredTool):
    """ JMeter plugins
codereview_python_data_4150
num_nodes_dict = defaultdict(int)
for (srctype, etype, dsttype), data in data_dict.items():
    if isinstance(data, tuple):
        src = utils.toindex(data[0],
                            dtype='int32' if idtype == F.int32 else 'int64').tonumpy()
        dst = utils.toindex(data[1],

Add a TODO here: `TODO(minjie): converting to index is unnecessary just for getting the max ID.` I will change them later when I get rid of all the Index object.

num_nodes_dict = defaultdict(int)
for (srctype, etype, dsttype), data in data_dict.items():
    if isinstance(data, tuple):
+        # TODO(minjie): converting to index is unnecessary just for getting the max ID.
        src = utils.toindex(data[0],
                            dtype='int32' if idtype == F.int32 else 'int64').tonumpy()
        dst = utils.toindex(data[1],