codereview_python_data_7885
return self.df.index def time_dtypes(self, shape): - # trigger _compute_dtypes func - self.df._query_compiler._modin_frame._dtypes = None return self.df.dtypes This makes bench strictly engine specific, can we somehow allow it to execute only for pandas backend? return self.df.index def time_dtypes(self, shape): + if ASV_USE_IMPL == "modin": + # trigger _compute_dtypes func + self.df._query_compiler._modin_frame._dtypes = None return self.df.dtypes
codereview_python_data_7888
@classmethod - def validate(cls, dataset): - if dataset._virtual_vdims: - dimensions = dataset.dimensions('key', label='name') - else: - dimensions = dataset.dimensions(label='name') not_found = [d for d in dimensions if d not in dataset.data] if not_found: raise DataError('Following columns specified as dimensions ' Why not make the ``derived_vdims`` flag (or similar, ``validate_vdims`` maybe?) an explicit argument to ``validate``? @classmethod + def validate(cls, dataset, vdims=True): + dim_types = 'key' if vdims else 'all' + dimensions = dataset.dimensions(dim_types, label='name') not_found = [d for d in dimensions if d not in dataset.data] if not_found: raise DataError('Following columns specified as dimensions '
codereview_python_data_7891
parser = await self.data_svc.dao.get('core_parser', dict(ability=x['ability'])) if parser: if parser[0]['name'] == 'json': - matched_facts = parsers._json(parser[0], b64decode(x['output']).decode('utf-8')) elif parser[0]['name'] == 'line': - matched_facts = parsers._line(parser[0], b64decode(x['output']).decode('utf-8')) elif parser[0]['name'] == 'parse_mimikatz': - matched_facts = mimikatz_parser._parse_mimikatz(b64decode(x['output']).decode('utf-8')) else: - matched_facts = parsers._regex(parser[0], b64decode(x['output']).decode('utf-8')) # save facts to DB for match in matched_facts: since we're moving parsers out of this class, let's "promote" them to first-class citizens and change them from private to public functions (i.e., remove the underscore from their names). FANTASTIC idea and execution here though. parser = await self.data_svc.dao.get('core_parser', dict(ability=x['ability'])) if parser: if parser[0]['name'] == 'json': + matched_facts = parsers.json(parser[0], b64decode(x['output']).decode('utf-8')) elif parser[0]['name'] == 'line': + matched_facts = parsers.line(parser[0], b64decode(x['output']).decode('utf-8')) elif parser[0]['name'] == 'parse_mimikatz': + matched_facts = mimikatz_parser.parse_mimikatz(b64decode(x['output']).decode('utf-8')) else: + matched_facts = parsers.regex(parser[0], b64decode(x['output']).decode('utf-8')) # save facts to DB for match in matched_facts:
codereview_python_data_7892
num_total_samples=num_total_samples) avg_factor = sum(avg_factor) - avg_factor = reduce_mean(avg_factor).item() - if avg_factor < EPS: - avg_factor = 1 losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict( Shall we use `avg_factor.clamp_(min=1)` here? num_total_samples=num_total_samples) avg_factor = sum(avg_factor) + avg_factor = reduce_mean(avg_factor).clamp_(min=1).item() losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox)) losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl)) return dict(
codereview_python_data_7894
content_id='Scanner Violations' ) scanner_subject = '{} Complete - {} violation(s) found'.format( - total_violations, email_description) self.email_util.send( email_sender=email_sender, email_recipient=email_recipient, I think, the order of the formatted item needs to be: email description first, and then total_violations content_id='Scanner Violations' ) scanner_subject = '{} Complete - {} violation(s) found'.format( + email_description, total_violations) self.email_util.send( email_sender=email_sender, email_recipient=email_recipient,
codereview_python_data_7896
}; """ - @test.xfail('not sure how to test multi-line semantics, ' - 'as strings no longer preserve original structure') def test_eschema_syntax_type_22(self): """ module test { @vpetrovykh Any suggestions here? }; """ def test_eschema_syntax_type_22(self): """ module test {
codereview_python_data_7898
prefix = s3_configuration.get("Prefix", "") s3 = connect_to_resource("s3") - batched_data = b"".join([base64.b64decode(r.get("Data") or r["data"]) for r in records]) obj_path = get_s3_object_path(stream_name, prefix) try: nit: I think `Data` may potentially contain an empty string (at least kinesis-mock allows to push records with empty data), which could then result in `KeyError` if it falls back to `r"data"]`. We could do something like: ``` batched_data = b"".join([base64.b64decode(r.get("Data", r.get("data"))) for r in records]) ``` ? prefix = s3_configuration.get("Prefix", "") s3 = connect_to_resource("s3") + batched_data = b"".join([base64.b64decode(r.get("Data", r.get("data"))) for r in records]) obj_path = get_s3_object_path(stream_name, prefix) try:
codereview_python_data_7900
'CTX', # Centauri coin but CarTaxi in CC ethaddress_to_identifier('0xf14922001A2FB8541a433905437ae954419C2439'), # noqa: E501 # Direct insurance token but DitCoin in CC 'DRM', # Dreamcoin but Dreamchain in CC - ethaddress_to_identifier('0x82fdedfB7635441aA5A92791D001fA7388da8025'), # noqa: E501 # test_cryptocompare_asset_supportDigital Ticks but Data Exchange in CC 'GNC', # Galaxy network but Greencoin in CC ethaddress_to_identifier('0xfF5c25D2F40B47C4a37f989DE933E26562Ef0Ac0'), # noqa: E501 # Kora network but Knekted in CC ethaddress_to_identifier('0x49bD2DA75b1F7AF1E4dFd6b1125FEcDe59dBec58'), # noqa: E501 # Linkey but LuckyCoin in CC What are you doing here? Is this a wrong copy paste? 'CTX', # Centauri coin but CarTaxi in CC ethaddress_to_identifier('0xf14922001A2FB8541a433905437ae954419C2439'), # noqa: E501 # Direct insurance token but DitCoin in CC 'DRM', # Dreamcoin but Dreamchain in CC + ethaddress_to_identifier('0x82fdedfB7635441aA5A92791D001fA7388da8025'), # noqa: E501 # Digital Ticks but Data Exchange in CC 'GNC', # Galaxy network but Greencoin in CC ethaddress_to_identifier('0xfF5c25D2F40B47C4a37f989DE933E26562Ef0Ac0'), # noqa: E501 # Kora network but Knekted in CC ethaddress_to_identifier('0x49bD2DA75b1F7AF1E4dFd6b1125FEcDe59dBec58'), # noqa: E501 # Linkey but LuckyCoin in CC
codereview_python_data_7904
:param content: what gets written into the file :return: None """ - try: - with open(filename, 'w') as f: - f.write(content) - except Exception as e: - raise def print_stdout(line): What the point of catching the exception to re-raise it right away? :param content: what gets written into the file :return: None """ + with open(filename, 'w') as f: + f.write(content) def print_stdout(line):
codereview_python_data_7907
""" Finds any applicable compositor and applies it. """ - from .overlay import Overlay while True: match = cls.strongest_match(overlay, mode) if match is None: return overlay Not looked ahead yet - I'm hoping to see some new tests showing this new compositor functionality. """ Finds any applicable compositor and applies it. """ + from .overlay import Overlay, CompositeOverlay + if not isinstance(overlay, CompositeOverlay): + overlay = Overlay([overlay]) while True: match = cls.strongest_match(overlay, mode) if match is None: return overlay
codereview_python_data_7908
return "<" + repr(self.__class__.__name__) + ">" class FullSelgroupSelection(Selection): def __init__(self, selgroup): - warnings.warn("Use of 'fullgroup' in selections is deprecated " - "in MDAnalysis '0.11' and will be removed entirely in upcoming " - "releases. Use the equivalent syntax 'global group' instead.", DeprecationWarning) Selection.__init__(self) self._grp = selgroup can you use the numpy deprecation decorator. ``` python from numpy.lib.utils import deprecate @deprecate class Full(): ``` I'm not enttirely sure if this also works for classes but give it a try. It will make the code better readable and this decorator also changes the doc-string automatically return "<" + repr(self.__class__.__name__) + ">" +@deprecate(old_name='fullgroup', new_name='global group') class FullSelgroupSelection(Selection): def __init__(self, selgroup): Selection.__init__(self) self._grp = selgroup
codereview_python_data_7912
self.assertEqual(hit11, query["hit1"]) self.assertEqual(hit11, query["alt1"]) self.assertEqual(hit11, query["alt11"]) - self.assertEqual(hit11.id, "alt1") - self.assertEqual(hit11.id, "alt11") hit11._id_alt = [] def test_setitem_ok_alt_existing(self): These should also be `assertNotEqual` self.assertEqual(hit11, query["hit1"]) self.assertEqual(hit11, query["alt1"]) self.assertEqual(hit11, query["alt11"]) + self.assertNotEqual(hit11.id, "alt1") + self.assertNotEqual(hit11.id, "alt11") hit11._id_alt = [] def test_setitem_ok_alt_existing(self):
codereview_python_data_7914
import numpy as np from numpy.testing import assert_allclose, assert_equal - import random from MDAnalysis.lib import transformations as t +1 for removing `TestCase`. I don't understand why we actually need it, even under `nose`, since a test class inheriting from `object` will work fine, even with fixtures like `setUp` and such. import numpy as np from numpy.testing import assert_allclose, assert_equal +import itertools import random from MDAnalysis.lib import transformations as t
codereview_python_data_7922
db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_indexes.sql')) def drop_tables(self): db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'drop_tables.sql')) def drop_schema(self): I learned recently that you can use `DROP SCHEMA CASCADE` to drop all tables in a schema. As we have mostly schemas here, perhaps we could just drop the `"user"` table and `statistics` and `api_compat` schemas. Are we still using the `listen` and `listen_json` tables?? db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'create_indexes.sql')) def drop_tables(self): + self.drop_schema() db.run_sql_script(os.path.join(ADMIN_SQL_DIR, 'drop_tables.sql')) def drop_schema(self):
codereview_python_data_7923
return interpreter -def get_execute_command(file_to_execute, is_blackbox_fuzzer=False): """Return command to execute |file_to_execute|.""" - interpreter_path = get_interpreter( - file_to_execute, is_blackbox_fuzzer=is_blackbox_fuzzer) # Hack for Java scripts. file_to_execute = file_to_execute.replace('.class', '') Can you clean up use of is_blackbox_fuzzer argument as it is not needed. return interpreter +def get_execute_command(file_to_execute): """Return command to execute |file_to_execute|.""" + interpreter_path = get_interpreter(file_to_execute) # Hack for Java scripts. file_to_execute = file_to_execute.replace('.class', '')
codereview_python_data_7924
yield source - def get_keystore_config(self): - return self.get(self.FIELD_KEYSTORE_CONFIG) - def get_requests(self, parser=RequestParser, require_url=True): """ Generator object to read requests This is JMeter-specific thing, it can't be in the core yield source def get_requests(self, parser=RequestParser, require_url=True): """ Generator object to read requests
codereview_python_data_7928
PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; """ In addition to this here, we also should have the entirety of the unparsed (i.e. raw) Member response stored in one big chunk in a separate table, with a 1:1 relation to Groups. Please take a look at the RAW_PROJECT_IAM_POLICIES_TABLE. Okay, to stick a TODO here and follow-up. This is by design, as we might need it in case of troubleshooting our pipeline. Storing this in a separate table is non-ideal, but it's a trade-off between the speed of loading to separate table, vs inserting into the Groups table. PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8; """ + +# TODO: Add a RAW_GROUP_MEMBERS_TABLE.
codereview_python_data_7941
# the fd from epoll(7) anymore, causing a 100% CPU poll loop. fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) # Safely call hub.add_reader for the determined fd - self.iterate_file_descriptors_safely( [fd], None, hub.add_reader, self._event_process_exit, hub, proc) i am currently trying this out. it did not solve my problem OOB, but i am trying if it works, when the `epoll.unregister` is not swallowing all kinds of exceptions. `managed_list` is a `set`, when being called below with `all_inqueues`, which results in a `TypeError`. # the fd from epoll(7) anymore, causing a 100% CPU poll loop. fd = proc._sentinel_poll = os.dup(proc._popen.sentinel) # Safely call hub.add_reader for the determined fd + iterate_file_descriptors_safely( [fd], None, hub.add_reader, self._event_process_exit, hub, proc)
codereview_python_data_7942
class new_build_ext(_build_ext, object): - user_options = _build_ext.user_options[:] - - user_options.extend([ ('c-build-dir=', None, "directory for generated c files"), - ]) def initialize_options(self): _build_ext.initialize_options(self) This can be simplified. ```suggestion user_options = _build_ext.user_options + [ ('c-build-dir=', None, "directory for generated c files"), ] ``` class new_build_ext(_build_ext, object): + user_options = _build_ext.user_options + [ ('c-build-dir=', None, "directory for generated c files"), + ] def initialize_options(self): _build_ext.initialize_options(self)
codereview_python_data_7947
self._dump_slack_output(violation.get('violation_data'), 1)) # Wait 30 seconds before retrying: https://api.slack.com/docs/rate-limits - @retry(wait_exponential_multiplier=30000, wait_exponential_max=60000, stop_max_attempt_number=2) def _send(self, payload): """Sends a post to a Slack webhook url We should specify the exception to retry instead of retrying on all general exceptions. self._dump_slack_output(violation.get('violation_data'), 1)) # Wait 30 seconds before retrying: https://api.slack.com/docs/rate-limits + @retry(retry_on_exception=retryable_exceptions.is_retryable_exception, + wait_exponential_multiplier=30000, wait_exponential_max=60000, stop_max_attempt_number=2) def _send(self, payload): """Sends a post to a Slack webhook url
codereview_python_data_7949
-------- >>> G = nx.Graph() >>> G.add_path([0, 1, 2, 3, 4]) - >>> print(list(nx.dfs_preorder_nodes(G,0))) [0, 1, 2, 3, 4] - >>> print(list(nx.dfs_preorder_nodes(G,0,2))) [0, 1, 2] Notes You don't need to have `print` here, just do `list(...)`, since it is part of an interactive shell. -------- >>> G = nx.Graph() >>> G.add_path([0, 1, 2, 3, 4]) + >>> list(nx.dfs_preorder_nodes(G,0)) [0, 1, 2, 3, 4] + >>> list(nx.dfs_preorder_nodes(G,0,2)) [0, 1, 2] Notes
codereview_python_data_7951
objects. index (pandas.Index, list, ObjectID): The row index for this DataFrame. - columns (pandas.Index): The column names for this pandas, in pandas Index object. dtype: Data type to force. Only a single dtype is allowed. If None, infer Not sure what happened here objects. index (pandas.Index, list, ObjectID): The row index for this DataFrame. + columns (pandas.Index): The column names for this DataFrame, in pandas Index object. dtype: Data type to force. Only a single dtype is allowed. If None, infer
codereview_python_data_7958
validator_info.get_action_id(): AuthConstraintOr([AuthConstraint(TRUSTEE, 1), AuthConstraint(STEWARD, 1), AuthConstraint(NETWORK_MONITOR, 1)]), - create_revoc_reg_def.get_action_id(): trust_anchor_or_steward_or_trustee_owners_constraint, - create_revoc_reg_entry.get_action_id(): trust_anchor_or_steward_or_trustee_owners_constraint } # Edit Trustee: What does `owner` mean for RevocRegDef entry? Currently we don't require that the author of RevocRegDef is the author of corresponding CredDef (unlike the case of ReovRegEntry where we must require that the author of RevocRegEntry is also the author of the corresponding RevocRegDef). So, maybe we should use just `trust_anchor_or_steward_or_trustee_constraint` for `create_revoc_reg_def`? validator_info.get_action_id(): AuthConstraintOr([AuthConstraint(TRUSTEE, 1), AuthConstraint(STEWARD, 1), AuthConstraint(NETWORK_MONITOR, 1)]), + add_revoc_reg_def.get_action_id(): trust_anchor_or_steward_or_trustee_constraint, + add_revoc_reg_entry.get_action_id(): trust_anchor_or_steward_or_trustee_constraint, + edit_revoc_reg_def.get_action_id(): trust_anchor_or_steward_or_trustee_constraint } # Edit Trustee:
codereview_python_data_7959
if fields[0].strip() == "GROUP": return self.__parse_group(fields) elif fields[0].strip() == "REQUEST": - if self.guessed_gatling_version == "3.X": - fields.insert(1, 'Taurus Scenario') return self.__parse_request(fields) else: return None Is it always "Taurus Scenario"? I guess it can be different for user's scala files if fields[0].strip() == "GROUP": return self.__parse_group(fields) elif fields[0].strip() == "REQUEST": + del fields[0] + if self.guessed_gatling_version != "3.X": + del fields[0] return self.__parse_request(fields) else: return None
codereview_python_data_7969
if columns is None: columns = {} - for term in columns.values(): if not isinstance(term, ComputableTerm): raise TypeError( - '"{term}" is not a valid pipeline column. Did you mean ' - 'to add ".latest"?'.format(term=term) ) self._columns = columns self._screen = screen Showing the name of the column here would be nice as well. if columns is None: columns = {} + for column_name, term in columns.items(): if not isinstance(term, ComputableTerm): raise TypeError( + "Column {column_name!r} contains an invalid pipeline term " + "({term}). Did you mean to append '.latest'?".format( + column_name=column_name, term=term, + ) ) self._columns = columns self._screen = screen
codereview_python_data_7971
(['redhat', '7.7.1908', 'Core'], False), (['bigip', '15.0.1', 'Final'], False), (['gaia', '273.562', 'R80.30'], False), - (['debian' '9.1', ''], False), # pylint: disable=implicit-str-concat,implicit-str-concat-in-sequence ] for (distro, supported) in test_cases: Nice! This is my bad. I just submitted #1986 fixing this. We can remove this suppression. (['redhat', '7.7.1908', 'Core'], False), (['bigip', '15.0.1', 'Final'], False), (['gaia', '273.562', 'R80.30'], False), + (['debian', '9.1', ''], False), ] for (distro, supported) in test_cases:
codereview_python_data_7973
Discrete Algorithms, 132--139, 2003. """ - def _choose_node(candidates, n_nodes, delta): if delta > 0: - bias_sum = n_nodes * delta p_delta = bias_sum / (bias_sum + len(candidates)) if seed.random() < p_delta: - return seed.randint(0, n_nodes) return seed.choice(candidates) if create_using is None or not hasattr(create_using, "_adj"): Implicit here is an assumption that the nodes are essentially drawn from `range(0, n_nodes)`. For cases where this isn't true, the returned nodes might seem strange to users: ```python >>> G = nx.scale_free_graph(10) >>> H = nx.relabel_nodes(G, {i: 2**i for i in range(10)} >>> H.nodes() NodeView((1, 2, 4, 8, 16, 32, 64, 128, 256, 512)) >>> K = nx.scale_free_graph(20, create_using=H, delta_in=1e6, delta_out=1e6) >>> K.nodes() NodeView((1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 7, 11, 12, 5, 14, 0, 9, 13, 18, 17)) ``` AFAICT this doesn't affect the algorithm behavior so it isn't wrong. However it might be worth just adding a note to the docstring about how new nodes are selected so that users starting with an initial graph that has non-integer nodes or unevenly spaced integer nodes know what to expect. Discrete Algorithms, 132--139, 2003. """ + def _choose_node(candidates, node_list, delta): if delta > 0: + bias_sum = len(node_list) * delta p_delta = bias_sum / (bias_sum + len(candidates)) if seed.random() < p_delta: + return seed.choice(node_list) return seed.choice(candidates) if create_using is None or not hasattr(create_using, "_adj"):
codereview_python_data_7976
self.assertEqual(str(e.exception), expected) -class TestDownsampledRowwiseOperation(WithSeededRandomPipelineEngine, - ZiplineTestCase): T = partial(pd.Timestamp, tz='utc') - START_DATE = T('2014-01-02') - END_DATE = T('2014-02-03') HALF_WAY_POINT = T('2014-01-15') dates = pd.date_range(START_DATE, END_DATE) we aren't using a trading calendar, this doesn't matter, if anything shifting the dates here obscures the point of the test self.assertEqual(str(e.exception), expected) +class TestDownsampledRowwiseOperation(WithAssetFinder, ZiplineTestCase): + T = partial(pd.Timestamp, tz='utc') + START_DATE = T('2014-01-01') + END_DATE = T('2014-02-01') HALF_WAY_POINT = T('2014-01-15') dates = pd.date_range(START_DATE, END_DATE)
codereview_python_data_7979
summary='Retrieve Facts', description='Retrieve facts by criteria. Use fields from the `FactSchema` in the request body to filter retrieved facts.') @aiohttp_apispec.querystring_schema(BaseGetAllQuerySchema) - @aiohttp_apispec.response_schema(FactSchema(many=True, partial=True), description='Returns facts in `FactSchema` format.') async def get_facts(self, request: web.Request): knowledge_svc_handle = self._api_manager.knowledge_svc fact_data = await self._api_manager.extract_data(request) For this, it might be helpful for users just to change it to something like `Returns matching facts in `FactSchema` format.` summary='Retrieve Facts', description='Retrieve facts by criteria. Use fields from the `FactSchema` in the request body to filter retrieved facts.') @aiohttp_apispec.querystring_schema(BaseGetAllQuerySchema) + @aiohttp_apispec.response_schema(FactSchema(many=True, partial=True), description='Returns matching facts in `FactSchema` format.') async def get_facts(self, request: web.Request): knowledge_svc_handle = self._api_manager.knowledge_svc fact_data = await self._api_manager.extract_data(request)
codereview_python_data_7985
self.log = log.getChild(self.__class__.__name__) if parent: - for method, args, kwargs in parent.get_queue(): - self._queue.append((self.__getattribute__(method), args, kwargs)) - - pass def get_queue(self): - for method, args, kwargs in self._queue: - yield method.__name__, args, kwargs def set(self, *args, **kwargs): self._add_to_queue(self._set, *args, **kwargs) Could just use list.extend method instead of iterating self.log = log.getChild(self.__class__.__name__) if parent: + self._queue.extend( + [(self.__getattribute__(method), args, kwargs) for method, args, kwargs in parent.get_queue()]) def get_queue(self): + return [(method.__name__, args, kwargs) for method, args, kwargs in self._queue] def set(self, *args, **kwargs): self._add_to_queue(self._set, *args, **kwargs)
codereview_python_data_7992
def __init__( self, name: str, default: typing.Any, - typespec: typing.Type, help: str, choices: typing.Optional[typing.Sequence[str]] ) -> None: typecheck.check_type(name, default, typespec) self.name = name - self._default = default self.typespec = typespec self.value = unset self.help = help self.choices = choices Would we ever have mutable defaults? Right now we only have sequences, for which we could use tuples. We don't have dicts yet, but we could use `types.MappingProxyType` as a "frozendict". This would even be a bit nicer as that you cannot accidentally call `.append()` without noticing that it's a no-op. def __init__( self, name: str, + typespec: type, default: typing.Any, help: str, choices: typing.Optional[typing.Sequence[str]] ) -> None: typecheck.check_type(name, default, typespec) self.name = name self.typespec = typespec + self._default = default self.value = unset self.help = help self.choices = choices
codereview_python_data_7995
DataFrame with asset_id as index and 'start_date'/'end_date' columns. calendar : pd.DatetimeIndex The trading calendar to use. - holes : dict[int -> tuple[pd.Timestamps]] A dict mapping asset ids to the tuple of dates that should have - no data for that asset in the output. Yields ------ We should add a note here that this is optional and that the default is no holes. DataFrame with asset_id as index and 'start_date'/'end_date' columns. calendar : pd.DatetimeIndex The trading calendar to use. + holes : dict[int -> tuple[pd.Timestamps]], optional A dict mapping asset ids to the tuple of dates that should have + no data for that asset in the output. Default is no holes. Yields ------
codereview_python_data_8002
def parseTimeReference(ref, now): - if not ref or ref == 'now': return datetime.utcnow().replace(tzinfo=pytz.utc) #Time-of-day reference i = ref.find(':') hour,min = 0,0 Instead of `datetime.utcnow().replace(tzinfo=pytz.utc)` you can do ``` python from django.utils import timezone timezone.now() # returns the tz-aware current datetime ``` You should also add `USE_TZ = True` to `webapp/graphite/settings.py` def parseTimeReference(ref, now): + if not ref or ref == 'now': return timezone.now() #Time-of-day reference i = ref.find(':') hour,min = 0,0
codereview_python_data_8006
# This is temporary, until we implement `subtransaction` # functionality of RFC1004 - warnings.filterwarnings('ignore', message=r'The "transaction\(\)" method is deprecated' r' and is scheduled to be removed', category=DeprecationWarning) ```suggestion warnings.filterwarnings( 'ignore', message=r'The "transaction\(\)" method is deprecated' ``` # This is temporary, until we implement `subtransaction` # functionality of RFC1004 + warnings.filterwarnings( + 'ignore', message=r'The "transaction\(\)" method is deprecated' r' and is scheduled to be removed', category=DeprecationWarning)
codereview_python_data_8007
-"""Tests for google3.experimental.users.ahoying.forseti-security.tests.services.inventory.cai_temporary_storage.""" from future import standard_library standard_library.install_aliases() We might want to remove the ref to google3 here +"""Tests for google.services.inventory.cai_temporary_storage.""" from future import standard_library standard_library.install_aliases()
codereview_python_data_8009
base = '%s_%s' % (product.id, stockrecord.id) if not options: return base - repr_options = [(repr(option['option']), repr(option['value'])) - for option in options] - repr_options.sort() return "%s_%s" % (base, zlib.crc32(repr(repr_options).encode('utf8'))) def _get_total(self, property): I'd prefer not to change the content of what is being hashed, since this would invalidate all existing hashes. I think we can make the existing hash values deterministic by doing this instead: ``` repr_options.sort(key=lambda a: a['option']) ``` base = '%s_%s' % (product.id, stockrecord.id) if not options: return base + repr_options = [{'option': repr(option['option']), + 'value': repr(option['value'])} for option in options] + repr_options.sort(key=itemgetter('option')) return "%s_%s" % (base, zlib.crc32(repr(repr_options).encode('utf8'))) def _get_total(self, property):
codereview_python_data_8011
Number of pixels to spread on all sides.""") def _apply_spreading(self, array): - replace_none_how = ds_version <= '0.11.1' and (self.p.how is None) - how = 'source' if replace_none_how else self.p.how - return tf.spread(array, px=self.p.px, how=how, shape=self.p.shape) class dynspread(SpreadingOperation): That's some pretty convoluted logic. What about: ``` def _calculate_how_from_default(how): """Handle changes in values accepted for `how` across Datashader versions""" if ds_version <= '0.11.1' and (how is None): how = 'source' return how how = _calculate_how_from_default(self.p.how) ``` Number of pixels to spread on all sides.""") def _apply_spreading(self, array): + return tf.spread(array, px=self.p.px, how=self.p.how, shape=self.p.shape) class dynspread(SpreadingOperation):
codereview_python_data_8027
TelemetryEventParam(CommonTelemetryEventSchema.TaskName, threading.current_thread().getName()), TelemetryEventParam(CommonTelemetryEventSchema.KeywordName, '')] - if event.eventId in (None, TELEMETRY_EVENT_EVENT_ID) and event.providerId in (None, TELEMETRY_EVENT_PROVIDER_ID): # Currently only the GuestAgentExtensionEvents has these columns, the other tables dont have them so skipping # this data in those tables. - # By default, if no eventId/providerId is specified, add this data too. common_params.extend([TelemetryEventParam(GuestAgentExtensionEventsSchema.ExtensionType, event.file_type), TelemetryEventParam(GuestAgentExtensionEventsSchema.IsInternal, False)]) From our conversation, these 2 should not be None and it does not really matter whether those columns are added either way (None and not None), so removing the explicit check for None makes the condition a little clearer. TelemetryEventParam(CommonTelemetryEventSchema.TaskName, threading.current_thread().getName()), TelemetryEventParam(CommonTelemetryEventSchema.KeywordName, '')] + if event.eventId == TELEMETRY_EVENT_EVENT_ID and event.providerId == TELEMETRY_EVENT_PROVIDER_ID: # Currently only the GuestAgentExtensionEvents has these columns, the other tables dont have them so skipping # this data in those tables. common_params.extend([TelemetryEventParam(GuestAgentExtensionEventsSchema.ExtensionType, event.file_type), TelemetryEventParam(GuestAgentExtensionEventsSchema.IsInternal, False)])
codereview_python_data_8033
"""A SCOP domain. A leaf node in the Scop hierarchy. - sid -- The SCOP domain identifier. e.g. ``"d5hbib_"`` - - residues -- A Residue object. It defines the collection of PDB atoms that make up this domain. """ This extra white space is why the TravisCI style check is failing: ``` $ flake8 Bio/ Bio/SCOP/__init__.py:620:1: W293 blank line contains whitespace ``` """A SCOP domain. A leaf node in the Scop hierarchy. - sid -- The SCOP domain identifier. e.g. ``"d5hbib_"`` + - residues -- A Residue object. It defines the collection of PDB atoms that make up this domain. """
codereview_python_data_8041
if sys.version_info >= (3, 0): from io import BytesIO as StringIO else: - try: - from cStringIO import StringIO - except ImportError: - from StringIO import StringIO - INFINITY = float('inf') We can eliminate the try/except here since we don't need to support <2.7 if sys.version_info >= (3, 0): from io import BytesIO as StringIO else: + from cStringIO import StringIO INFINITY = float('inf')
codereview_python_data_8042
"""Test that we can query bisq for market prices""" price = get_bisq_market_price(A_BSQ) assert price != Price(ZERO) - # Test that error is correctly rised when there is no market with pytest.raises(RemoteError): get_bisq_market_price(A_3CRV) ```suggestion # Test that error is correctly raised when there is no market ``` """Test that we can query bisq for market prices""" price = get_bisq_market_price(A_BSQ) assert price != Price(ZERO) + # Test that error is correctly raised when there is no market with pytest.raises(RemoteError): get_bisq_market_price(A_3CRV)
codereview_python_data_8047
if self.orig_bases: # update __orig_bases__ if needed code.putln("if (%s != %s) {" % (self.bases.result(), self.orig_bases.result())) - code.putln('PyDict_SetItemString(%s, "__orig_bases__", %s);' % ( - self.dict.result(), self.orig_bases.result())) code.putln("}") self.orig_bases.generate_disposal_code(code) self.orig_bases.free_temps(code) This needs error handling. if self.orig_bases: # update __orig_bases__ if needed code.putln("if (%s != %s) {" % (self.bases.result(), self.orig_bases.result())) + code.putln( + code.error_goto_if('PyDict_SetItemString(%s, "__orig_bases__", %s) == -1' % ( + self.dict.result(), self.orig_bases.result()), + self.pos + )) code.putln("}") self.orig_bases.generate_disposal_code(code) self.orig_bases.free_temps(code)
codereview_python_data_8048
@abc.abstractmethod def fetch_bigquery_iam_policy(self, project_number, dataset_id): - """Gets IAM policy if a bigquery dataset from gcp API call. Args: project_number (str): number of the project to query. typo: if -> of @abc.abstractmethod def fetch_bigquery_iam_policy(self, project_number, dataset_id): + """Gets IAM policy of a bigquery dataset from gcp API call. Args: project_number (str): number of the project to query.
codereview_python_data_8054
location=Location.KRAKEN, location_label=self.name, ) - trades_raw, _ = self.db.get_history_events(filter_query) trades, max_ts = self.process_kraken_trades(trades_raw) queried_range = (start_ts, Timestamp(max_ts)) if with_errors else (start_ts, end_ts) return trades, queried_range This is exactly why for others we have `get_xxx()` and `get_xxx_and_limit_info()`. So that you don't have to go and do those changes. Please use the already existing logic. location=Location.KRAKEN, location_label=self.name, ) + trades_raw = self.db.get_history_events( + filter_query=filter_query, + has_premium=True, + ) trades, max_ts = self.process_kraken_trades(trades_raw) queried_range = (start_ts, Timestamp(max_ts)) if with_errors else (start_ts, end_ts) return trades, queried_range
codereview_python_data_8063
if i % 10 == 0: print(bst.eval_train(), bst.eval_valid()) self.assertEqual(bst.current_iteration(), 20) self.assertEqual(bst.num_trees(), 20) self.assertEqual(bst.num_model_per_iteration(), 1) `places=2` seems to be very poor comparison. Do you have any thoughts why is it fail with more strict checks? if i % 10 == 0: print(bst.eval_train(), bst.eval_valid()) + f = open("expected_model.json", "r") + expected_json = json.load(f) + expected_str = json.dumps(expected_json, sort_keys=True, indent=4) + result_str = json.dumps(bst.dump_model(), sort_keys=True, indent=4) + result_json = json.loads(result_str) + self.assertEqual(result_str, expected_str) + self.assertEqual(bst.current_iteration(), 20) self.assertEqual(bst.num_trees(), 20) self.assertEqual(bst.num_model_per_iteration(), 1)
codereview_python_data_8074
from nose.plugins.attrib import attr from numpy.testing import (assert_equal, assert_almost_equal, dec, assert_array_almost_equal, assert_raises, - assert_) from unittest import TestCase import tempdir `assert_` is not used from nose.plugins.attrib import attr from numpy.testing import (assert_equal, assert_almost_equal, dec, assert_array_almost_equal, assert_raises, + ) from unittest import TestCase import tempdir
codereview_python_data_8079
@property def pickle_protocol(self): - return configuration.get_config().getint(self.spark_version, "pickle-protocol", pickle.DEFAULT_PROTOCOL) def setup(self, conf): """ why is this pulling from `self.spark_version` config section rather than the `spark` config section (`py-packages` appears to pull from a config section called `spark`) @property def pickle_protocol(self): + return configuration.get_config().getint('spark', 'pickle-protocol', pickle.DEFAULT_PROTOCOL) def setup(self, conf): """
codereview_python_data_8086
self.rate_average = rate_average def message(self, msg): - dnf.util._terminal_messenger('write', msg, self.fo) - dnf.util._terminal_messenger('flush', out=self.fo) def start(self, total_files, total_size): self.total_files = total_files There is no need for 'message', if there is possibility of immediate flushing. self.rate_average = rate_average def message(self, msg): + dnf.util._terminal_messenger('write_flush', msg, self.fo) def start(self, total_files, total_size): self.total_files = total_files
codereview_python_data_8087
augment images. Examples: - TODO: Implement 'Shear', 'Sharpness' and 'Rotate' transforms >>> replace = (104, 116, 124) >>> policies = [ >>> [ We may move this TODO to Line15. augment images. Examples: >>> replace = (104, 116, 124) >>> policies = [ >>> [
codereview_python_data_8089
Examples -------- - How DSSP could be use:: from Bio.PDB import PDBParser from Bio.PDB.DSSP import DSSP used --> used Examples -------- + How DSSP could be used:: from Bio.PDB import PDBParser from Bio.PDB.DSSP import DSSP
codereview_python_data_8091
subctx.implicit_tid_in_shapes = False viewgen.compile_view_shapes(ir_set, ctx=subctx) elif (orig_stype.issubclass(ctx.env.schema, json_t) - and new_stype.is_enum(ctx.env.schema)): # Casts from json to enums need some special handling # here, where we have access to the enum type. Just turn # it into json->str and str->enum. - str_typ = ctx.env.get_track_schema_object('std::str') - assert isinstance(str_typ, s_types.Type) str_ir = compile_cast(ir_expr, str_typ, srcctx=srcctx, ctx=ctx) return compile_cast(str_ir, new_stype, srcctx=srcctx, ctx=ctx) Use `get_track_schema_type` and then you won't need the `assert` subctx.implicit_tid_in_shapes = False viewgen.compile_view_shapes(ir_set, ctx=subctx) elif (orig_stype.issubclass(ctx.env.schema, json_t) + and new_stype.is_enum(ctx.env.schema) and False): # Casts from json to enums need some special handling # here, where we have access to the enum type. Just turn # it into json->str and str->enum. + str_typ = ctx.env.get_track_schema_type('std::str') str_ir = compile_cast(ir_expr, str_typ, srcctx=srcctx, ctx=ctx) return compile_cast(str_ir, new_stype, srcctx=srcctx, ctx=ctx)
codereview_python_data_8097
doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=') for op in sorted(all_ops, key=lambda v: str(v).lower()): schema = b.GetSchema(op) - op_full_name = op - if not op_full_name.startswith('_'): - op_full_name = op_full_name.replace('_', '.') - *submodule, op_name = op_full_name.split('.') is_cpu = '|v|' if op in cpu_ops else '' is_gpu = '|v|' if op in gpu_ops else '' is_mixed = '|v|' if op in mix_ops else '' I think that deserves a function in ops - it's used in two distant places and can go stale quite easily and errors can go unnoticed for a long time. doc_table += formater.format('', '', '', '', '', '', op_name_max_len = op_name_max_len, c='=') for op in sorted(all_ops, key=lambda v: str(v).lower()): schema = b.GetSchema(op) + op_full_name, submodule, op_name = ops._process_op_name(op) is_cpu = '|v|' if op in cpu_ops else '' is_gpu = '|v|' if op in gpu_ops else '' is_mixed = '|v|' if op in mix_ops else ''
codereview_python_data_8099
"""Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule May add a TODO here, indicating that we will use the registry to choose either DepthwseSeparableConvModule or ConvModule in the future. Thus, we will have a chance to reduce the option `use_depthwise` in the future. """Initialize layers of the head.""" self.cls_convs = nn.ModuleList() self.reg_convs = nn.ModuleList() + # TODO: Use registry to choose ConvModule type conv = DepthwiseSeparableConvModule \ if self.use_depthwise else ConvModule
codereview_python_data_8100
start = 0 for n in num_levels: end = start + n - level_targets.append(target[:, start:end].squeeze(0)) start = end return level_targets Maybe we can directly use `utils.py` as the filename under the anchor dir. start = 0 for n in num_levels: end = start + n + # level_targets.append(target[:, start:end].squeeze(0)) + level_targets.append(target[:, start:end]) start = end return level_targets
codereview_python_data_8101
if __name__ == '__main__': - print(0) test_basic() - print(1) test_copy() - print(2) test_apply_nodes() - print(3) test_apply_edges() - print(4) test_flow_compute() - print(5) test_prop_flows() could you remove the prints? if __name__ == '__main__': test_basic() test_copy() test_apply_nodes() test_apply_edges() test_flow_compute() test_prop_flows()
codereview_python_data_8113
# -*- coding: utf-8 -*- from decimal import Decimal as D -from django.utils import translation -from django.test import TestCase from django import template def render(template_string, ctx): Minor nitpick, could you alphabetically sort these three imports? # -*- coding: utf-8 -*- from decimal import Decimal as D from django import template +from django.test import TestCase +from django.utils import translation def render(template_string, ctx):
codereview_python_data_8131
import unittest -from crypto_square import ciphertext # Tests adapted from `problem-specifications//canonical-data.json` @ v3.2.0 Seems your template is checking the incorrect key here. import unittest +from crypto_square import cipher_text # Tests adapted from `problem-specifications//canonical-data.json` @ v3.2.0
codereview_python_data_8144
self, type1, type2) def py_operation_function(self, code): type1, type2 = self.operand1.type, self.operand2.type - is_unicode_concat = type1 is unicode_type and type2 is unicode_type if is_unicode_concat: if self.operand1.may_be_none() or self.operand2.may_be_none(): Looks like the right place to fix this, but I think we should also allow `str_type` here if one side is `unicode_type`. self, type1, type2) def py_operation_function(self, code): + language_level = code.globalstate.directives['language_level'] or '3str' + unicode_types = ((unicode_type,) if language_level in ('2', '3str') + else (unicode_type, str_type)) type1, type2 = self.operand1.type, self.operand2.type + is_unicode_concat = type1 in unicode_types and type2 in unicode_types if is_unicode_concat: if self.operand1.may_be_none() or self.operand2.may_be_none():
codereview_python_data_8146
self, key: str, lock: Union[threading.Semaphore, threading.Lock] ) -> Union[threading.Semaphore, threading.Lock]: with self.creation_lock: - if key not in self.locks: - self.locks[key] = lock - return self.locks[key] LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks() nit: we could use the pretty nifty `setdefault(..)` method here: ``` with self.creation_lock: return self.locks.setdefault(key, lock) ``` self, key: str, lock: Union[threading.Semaphore, threading.Lock] ) -> Union[threading.Semaphore, threading.Lock]: with self.creation_lock: + return self.locks.setdefault(key, lock) LAMBDA_ASYNC_LOCKS = LambdaAsyncLocks()
codereview_python_data_8148
def hausdorff_wavg(P, Q): - r"""Calculate the weighted average (undirected) Hausdorff distance between - two paths. *P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time steps, :math:`N` atoms, and :math:`3N` coordinates (e.g., ~~symmetric~~ - omit "(undirected)" def hausdorff_wavg(P, Q): + r"""Calculate the weighted average Hausdorff distance between two paths. *P* (*Q*) is a :class:`numpy.ndarray` of :math:`N_p` (:math:`N_q`) time steps, :math:`N` atoms, and :math:`3N` coordinates (e.g.,
codereview_python_data_8158
if isinstance(filepath_or_buffer, str): if "*" not in filepath_or_buffer: warnings.warn( - f"'*' symbol not found in filename: '{filepath_or_buffer}'" ) if not cls.file_exists(filepath_or_buffer): return cls.single_worker_read(filepath_or_buffer, **kwargs) The error message should not only state the error condition, but _why_ it's probably an error; for now it's just stating "hey, no star in name", and a user could be "so what?". Warning should explain what's going on or give a reference where to find more. if isinstance(filepath_or_buffer, str): if "*" not in filepath_or_buffer: warnings.warn( + "Shell-style wildcard '*' must be in the filename in order to read multiple " + f"files at once; passed filename: '{filepath_or_buffer}'" ) if not cls.file_exists(filepath_or_buffer): return cls.single_worker_read(filepath_or_buffer, **kwargs)
codereview_python_data_8161
yield finally: try: - if os.path.exists(filepath): - os.remove(filepath) except OSError as e: log.misc.error(f"Failed to delete tempfile {filepath} ({e})!") I still don't think this is needed because we handle `OSError` anyways: ```pycon >>> try: ... os.remove('/doesnotexist') ... except OSError as e: ... print(f"Failed to delete file: {e}") ... Failed to delete file: [Errno 2] No such file or directory: '/doesnotexist' ``` yield finally: try: + os.remove(filepath) except OSError as e: log.misc.error(f"Failed to delete tempfile {filepath} ({e})!")
codereview_python_data_8165
@click.option( '--trading-calendar', metavar='TRADING-CALENDAR', - default='XNYS', - help="The calendar you want to use e.g. XLON. XNYS is the default." ) @click.option( '--print-algo/--no-print-algo', I'd probably leave this be for now. This is part of the public-facing API of the CLI, and more users are going to know NYSE than XNYS. @click.option( '--trading-calendar', metavar='TRADING-CALENDAR', + default='NYSE', + help="The calendar you want to use e.g. LSE. NYSE is the default." ) @click.option( '--print-algo/--no-print-algo',
codereview_python_data_8167
external_updates_subscription='subscription').put() data_types.Job(name='job').put() - self.testcase_0 = data_types.Testcase( open=True, status='Processed', job_type='external_job', nit: just testcase instead of testcase_0 external_updates_subscription='subscription').put() data_types.Job(name='job').put() + self.testcase = data_types.Testcase( open=True, status='Processed', job_type='external_job',
codereview_python_data_8177
for i, (atom, element) in enumerate(zip(ag, elements)): # create atom - rdatom = Chem.Atom(element) # disable adding H to the molecule rdatom.SetNoImplicit(True) # add PDB-like properties @jbarnoud didn't you already write code to this effect in the PDBWriter? Or was that for parsing still?+ for i, (atom, element) in enumerate(zip(ag, elements)): # create atom + rdatom = Chem.Atom(element.capitalize()) # disable adding H to the molecule rdatom.SetNoImplicit(True) # add PDB-like properties
codereview_python_data_8180
except ValueError as e: signals.status_message.send(message=str(e)) return - if not parts: - signals.status_message.send(message="Invalid Url") - return scheme, host, port, path = parts f = self.master.create_request(method, scheme, host, port, path) self.master.view.focus.flow = f `if not parts:` cannot happen anymore I think, let's remove it? except ValueError as e: signals.status_message.send(message=str(e)) return scheme, host, port, path = parts f = self.master.create_request(method, scheme, host, port, path) self.master.view.focus.flow = f
codereview_python_data_8181
scale_factor = 1.0 for i in range(bboxes.shape[0]): - if not isinstance(scale_factor, float) and not isinstance( - scale_factor, np.ndarray): scale_factor = scale_factor.cpu().numpy() bbox = (bboxes[i, :] / scale_factor).astype(np.int32) label = labels[i] `if not isinstance(scale_factor, (float, np.ndarray))` scale_factor = 1.0 for i in range(bboxes.shape[0]): + if not isinstance(scale_factor, (float, np.ndarray)): scale_factor = scale_factor.cpu().numpy() bbox = (bboxes[i, :] / scale_factor).astype(np.int32) label = labels[i]
codereview_python_data_8188
mode = os.fstat(file_obj.fileno()).st_mode return stat.S_ISFIFO(mode) or stat.S_ISREG(mode) - -def is_tty(): - return sys.stdout.isatty() This single instruction func not worth to exist mode = os.fstat(file_obj.fileno()).st_mode return stat.S_ISFIFO(mode) or stat.S_ISREG(mode)
codereview_python_data_8194
annotations. """ -raise RuntimeError("Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://github.com/biopython/biopython/issues/3156 for more information.") Wouldn't it be clearer to raise ``ImportError`` here? That would make sense with usage as in the examples on #3156 wouldn't it? annotations. """ +raise ImportError( + "Bio.Alphabet has been removed from Biopython. In many cases, the alphabet can simply be ignored and removed from scripts. In a few cases, you may need to specify the ``molecule_type`` as an annotation on a SeqRecord for your script to work correctly. Please see https://github.com/biopython/biopython/issues/3156 for more information." +)
codereview_python_data_8196
"""Get a list of assets from the assets table. Args: - asset_ids (list): a of list of ids for the assets to be retrieved from the database. Returns: a of list --> a list? :smile: """Get a list of assets from the assets table. Args: + asset_ids (list): a list of ids for the assets to be retrieved from the database. Returns:
codereview_python_data_8203
@contextmanager -def rc_context(rcparams): """ Context manager that temporarily overrides the pyplot rcParams. """ To avoid confusion maybe this should be ``_rc_context`` if we expect to use the decorator and not this context manager directly. Perhaps you could even inline this context manager inside ``mpl_rc_context``? @contextmanager +def _rc_context(rcparams): """ Context manager that temporarily overrides the pyplot rcParams. """
codereview_python_data_8204
def generate_c_code(self, env, options, result): # Check for a common gotcha for new users: naming your .pyx file after the .c file you want to wrap - if not is_cython_generated_file(result.c_file, allow_failed=True): - error(self.pos, 'The output file already exists and does not look like it was generated by Cython: "%s"' % - os.path.basename(result.c_file)) modules = self.referenced_modules Is it worth covering `.h` files too? (and arguably `_api.h` files, although I suspect they're less likely to happen by accident) def generate_c_code(self, env, options, result): # Check for a common gotcha for new users: naming your .pyx file after the .c file you want to wrap + if not is_cython_generated_file(result.c_file, allow_failed=True, if_not_found=True): + # Raising a fatal CompileError instead of calling error() to prevent castrating an existing file. + raise CompileError( + self.pos, 'The output file already exists and does not look like it was generated by Cython: "%s"' % + os.path.basename(result.c_file)) modules = self.referenced_modules
codereview_python_data_8206
arrowsize : int or list (default=10) For directed graphs, choose the size of the arrow head's length and - width. - If `list`, assign different size on each arrow head's length and width. See `matplotlib.patches.FancyArrowPatch` for attribute `mutation_scale` for more info. ```suggestion width. A list of values can be passed in to assign a different size for arrow head's length and width. See `matplotlib.patches.FancyArrowPatch` for attribute `mutation_scale` for more info. ``` Just a wording suggestion, feel free to ignore! arrowsize : int or list (default=10) For directed graphs, choose the size of the arrow head's length and + width. A list of values can be passed in to assign a different size for arrow head's length and width. See `matplotlib.patches.FancyArrowPatch` for attribute `mutation_scale` for more info.
codereview_python_data_8207
os.utime(file_path, None) if os.path.exists(file_path): API_FILE_PATHS[api] = file_path - chmod_r(file_path, 0o777) return API_FILE_PATHS.get(api) I think we can remove this `chmod_r` from here now (shouldn't be required anymore). os.utime(file_path, None) if os.path.exists(file_path): API_FILE_PATHS[api] = file_path return API_FILE_PATHS.get(api)
codereview_python_data_8208
query = QueryResult([hit11]) self.assertEqual(hit11, query["hit1"]) self.assertEqual(hit11, query["alt1"]) - self.assertEqual(hit11.id, "alt1") hit11._id_alt = [] def test_delitem_string_ok(self): The comparison here becomes incorrect. It should be `assertNotEqual` query = QueryResult([hit11]) self.assertEqual(hit11, query["hit1"]) self.assertEqual(hit11, query["alt1"]) + self.assertNotEqual(hit11.id, "alt1") hit11._id_alt = [] def test_delitem_string_ok(self):
codereview_python_data_8211
rst = graph.dstdata['ft'] # residual if self.res_fc is not None: - resval = self.res_fc(h_dst).view(h_dst.shape[0], -1, self._out_feats) rst = rst + resval # bias if self.bias is not None: - rst = rst + self.bias.view(1, -1, self._out_feats) # activation if self.activation: rst = self.activation(rst) ```suggestion rst = rst + self.bias.view(1, self._num_heads, self._out_feats) ``` This looks more clear to me. rst = graph.dstdata['ft'] # residual if self.res_fc is not None: + resval = self.res_fc(h_dst).view(h_dst.shape[0], self._num_heads, self._out_feats) rst = rst + resval # bias if self.bias is not None: + rst = rst + self.bias.view(1, self._num_heads, self._out_feats) # activation if self.activation: rst = self.activation(rst)
codereview_python_data_8212
def __init__(self, elements): SearchStrategy.__init__(self) - self.elements = d.check_sample(elements, SampledFromStrategy.__name__) assert self.elements def calc_has_reusable_values(self, recur): The name here can just be the string `'sampled_from'`, as that's the user-facing API. Similar notes for the other calls. def __init__(self, elements): SearchStrategy.__init__(self) + self.elements = d.check_sample(elements, 'sampled_from') assert self.elements def calc_has_reusable_values(self, recur):
codereview_python_data_8217
elif '.m3u8' in video_url: for stream in HLSStream.parse_variant_playlist(self.session, video_url).items(): yield stream - elif '.mp4' in video_url: match = self._mp4_bitrate_re.match(video_url) if match is not None: bitrate = match.group('bitrate') I think when the quality is bitrate it should have the `k` suffix. Streamlink does some conversion to bitrates so they are more comparable to resolution. Might not be a problem in this case, just an FYI. elif '.m3u8' in video_url: for stream in HLSStream.parse_variant_playlist(self.session, video_url).items(): yield stream + # HBB TV streams are not provided anymore by France Televisions + elif '.mp4' in video_url and '/hbbtv/' not in video_url: match = self._mp4_bitrate_re.match(video_url) if match is not None: bitrate = match.group('bitrate')
codereview_python_data_8219
import torch -from ..bbox import assign_and_sample, bbox2delta, build_assigner -from ..bbox.samplers.pseudo_sampler import PseudoSampler from ..utils import multi_apply `PseudoSampler` can also be imported from `..bbox` import torch +from ..bbox import PseudoSampler, assign_and_sample, bbox2delta, build_assigner from ..utils import multi_apply
codereview_python_data_8220
# Get maximum scores for foreground classes. if self.use_sigmoid_cls: - max_scores, _ = scores.max(dim=2) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class - max_scores, _ = scores[..., :-1].max(dim=2) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange(batch_size).view( May use -1? # Get maximum scores for foreground classes. if self.use_sigmoid_cls: + max_scores, _ = scores.max(-1) else: # remind that we set FG labels to [0, num_class-1] # since mmdet v2.0 # BG cat_id: num_class + max_scores, _ = scores[..., :-1].max(-1) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange(batch_size).view(
codereview_python_data_8221
new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns)) new_dtypes = self._dtype_cache - new_dtypes.index = new_columns return self.__constructor__( new_data, new_index, new_columns, new_dtypes ).dropna(axis=axis, how="all") if `self._dtypes_cache` is `None` this will throw an error. new_index = pandas.RangeIndex(len(self.index)) if not axis else self.index new_columns = self.columns if not axis else pandas.RangeIndex(len(self.columns)) new_dtypes = self._dtype_cache + if new_dtypes: + new_dtypes.index = new_columns return self.__constructor__( new_data, new_index, new_columns, new_dtypes ).dropna(axis=axis, how="all")
codereview_python_data_8229
def test_method(self): q = self.req() - self.resp() assert self.q("~m get", q) assert not self.q("~m post", q) remove entirely? (same below) def test_method(self): q = self.req() assert self.q("~m get", q) assert not self.q("~m post", q)
codereview_python_data_8230
stop_price=None, style=None): """ - Place an order for a fixed amount of value. Equivalent to ``order(asset, value / data.current(asset, 'price'))``. "amount of value" sounds a little weird. Maybe just "value"? stop_price=None, style=None): """ + Place an order for a fixed amount of money. Equivalent to ``order(asset, value / data.current(asset, 'price'))``.
codereview_python_data_8231
""" Return the file content hash for a file. """ - with open(file_path, 'rb') as content: hasher = hashlib.sha256() hasher.update(content.read()) return hasher.hexdigest() ```suggestion with open(file_path, 'rb', encoding='utf-8', errors='ignore') as content: ``` Please fix this in other places too. There are a lot of places where `encoding` and `errors` parameters are not specified for open. """ Return the file content hash for a file. """ + with open(file_path) as content: hasher = hashlib.sha256() hasher.update(content.read()) return hasher.hexdigest()
codereview_python_data_8233
complex = 0, longness = 0, is_self_arg = cmethod_flag, templates = None) else: - base_type = p_c_base_type(s, nonempty = nonempty) declarator = p_c_declarator(s, ctx, nonempty = nonempty) if s.sy in ('not', 'or') and not s.in_python_file: kind = s.sy ```suggestion base_type = p_c_base_type(s, nonempty=nonempty) ``` complex = 0, longness = 0, is_self_arg = cmethod_flag, templates = None) else: + base_type = p_c_base_type(s, nonempty=nonempty) declarator = p_c_declarator(s, ctx, nonempty = nonempty) if s.sy in ('not', 'or') and not s.in_python_file: kind = s.sy
codereview_python_data_8235
], f'Invalid crop_type {crop_type}.' if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size we should also ensure that they are integers? ], f'Invalid crop_type {crop_type}.' if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 + assert isinstance(crop_size[0], int) and isinstance( + crop_size[1], int) else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size
codereview_python_data_8236
dali_cflags, dali_lflags = get_dali_build_flags() tf_cflags, tf_lflags = get_tf_build_flags() cuda_cflags, cuda_lflags = get_cuda_build_flags() - plugin_src = self.src_path + '/daliop.cc' + ' ' + self.src_path + '/dali_dataset_op.cc' lib_path = self.plugin_dest_dir + '/libdali_tf_current.so' cmd = compiler + ' -Wl,-R,\'$ORIGIN/..\' -std=c++11 -DNDEBUG -shared ' \ + plugin_src + ' -o ' + lib_path + ' -fPIC ' + dali_cflags + ' ' \ maybe create a list of filenames and then construct this in a more generic way: ```for filename in filenames: plugin_src = plugin_src + ' ' + self.src_path + '/' + filename ``` dali_cflags, dali_lflags = get_dali_build_flags() tf_cflags, tf_lflags = get_tf_build_flags() cuda_cflags, cuda_lflags = get_cuda_build_flags() + + filenames = ['daliop.cc', 'dali_dataset_op.cc'] + plugin_src = '' + for filename in filenames: + plugin_src = plugin_src + ' ' + self.src_path + '/' + filename + lib_path = self.plugin_dest_dir + '/libdali_tf_current.so' cmd = compiler + ' -Wl,-R,\'$ORIGIN/..\' -std=c++11 -DNDEBUG -shared ' \ + plugin_src + ' -o ' + lib_path + ' -fPIC ' + dali_cflags + ' ' \
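The loop the reviewer sketches can also be written with `os.path.join` and a single `' '.join`, which keeps path-separator handling out of manual string concatenation. A small standalone version (the paths here are placeholders):

```python
import os

def plugin_sources(src_path, filenames):
    """Return the source files as one space-separated compiler argument."""
    return ' '.join(os.path.join(src_path, name) for name in filenames)

print(plugin_sources('/opt/dali/src', ['daliop.cc', 'dali_dataset_op.cc']))
# /opt/dali/src/daliop.cc /opt/dali/src/dali_dataset_op.cc
```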
codereview_python_data_8241
maximum_ball = ITEM_ULTRABALL if is_vip else ITEM_GREATBALL ideal_catch_rate_before_throw = 0.9 if is_vip else 0.35 - while True: - self.bot.latest_inventory = None - berry_count = self.bot.item_inventory_count(berry_id) - items_stock = self.bot.current_inventory() # find lowest available ball current_ball = ITEM_POKEBALL It would be better to modify the item count when spinning fort or using the item. And then only refresh when we need to recycle. maximum_ball = ITEM_ULTRABALL if is_vip else ITEM_GREATBALL ideal_catch_rate_before_throw = 0.9 if is_vip else 0.35 + berry_count = self.bot.item_inventory_count(berry_id) + items_stock = self.bot.current_inventory() + while True: # find lowest available ball current_ball = ITEM_POKEBALL
codereview_python_data_8248
d = th.cdist(x, x).to(F.cpu()) def check_knn(g, x, start, end): g = g.to(F.cpu()) for v in range(start, end): src, _ = g.in_edges(v) Probably also assert the context of `kg` and `x` before sending to cpu. d = th.cdist(x, x).to(F.cpu()) def check_knn(g, x, start, end): + assert g.device == x.device g = g.to(F.cpu()) for v in range(start, end): src, _ = g.in_edges(v)
codereview_python_data_8249
Test support of python object in annotations >>> test_cdef_return_object(3) 3 """ - return x For the sake of completeness could be a test that the exception raises correctly: ``` if x: return x else: raise RuntimeError() ``` and a test of both pathways in the doctest Test support of python object in annotations >>> test_cdef_return_object(3) 3 + >>> test_cdef_return_object(None) + Traceback (most recent call last): + ... + RuntimeError """ + if x: + return x + else: + raise RuntimeError()
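For reference, the doctest convention for asserting that an exception is raised looks like the sketch below — a traceback header, an ellipsis line, then the exception type. The function here is a made-up stand-in, not Cython code.

```python
def reciprocal(x):
    """Return 1/x.

    >>> reciprocal(4)
    0.25
    >>> reciprocal(0)
    Traceback (most recent call last):
    ...
    ZeroDivisionError: division by zero
    """
    return 1 / x

if __name__ == '__main__':
    import doctest
    doctest.testmod()
```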
codereview_python_data_8251
# GI:160837788 aka NP_075631.2 # the actin related protein 2/3 complex, subunit 1B [Mus musculus] self.run_qblast("blastp", "nr", "NP_075631.2", 0.001, - "rat [ORGN]", dict(megablast='FALSE'), ['9506405', '13592137', '37589612', '149064087', '56912225']) def test_pcr_primers(self): # This next example finds PCR primer matches in Chimpanzees, e.g. BRCA1: self.run_qblast("blastn", "nr", "GTACCTTGATTTCGTATTC" + ("N" * 30) + "GACTCTACTACCTTTACCC", - 10, "pan [ORGN]", dict(megablast='FALSE'), ["XM_009432096.2", "XM_009432102.2", "XM_009432101.2", "XM_016930487.1", "XM_009432104.2", "XM_009432099.2", "XR_001710553.1", "XM_016930485.1", "XM_009432089.2", Style wise, avoid the very long line by using more line breaks. Also why not write ``{'megablast': 'FALSE'}`` here (and likewise later on)? # GI:160837788 aka NP_075631.2 # the actin related protein 2/3 complex, subunit 1B [Mus musculus] self.run_qblast("blastp", "nr", "NP_075631.2", 0.001, + "rat [ORGN]", {'megablast': 'FALSE'}, + ['9506405', '13592137', '37589612', '149064087', '56912225']) def test_pcr_primers(self): # This next example finds PCR primer matches in Chimpanzees, e.g. BRCA1: self.run_qblast("blastn", "nr", "GTACCTTGATTTCGTATTC" + ("N" * 30) + "GACTCTACTACCTTTACCC", + 10, "pan [ORGN]", {'megablast': 'FALSE'}, ["XM_009432096.2", "XM_009432102.2", "XM_009432101.2", "XM_016930487.1", "XM_009432104.2", "XM_009432099.2", "XR_001710553.1", "XM_016930485.1", "XM_009432089.2",
codereview_python_data_8257
from indy_common.authorize.auth_actions import AuthActionAdd, AuthActionEdit from indy_common.authorize.auth_constraints import AuthConstraint, AuthConstraintOr, accepted_roles, IDENTITY_OWNER from indy_common.constants import TRUST_ANCHOR, POOL_CONFIG, VALIDATOR_INFO, POOL_UPGRADE, POOL_RESTART, NODE, \ - CLAIM_DEF, SCHEMA, NYM, ROLE, NETWORK_MONITOR, REVOC_REG_ENTRY, REVOC_REG_DEF from plenum.common.constants import TRUSTEE, STEWARD, VERKEY Please do not remove `AUTH_RULE` from import. from indy_common.authorize.auth_actions import AuthActionAdd, AuthActionEdit from indy_common.authorize.auth_constraints import AuthConstraint, AuthConstraintOr, accepted_roles, IDENTITY_OWNER from indy_common.constants import TRUST_ANCHOR, POOL_CONFIG, VALIDATOR_INFO, POOL_UPGRADE, POOL_RESTART, NODE, \ + CLAIM_DEF, SCHEMA, NYM, ROLE, AUTH_MAP, NETWORK_MONITOR, REVOC_REG_ENTRY, REVOC_REG_DEF from plenum.common.constants import TRUSTEE, STEWARD, VERKEY
codereview_python_data_8263
return outs[0] else: return tuple(outs) This will cause BC-breaking if we simply remove L2Norm from this file. return outs[0] else: return tuple(outs) + + +class L2Norm(ssd_neck.L2Norm): + + def __init__(self, **kwargs): + super(L2Norm, self).__init__(**kwargs) + warnings.warn('DeprecationWarning: L2Norm in ssd_vgg.py ' + 'is deprecated, please use L2Norm in ' + 'mmdet/models/necks/ssd_neck.py instead')
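The backwards-compatibility shim above follows a common pattern: keep a thin subclass at the old import path that forwards to the new implementation and emits a deprecation warning. A generic sketch with placeholder class names:

```python
import warnings

class NewNorm:
    """Stand-in for the relocated implementation."""

    def __init__(self, scale=1.0):
        self.scale = scale

class OldNorm(NewNorm):
    """Alias kept at the old location so existing imports keep working."""

    def __init__(self, **kwargs):
        warnings.warn('OldNorm is deprecated; use NewNorm instead',
                      DeprecationWarning)
        super().__init__(**kwargs)

layer = OldNorm(scale=20.0)   # still constructs; emits a DeprecationWarning
print(layer.scale)            # 20.0
```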
codereview_python_data_8270
self.modules = {"__builtin__" : Builtin.builtin_scope} self.cython_scope = CythonScope.create_cython_scope(self) self.modules["cython"] = self.cython_scope - self.include_directories = tuple(include_directories) self.future_directives = set() self.compiler_directives = compiler_directives self.cpp = cpp Does this really need to be a tuple? It seems like it should rather be a list, at least at this point, but also generally. Make a copy when necessary, but why use a tuple? self.modules = {"__builtin__" : Builtin.builtin_scope} self.cython_scope = CythonScope.create_cython_scope(self) self.modules["cython"] = self.cython_scope + self.include_directories = include_directories self.future_directives = set() self.compiler_directives = compiler_directives self.cpp = cpp
codereview_python_data_8274
def create_sqs_message_attributes(subscriber, attributes): - if subscriber['RawMessageDelivery'] == 'false': return {} message_attributes = {} If the key `"RawMessageDelivery"` is missing in `subscriber`, this would raise a `KeyError`. Perhaps we can change it to: ``` if subscriber.get('RawMessageDelivery') not in ('true', True): ... ``` def create_sqs_message_attributes(subscriber, attributes): + if subscriber.get('RawMessageDelivery') not in ('true', True): return {} message_attributes = {}
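The switch from indexing to `.get()` is worth spelling out once in isolation: indexing raises `KeyError` on a missing key, while `.get()` returns `None`, so a single membership test handles subscribers with the flag as a string, as a boolean, or absent entirely. The dictionaries below are invented for the demonstration:

```python
subscribers = [
    {'RawMessageDelivery': 'true'},   # flag present as a string
    {'RawMessageDelivery': True},     # flag present as a boolean
    {},                               # flag missing entirely
]

for subscriber in subscribers:
    # .get() returns None for the last case instead of raising KeyError.
    raw = subscriber.get('RawMessageDelivery') in ('true', True)
    print(raw)
# True
# True
# False
```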
codereview_python_data_8276
kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) # Choose a node with minimum degree. - deg = G.degree() - v = next(n for n, d in deg.items() if d == min(deg.values())) # Initial node cutset is all neighbors of the node with minimum degree. min_cut = set(G[v]) # Compute st node cuts between v and all its non-neighbors nodes in G. I believe that `min(deg.values())` is evaluated for each `n`, and this will end up forcing a quadratic running time. Supposedly, this can be as concise as `v = min(G, key=G.degree)`. kwargs = dict(flow_func=flow_func, auxiliary=H, residual=R) # Choose a node with minimum degree. + v = min(G, key=G.degree) # Initial node cutset is all neighbors of the node with minimum degree. min_cut = set(G[v]) # Compute st node cuts between v and all its non-neighbors nodes in G.
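To make the complexity point concrete: in the original expression `min(deg.values())` is re-evaluated inside the generator for every candidate node, whereas passing `G.degree` as the key function lets `min` do a single linear scan. A small self-contained comparison on a toy graph:

```python
import networkx as nx

G = nx.path_graph(5)  # toy graph; the two endpoints have the minimum degree of 1

# Quadratic variant: min(deg.values()) is recomputed for each candidate n.
deg = dict(G.degree())
v_slow = next(n for n, d in deg.items() if d == min(deg.values()))

# Linear variant: the degree view itself serves as the key function.
v_fast = min(G, key=G.degree)

assert G.degree(v_slow) == G.degree(v_fast) == 1
```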
codereview_python_data_8282
from tests.test_libs import test_utils -@unittest.skipIf(sys.version_info.major != 3, 'Python 3 only') @test_utils.with_cloud_emulators('datastore') class OssFuzzGenerateCertsTest(unittest.TestCase): """Test oss_fuzz_generate_certs.""" there was some helper decorator for this ? from tests.test_libs import test_utils +@test_utils.python3_only @test_utils.with_cloud_emulators('datastore') class OssFuzzGenerateCertsTest(unittest.TestCase): """Test oss_fuzz_generate_certs."""
codereview_python_data_8286
# skip the image if there is no valid gt bbox if len(gt_bboxes) == 0 and self.skip_img_without_anno: - warnings.warn('Skip the image that has no valid gt bbox') return None # extra augmentation Maybe also print the filename? # skip the image if there is no valid gt bbox if len(gt_bboxes) == 0 and self.skip_img_without_anno: + warnings.warn('Skip the image "%s" that has no valid gt bbox' % + osp.join(self.img_prefix, img_info['filename'])) return None # extra augmentation
codereview_python_data_8287
import unittest from rational_numbers import Rational For now, we still need this import to pass tests in Python 2. +from __future__ import division import unittest from rational_numbers import Rational
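The reason the future import matters only on Python 2 is the meaning of `/` on integers there: without it, `1 / 2` floors to `0`, while with it (and everywhere in Python 3) the result is the true quotient. A minimal demonstration:

```python
from __future__ import division  # no-op on Python 3, changes `/` on Python 2

assert 1 / 2 == 0.5    # true division in both interpreters with the import
assert 1 // 2 == 0     # floor division remains available explicitly
```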
codereview_python_data_8288
type=float,
        default=15.0
    )
-    parser.add_argument(
-        "-hr",
-        "--health_record",
-        help="Send anonymous bot event to GA for bot health record, further ML will depend on it. Set \"health_record\":false if you need to disable it.",
-        type=bool,
-        default=True
-    )

    # Start to parse other attrs
    config = parser.parse_args()
This should be defaulted to false. We should encourage people to opt-in, but it should never be the default for tracking.
type=float,
        default=15.0
    )

    # Start to parse other attrs
    config = parser.parse_args()