Columns: id (string, 30 to 32 characters), content (string, 139 to 2.8k characters)
codereview_new_python_data_10124
def _apply(self, group):
    pairs_inner = distances.capped_distance(sel.positions, sys.positions, self.inRadius, box=box, return_distances=False)
-   inner = np.array(pairs_inner).T[1]
-   outer = np.array(pairs_outer).T[1]
    if pairs_outer.size > 0:
        sys_ind_outer = np.sort(np.unique(pairs_outer[:,1]))

As far as I can tell these two variables are not used again? perhaps I am missing something.

def _apply(self, group):
    pairs_inner = distances.capped_distance(sel.positions, sys.positions, self.inRadius, box=box, return_distances=False)
    if pairs_outer.size > 0:
        sys_ind_outer = np.sort(np.unique(pairs_outer[:,1]))
codereview_new_python_data_10125
def wrapper(*args, **kwargs):
def check_atomgroup_not_empty(groupmethod):
    """Decorator triggering a ``ValueError`` if the underlying group is empty.
-   Avoids obscure computational errors on group methods.
    Raises
    ------

Does this work on all groups? If so we should change the name and test. Otherwise we need to change docstring.

def wrapper(*args, **kwargs):
def check_atomgroup_not_empty(groupmethod):
    """Decorator triggering a ``ValueError`` if the underlying group is empty.
+   Avoids downstream errors in computing properties of empty atomgroups.
    Raises
    ------
codereview_new_python_data_10126
def _load_offsets(self):
            "{self.filename}. Using slow offset calculation.")
        self._read_offsets(store=True)
        return
-   raise e
    with fasteners.InterProcessLock(lock_name) as filelock:
        if not isfile(fname):

```suggestion
    else:
```

def _load_offsets(self):
            "{self.filename}. Using slow offset calculation.")
        self._read_offsets(store=True)
        return
+   else:
+       raise
    with fasteners.InterProcessLock(lock_name) as filelock:
        if not isfile(fname):
codereview_new_python_data_10127
def return_data(self, data_selector: Union[str, List[str], None] = None) \
        -> Dict[str, np.ndarray]:
    """
    Returns the auxiliary data contained in the :class:`EDRReader`.
    Returns either all data or data specified as `data_selector` in form
-   of a str or a list of any of :attribute:`EDRReader.terms`. `Time` is
    always returned to allow easy plotting.
    """
    if not data_selector or data_selector == "*":
        return self.auxdata

```suggestion
    of a str or a list of any of :attr:`EDRReader.terms`. `Time` is
```

def return_data(self, data_selector: Union[str, List[str], None] = None) \
        -> Dict[str, np.ndarray]:
    """
    Returns the auxiliary data contained in the :class:`EDRReader`.
    Returns either all data or data specified as `data_selector` in form
+   of a str or a list of any of :attr:`EDRReader.terms`. `Time` is
    always returned to allow easy plotting.
    """
    if not data_selector or data_selector == "*":
        return self.auxdata
codereview_new_python_data_10128
def plot_mean_profile(self, bins=100, range=None,
    binned, bins = self.bin_radii(frames=frames, bins=bins, range=range)
    mean = np.array(list(map(np.mean, binned)))
-   midpoints = 0.5 * bins[1:] + 0.5 * bins[:-1]
    fig, ax = plt.subplots()
    if n_std:

```suggestion
    midpoints = 0.5 * (bins[1:] + bins[:-1])
```

def plot_mean_profile(self, bins=100, range=None,
    binned, bins = self.bin_radii(frames=frames, bins=bins, range=range)
    mean = np.array(list(map(np.mean, binned)))
+   midpoints = 0.5 * (bins[1:] + bins[:-1])
    fig, ax = plt.subplots()
    if n_std:
codereview_new_python_data_10318
from Bio import BiopythonParserWarning
from Bio.Seq import Seq
-from Bio.SeqFeature import (
-    SimpleLocation,
-    Location,
-    Reference,
-    SeqFeature,
-    LocationParserError,
-)
# other Bio.GenBank stuff
from .utils import FeatureValueCleaner

Minor: Style wise although not formalised we've avoided the bracket multiline imports, I would use multiple import lines here - sorted alphabetically. (There are some issues about automatically sorting/formatting our imports)

from Bio import BiopythonParserWarning
from Bio.Seq import Seq
+from Bio.SeqFeature import Location
+from Bio.SeqFeature import Reference
+from Bio.SeqFeature import SeqFeature
+from Bio.SeqFeature import SimpleLocation
+from Bio.SeqFeature import LocationParserError
# other Bio.GenBank stuff
from .utils import FeatureValueCleaner
codereview_new_python_data_10319
def is_android_cuttlefish(plt=None):
def is_android_emulator(plt=None):
    """Return True if we are on android emulator platform."""
-   return 'ANDROID_EMULATOR' in (plt or get_platform_group())
def is_android_kernel(plt=None):

I think there's a footgun in here:
```
>>> 'x' in ('y' or 'x')
False
>>> ('y' or 'x')
'y'
```
I think it works if you only want to use get_platform_group() when plt is not passed, but I don't like this kind of magic. Let's get rid of the plt behavior since it's not even used anywhere.

def is_android_cuttlefish(plt=None):
def is_android_emulator(plt=None):
    """Return True if we are on android emulator platform."""
+   return 'ANDROID_EMULATOR' in get_platform_group()
def is_android_kernel(plt=None):
codereview_new_python_data_10320
    r'.*assertion failed at\s.*\sin\s*.*: (.*)')
ASSERT_REGEX_GLIBC = re.compile(
    r'.*:\s*assertion [`\'"]?(.*?)[`\'"]? failed\.?$', re.IGNORECASE)
-ASSERT_DISENGAGED_VALUE = re.compile(
-    r'.*\S.*\/.*:\d+:\s*assertion .* failed:\s*'
-    r'(optional operator.* called on a disengaged value)'
-)
ASSERT_NOT_REACHED_REGEX = re.compile(r'^\s*SHOULD NEVER BE REACHED\s*$')
CENTIPEDE_TIMEOUT_REGEX = re.compile(
    r'^========= Timeout of \d+ seconds exceeded; exiting')

This part is a little too specific. Can we make this match on anything that comes after the "assertion failed" part?

    r'.*assertion failed at\s.*\sin\s*.*: (.*)')
ASSERT_REGEX_GLIBC = re.compile(
    r'.*:\s*assertion [`\'"]?(.*?)[`\'"]? failed\.?$', re.IGNORECASE)
+ASSERT_REGEX_GLIBC_SUFFIXED = re.compile(
+    r'.*\S.*\/.*:\d+:\s*assertion .* failed:\s*(\S.*)')
ASSERT_NOT_REACHED_REGEX = re.compile(r'^\s*SHOULD NEVER BE REACHED\s*$')
CENTIPEDE_TIMEOUT_REGEX = re.compile(
    r'^========= Timeout of \d+ seconds exceeded; exiting')
codereview_new_python_data_10321
def check_error_and_log(error_regex, log_message_format):
    # Randomly set to ignore long running inputs.
    if engine_common.decide_with_probability(
        self.strategies.IGNORE_TIMEOUTS_PROB):
-       self.set_arg(fuzz_args, constants.IGNORE_TIMEOUTS_ENV_VAR, None)
    # Randomly set new vs. old queue selection mechanism.
    if engine_common.decide_with_probability(

I think you mean to do this:
```
environment.set_value(constants.IGNORE_TIMEOUTS_ENV_VAR, 1)
```

def check_error_and_log(error_regex, log_message_format):
    # Randomly set to ignore long running inputs.
    if engine_common.decide_with_probability(
        self.strategies.IGNORE_TIMEOUTS_PROB):
+       environment.set_value(constants.IGNORE_TIMEOUTS_ENV_VAR, 1)
    # Randomly set new vs. old queue selection mechanism.
    if engine_common.decide_with_probability(
codereview_new_python_data_10322
def test_pysecsan_command_os_system(self):
    data = self._read_test_data('pysecsan_command_os_system.txt')
    expected_type = 'PySecSan'
    expected_address = ''
-   expected_state = ''
    expected_stacktrace = data
    expected_security_flag = True
    self._validate_get_crash_data(data, expected_type, expected_address,

Can we make this a bit more detailed about what exact type it is?

def test_pysecsan_command_os_system(self):
    data = self._read_test_data('pysecsan_command_os_system.txt')
    expected_type = 'PySecSan'
    expected_address = ''
+   expected_state = 'abort_with_issue\nhook_pre_exec_os_system\nrun\n'
    expected_stacktrace = data
    expected_security_flag = True
    self._validate_get_crash_data(data, expected_type, expected_address,
codereview_new_python_data_10323
def test_pysecsan_command_os_system(self):
    data = self._read_test_data('pysecsan_command_os_system.txt')
    expected_type = 'PySecSan'
    expected_address = ''
-   expected_state = ''
    expected_stacktrace = data
    expected_security_flag = True
    self._validate_get_crash_data(data, expected_type, expected_address,

This should be the top three frames picked from the Python stacktrace. Our existing Python frame parsing regex is here: https://github.com/google/clusterfuzz/blob/2f89cd16a3cb6f2f5493c62315ac1973aac1f864/src/clusterfuzz/stacktraces/constants.py#L335

def test_pysecsan_command_os_system(self):
    data = self._read_test_data('pysecsan_command_os_system.txt')
    expected_type = 'PySecSan'
    expected_address = ''
+   expected_state = 'abort_with_issue\nhook_pre_exec_os_system\nrun\n'
    expected_stacktrace = data
    expected_security_flag = True
    self._validate_get_crash_data(data, expected_type, expected_address,
codereview_new_python_data_10324
class FuzzResult(object):
    """Represents a result of a fuzzing session: a list of crashes found and the stats generated."""
-   def __init__(self, logs, command, crashes, stats, time_executed,
-                process_timed_out=None):
        self.logs = logs
        self.command = command
        self.crashes = crashes
        self.stats = stats
        self.time_executed = time_executed
-       self.process_timed_out = process_timed_out
class ReproduceResult(object):

nit: should we just name this `timed_out` ? This is more consistent with the other flags (e.g. `time_executed` instead of `process_time_executed`).

class FuzzResult(object):
    """Represents a result of a fuzzing session: a list of crashes found and the stats generated."""
+   def __init__(self,
+                logs,
+                command,
+                crashes,
+                stats,
+                time_executed,
+                timed_out=None):
        self.logs = logs
        self.command = command
        self.crashes = crashes
        self.stats = stats
        self.time_executed = time_executed
+       self.timed_out = timed_out
class ReproduceResult(object):
codereview_new_python_data_10325
def _group_testcases_based_on_variants(testcase_map):
        project_counter.items(), key=lambda x: x[1], reverse=True)[:10]
    log_string = ""
    for tid, count in top_matched_testcase:
-       log_string += "%s: %d, " % (tid, count)
    logs.log('VARIANT ANALYSIS (Project Report): project=%s, '
             'total_testcase_num=%d,'

nit: please use f-strings. i'll fix this to get this deployed today.

def _group_testcases_based_on_variants(testcase_map):
        project_counter.items(), key=lambda x: x[1], reverse=True)[:10]
    log_string = ""
    for tid, count in top_matched_testcase:
+       log_string += f'{tid}: {count}, '
    logs.log('VARIANT ANALYSIS (Project Report): project=%s, '
             'total_testcase_num=%d,'
codereview_new_python_data_10326
def _get_runner():
def _get_reproducer_path(log, reproducers_dir):
    """Gets the reproducer path, if any."""
    crash_match = _CRASH_REGEX.search(log)
-   if not crash_match or not crash_match.group(1):
        return None
    tmp_crash_path = Path(crash_match.group(1))
    prm_crash_path = Path(reproducers_dir) / tmp_crash_path.name

Under what circumstance can the second part (`not crash_match.group(1)`) occur? Please add a comment.

def _get_runner():
def _get_reproducer_path(log, reproducers_dir):
    """Gets the reproducer path, if any."""
    crash_match = _CRASH_REGEX.search(log)
+   if not crash_match:
        return None
    tmp_crash_path = Path(crash_match.group(1))
    prm_crash_path = Path(reproducers_dir) / tmp_crash_path.name
codereview_new_python_data_10327
from local.butler import common
-def execute(_):
    """Install all required dependencies for running tests, the appengine, and the bot."""
    common.install_dependencies()
    appengine.symlink_dirs()

I think you've said that `del X` is the preferred style these days.

from local.butler import common
+def execute(args):
    """Install all required dependencies for running tests, the appengine, and the bot."""
+   del args
    common.install_dependencies()
    appengine.symlink_dirs()
codereview_new_python_data_10328
def store_fuzzer_run_results(testcase_file_paths, fuzzer, fuzzer_command,
        fuzzer_return_code_string = 'Fuzzer timed out.'
    truncated_fuzzer_output = truncate_fuzzer_output(fuzzer_output,
                                                     data_types.ENTITY_SIZE_LIMIT)
-   console_output = '%s: %s\n%s\n%s' % (bot_name, fuzzer_return_code_string,
-                                        fuzzer_command, truncated_fuzzer_output)
    # Refresh the fuzzer object.
    fuzzer = data_types.Fuzzer.query(data_types.Fuzzer.name == fuzzer.name).get()

Maybe just change this to an f-string.

def store_fuzzer_run_results(testcase_file_paths, fuzzer, fuzzer_command,
        fuzzer_return_code_string = 'Fuzzer timed out.'
    truncated_fuzzer_output = truncate_fuzzer_output(fuzzer_output,
                                                     data_types.ENTITY_SIZE_LIMIT)
+   console_output = (f'{bot_name}: {fuzzer_return_code_string}\n{fuzzer_command}'
+                     f'\n{truncated_fuzzer_output}')
    # Refresh the fuzzer object.
    fuzzer = data_types.Fuzzer.query(data_types.Fuzzer.name == fuzzer.name).get()
codereview_new_python_data_10329
def test_ignore_linux_gate(self):
                                   expected_state, expected_stacktrace,
                                   expected_security_flag)
-   def test_capture_shell_bug(self):
-       """Test capturing shell bugs detected by extra sanitizers"""
-       data = self._read_test_data('shell_bug.txt')
-       expected_type = 'Shell bug'
        expected_address = ''
        expected_state = 'wait4\ndo_system\ntarget.cpp\n'
        expected_stacktrace = data

Could we rename this to "Command injection" ?

def test_ignore_linux_gate(self):
                                   expected_state, expected_stacktrace,
                                   expected_security_flag)
+   def test_capture_command_injection(self):
+       """Test capturing command injection bugs detected by extra sanitizers"""
+       data = self._read_test_data('command_injection_bug.txt')
+       expected_type = 'Command injection'
        expected_address = ''
        expected_state = 'wait4\ndo_system\ntarget.cpp\n'
        expected_stacktrace = data
codereview_new_python_data_10330
def on_event_request_status(self, wallet, key, status):
    @event_listener
    def on_event_invoice_status(self, wallet, key, status):
        if wallet == self.wallet:
-           self._logger.debug('invoice status update for key %s' % key)
-           # FIXME event doesn't pass the new status, so we need to retrieve
-           # invoice = self.wallet.get_invoice(key)
-           # if invoice:
-           #     status = self.wallet.get_invoice_status(invoice)
-           #     self.invoiceStatusChanged.emit(key, status)
-           # else:
-           #     self._logger.debug(f'No invoice found for key {key}')
            self.invoiceStatusChanged.emit(key, status)
    @qt_event_listener

`qeinvoicelistmodel.py` also needs updating (?) https://github.com/spesmilo/electrum/blob/eb00012c951e10855841d9e9daa19125cb3c04a2/electrum/gui/qml/qeinvoicelistmodel.py#L143

def on_event_request_status(self, wallet, key, status):
    @event_listener
    def on_event_invoice_status(self, wallet, key, status):
        if wallet == self.wallet:
+           self._logger.debug(f'invoice status update for key {key} to {status}')
            self.invoiceStatusChanged.emit(key, status)
    @qt_event_listener
codereview_new_python_data_10334
def generate_evaluation_code(self, code):
            code.putln('}')
    else:
        if item.value.type.is_array:
-           code.putln("memcpy(%s.%s, %s, sizeof %s);" % (
                self.result(),
                item.key.value,
                item.value.result(),

```suggestion
            code.putln("memcpy(%s.%s, %s, sizeof(%s));" % (
```

def generate_evaluation_code(self, code):
            code.putln('}')
    else:
        if item.value.type.is_array:
+           code.putln("memcpy(%s.%s, %s, sizeof(%s));" % (
                self.result(),
                item.key.value,
                item.value.result(),
codereview_new_python_data_10336
class TemplateCode(object):
    """
    _placeholder_count = 0
-   def __init__(self, writer = None, placeholders = None, extra_stats=None):
        self.writer = PyxCodeWriter() if writer is None else writer
        self.placeholders = {} if placeholders is None else placeholders
        self.extra_stats = [] if extra_stats is None else extra_stats

```suggestion
    def __init__(self, writer=None, placeholders=None, extra_stats=None):
```

class TemplateCode(object):
    """
    _placeholder_count = 0
+   def __init__(self, writer=None, placeholders=None, extra_stats=None):
        self.writer = PyxCodeWriter() if writer is None else writer
        self.placeholders = {} if placeholders is None else placeholders
        self.extra_stats = [] if extra_stats is None else extra_stats
codereview_new_python_data_10337
class SoftCComplexType(CComplexType):
    def __init__(self):
        super(SoftCComplexType, self).__init__(c_double_type)
-   def declaration_code(self, entity_code,
-                        for_display = 0, dll_linkage = None, pyrex = 0):
        base_result = super(SoftCComplexType, self).declaration_code(
            entity_code,
            for_display=for_display,
            dll_linkage=dll_linkage,
-           pyrex=pyrex
        )
        if for_display:
            return "soft %s" % base_result

```suggestion
            pyrex=pyrex,
```

class SoftCComplexType(CComplexType):
    def __init__(self):
        super(SoftCComplexType, self).__init__(c_double_type)
+   def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
        base_result = super(SoftCComplexType, self).declaration_code(
            entity_code,
            for_display=for_display,
            dll_linkage=dll_linkage,
+           pyrex=pyrex,
        )
        if for_display:
            return "soft %s" % base_result
codereview_new_python_data_10338
def analyse_declarations(self, env):
        is_frozen = False
        dataclass_directives = env.directives["dataclasses.dataclass"]
        if dataclass_directives:
-           frozen_directive = dataclass_directives[1].get('frozen', None)
            is_frozen = frozen_directive and frozen_directive.is_literal and frozen_directive.value
-       if is_frozen:
-           scope.is_dataclass = "frozen"
-       else:
-           scope.is_dataclass = True
        if self.doc and Options.docstrings:
            scope.doc = embed_position(self.pos, self.doc)

I really like conditional expressions for cases like this because they underline the intention to set a variable or attribute, just in different ways depending on a condition.
```suggestion
        scope.is_dataclass = "frozen" if is_frozen else True
```

def analyse_declarations(self, env):
        is_frozen = False
        dataclass_directives = env.directives["dataclasses.dataclass"]
        if dataclass_directives:
+           frozen_directive = dataclass_directives[1].get('frozen')
            is_frozen = frozen_directive and frozen_directive.is_literal and frozen_directive.value
+       scope.is_dataclass = "frozen" if is_frozen else True
        if self.doc and Options.docstrings:
            scope.doc = embed_position(self.pos, self.doc)
codereview_new_python_data_10339
def analyse_declarations(self, env):
        is_frozen = False
        dataclass_directives = env.directives["dataclasses.dataclass"]
        if dataclass_directives:
-           frozen_directive = dataclass_directives[1].get('frozen', None)
            is_frozen = frozen_directive and frozen_directive.is_literal and frozen_directive.value
-       if is_frozen:
-           scope.is_dataclass = "frozen"
-       else:
-           scope.is_dataclass = True
        if self.doc and Options.docstrings:
            scope.doc = embed_position(self.pos, self.doc)

We probably already know the node's `constant_result` at this point. (You can ask `has_constant_result()`, but there's hopefully some prior piece of validation code that makes sure that we *do* know the constant result of this flag at compile time.)

def analyse_declarations(self, env):
        is_frozen = False
        dataclass_directives = env.directives["dataclasses.dataclass"]
        if dataclass_directives:
+           frozen_directive = dataclass_directives[1].get('frozen')
            is_frozen = frozen_directive and frozen_directive.is_literal and frozen_directive.value
+       scope.is_dataclass = "frozen" if is_frozen else True
        if self.doc and Options.docstrings:
            scope.doc = embed_position(self.pos, self.doc)
codereview_new_python_data_10340
def create_args_parser():
        epilog="""Environment variables:
  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless of modification times and changes.
-
  Environment variables accepted by setuptools are supported to configure the C compiler and build:
  https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
    )

```suggestion
```
Code style doesn't like some whitespace

def create_args_parser():
        epilog="""Environment variables:
  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless of modification times and changes.
  Environment variables accepted by setuptools are supported to configure the C compiler and build:
  https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
    )
codereview_new_python_data_10342
def compile_multiple(sources, options):
    a CompilationResultSet. Performs timestamp checking and/or recursion
    if these are specified in the options.
    """
-   if len(sources) > 1 and options.module_name:
        raise RuntimeError('Full module name can only be set '
                           'for single source compilation')
    # run_pipeline creates the context

I'd reverse this. The issue is not that there are multiple sources, but that the module name was passed, which then dictates that there can only be one source. (Also, it's probably less likely for users to pass a module name than multiple sources.)
```suggestion
    if options.module_name and len(sources) > 1:
```

def compile_multiple(sources, options):
    a CompilationResultSet. Performs timestamp checking and/or recursion
    if these are specified in the options.
    """
+   if options.module_name and len(sources) > 1:
        raise RuntimeError('Full module name can only be set '
                           'for single source compilation')
    # run_pipeline creates the context
codereview_new_python_data_10343
def generate_result_code(self, code):
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')
-       if self.def_node.is_generator and self.def_node.is_coroutine:
            flags.append('CO_COROUTINE')
-       if self.def_node.is_asyncgen:
-           flags.append('CO_ASYNC_GENERATOR')
        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,

```suggestion
        if self.def_node.is_asyncgen:
            flags.append('CO_ASYNC_GENERATOR')
```

def generate_result_code(self, code):
            flags.append('CO_VARARGS')
        if self.def_node.starstar_arg:
            flags.append('CO_VARKEYWORDS')
+       if self.def_node.is_asyncgen:
+           flags.append('CO_ASYNC_GENERATOR')
+       elif self.def_node.is_coroutine:
            flags.append('CO_COROUTINE')
+       elif self.def_node.is_generator:
+           flags.append('CO_GENERATOR')
        code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
            self.result_code,
codereview_new_python_data_10345
def from_py_call_code(self, source_code, result_code, error_pos, code,
        return code.error_goto_if_neg(call_code, error_pos)
    def error_condition(self, result_code):
-       # It isn't possible to use CArrays return type so the error_condition
-       # is irrelevant. Returning a Falsey value does avoid an error when getting
-       # from_py_call_code from a typedef
        return ""

From a web search, "falsy" seems a somewhat more common spelling than "falsey", although both seem to be used.
```suggestion
        # It isn't possible to use CArrays as return type so the error_condition
        # is irrelevant. Returning a falsy value does avoid an error when getting
        # from_py_call_code from a typedef.
```

def from_py_call_code(self, source_code, result_code, error_pos, code,
        return code.error_goto_if_neg(call_code, error_pos)
    def error_condition(self, result_code):
+       # It isn't possible to use CArrays as return type so the error_condition
+       # is irrelevant. Returning a falsy value does avoid an error when getting
+       # from_py_call_code from a typedef.
        return ""
codereview_new_python_data_10346
def test_invalid_ellipsis(self):
        try:
            ast.parse(textwrap.dedent(code))
        except SyntaxError as exc:
-           assert "invalid syntax" in str(exc), str(exc)
        else:
            assert False, "Invalid Python code '%s' failed to raise an exception" % code

I'd rather not check for specifics of the error message here. There have been huge changes in CPython since the parser rewrite, and improving error messages is a major goal for them, especially regarding syntax errors and suggestions about what might be wrong. There's quite a chance that this message might change in the future.
```suggestion
            assert True
```

def test_invalid_ellipsis(self):
        try:
            ast.parse(textwrap.dedent(code))
        except SyntaxError as exc:
+           assert True
        else:
            assert False, "Invalid Python code '%s' failed to raise an exception" % code
codereview_new_python_data_10347
class ControlFlow(object):
       entries          set          tracked entries
       loops            list         stack for loop descriptors
       exceptions       list         stack for exception descriptors
-      in_try_block     int          track if we're in a try...except or try...finally blcok
    """
    def __init__(self):

```suggestion
       in_try_block     int          track if we're in a try...except or try...finally block
```

class ControlFlow(object):
       entries          set          tracked entries
       loops            list         stack for loop descriptors
       exceptions       list         stack for exception descriptors
+      in_try_block     int          track if we're in a try...except or try...finally block
    """
    def __init__(self):
codereview_new_python_data_10348
def assure_gil(code_path, code=code):
                assure_gil('error')
            if code.funcstate.error_without_exception:
-               code.put("if (PyErr_Occurred()) ")
            code.put_add_traceback(self.entry.qualified_name)
        else:
            warning(self.entry.pos, "Unraisable exception in function '%s'." %

This is making the non-local assumption that `put_add_traceback()` issues a single command. If that ever stops being true, this code may lead to subtle bugs. Curly braces are cheap in C, let's just use them. Also, we may be able to use `__Pyx_PyErr_Occurred()` here, which avoids a lookup of the thread state (if it has been looked up before, so not sure if this always applies). An alternative might be a C flag variable that remembers when no exception was set.

def assure_gil(code_path, code=code):
                assure_gil('error')
            if code.funcstate.error_without_exception:
+               code.putln("if (PyErr_Occurred()) {")
            code.put_add_traceback(self.entry.qualified_name)
+           if code.funcstate.error_without_exception:
+               code.putln("}")
        else:
            warning(self.entry.pos, "Unraisable exception in function '%s'." %
codereview_new_python_data_10349
def optimise_numeric_binop(operator, node, ret_type, arg0, arg1):
    if not numval.has_constant_result():
        return None
-   is_float = isinstance(numval, ExprNodes.FloatNode)
    num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type
    if is_float:
        if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):

This seems simpler (yes, I'm aware that this code was just moved around):
```suggestion
    is_float = numval.type.is_float
```

def optimise_numeric_binop(operator, node, ret_type, arg0, arg1):
    if not numval.has_constant_result():
        return None
+   is_float = numval.type.is_float
    num_type = PyrexTypes.c_double_type if is_float else PyrexTypes.c_long_type
    if is_float:
        if operator not in ('Add', 'Subtract', 'Remainder', 'TrueDivide', 'Divide', 'Eq', 'Ne'):
codereview_new_python_data_10363
def set_environment(key, default):
    #set_environment('NCCL_SOCKET_IFNAME', 'hsi')
    set_environment('MIOPEN_DEBUG_DISABLE_FIND_DB', '1')
    set_environment('MIOPEN_DISABLE_CACHE', '1')
-   set_environment('NCCL_DEBUG', 'INFO')
-   set_environment('NCCL_DEBUG_SUBSYS', 'INIT')
    set_environment('LD_LIBRARY_PATH', '"${CRAY_LD_LIBRARY_PATH}:${LD_LIBRARY_PATH}"')
    # Optimizations for Sierra-like systems

I don't think we want to keep these long-term.

def set_environment(key, default):
    #set_environment('NCCL_SOCKET_IFNAME', 'hsi')
    set_environment('MIOPEN_DEBUG_DISABLE_FIND_DB', '1')
    set_environment('MIOPEN_DISABLE_CACHE', '1')
    set_environment('LD_LIBRARY_PATH', '"${CRAY_LD_LIBRARY_PATH}:${LD_LIBRARY_PATH}"')
    # Optimizations for Sierra-like systems
codereview_new_python_data_10382
def run_step(
    if step.transformation:
        LOGGER.debug(f"transforming output with `{step.transformation}`")
        env = os.environ.copy()
-       env["INTEGRATION_DB_DIRECTORY"] = db_dir
        try:
            out = subprocess.run(
                [step.transformation],

This variable should be prefixed with `VAST_`.

def run_step(
    if step.transformation:
        LOGGER.debug(f"transforming output with `{step.transformation}`")
        env = os.environ.copy()
+       env["VAST_INTEGRATION_DB_DIRECTORY"] = db_dir
        try:
            out = subprocess.run(
                [step.transformation],
codereview_new_python_data_10395
def is_new_contributor(self, locale):
    """Return True if the user hasn't made contributions to the locale yet."""
    return (
        not self.translation_set.filter(locale=locale)
-       .exclude(entity__resource__project__slug="tutorial")
        .exists()
    )

I would generalize this to `entity__resource__project__system_project=True`.

def is_new_contributor(self, locale):
    """Return True if the user hasn't made contributions to the locale yet."""
    return (
        not self.translation_set.filter(locale=locale)
+       .exclude(entity__resource__project__system_project=True)
        .exists()
    )
codereview_new_python_data_10396
def _get_site_url_netloc():
def _default_from_email():
    return os.environ.get(
-       "DEFAULT_FROM_EMAIL", f"Pontoon <noreplay@{_get_site_url_netloc()}>"
    )

```suggestion
        "DEFAULT_FROM_EMAIL", f"Pontoon <noreply@{_get_site_url_netloc()}>"
```

def _get_site_url_netloc():
def _default_from_email():
    return os.environ.get(
+       "DEFAULT_FROM_EMAIL", f"Pontoon <noreply@{_get_site_url_netloc()}>"
    )
codereview_new_python_data_10397
class UserProfile(models.Model):
    # Visibility
    class Visibility(models.TextChoices):
-       ALL = "All users", "All users"
        TRANSLATORS = "Translators", "Users with translator rights"
    class VisibilityLoggedIn(models.TextChoices):

At least to me, "All users" does not clearly communicate that this information will be publicly and anonymously available. Could we use "Public" or something similar instead?

class UserProfile(models.Model):
    # Visibility
    class Visibility(models.TextChoices):
+       ALL = "Public", "Public"
        TRANSLATORS = "Translators", "Users with translator rights"
    class VisibilityLoggedIn(models.TextChoices):
codereview_new_python_data_10432
# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires
# It is not considered best practice to use install_requires to pin dependencies to specific versions.
install_requires=[
-   "aioquic>=0.9.20",
    "asgiref>=3.2.10,<3.6",
    "Brotli>=1.0,<1.1",
    "certifi>=2019.9.11",  # no semver here - this should always be on the last release!

Let's add an `<0.10` upper bound, otherwise we're in for fun surprises.

# https://packaging.python.org/en/latest/discussions/install-requires-vs-requirements/#install-requires
# It is not considered best practice to use install_requires to pin dependencies to specific versions.
install_requires=[
+   "aioquic>=0.9.20,<0.10",
    "asgiref>=3.2.10,<3.6",
    "Brotli>=1.0,<1.1",
    "certifi>=2019.9.11",  # no semver here - this should always be on the last release!
codereview_new_python_data_10514
def _stamp_headers(self, visitor_headers=None, **headers):
        else:
            headers["stamped_headers"] = stamped_headers
        for stamp in headers.keys():
-           if stamp != "stamped_headers" and stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
        headers["stamped_headers"] = list(set(headers["stamped_headers"]))
        _merge_dictionaries(headers, self.options, aggregate_duplicates=False)

We should add a comment which explains that the loop adds all missing stamped headers to the `stamped_headers` header so that we can save them in the result backend if `result_extended` is `True`

def _stamp_headers(self, visitor_headers=None, **headers):
        else:
            headers["stamped_headers"] = stamped_headers
        for stamp in headers.keys():
+           if stamp == "stamped_headers":
+               continue
+           if stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
        headers["stamped_headers"] = list(set(headers["stamped_headers"]))
        _merge_dictionaries(headers, self.options, aggregate_duplicates=False)
codereview_new_python_data_10515
def _freeze_group_tasks(self, _id=None, group_id=None, chord=None,
        # Create two new generators from the original generator of the group tasks (cloning the tasks).
        tasks1, tasks2 = itertools.tee(self._unroll_tasks(self.tasks))
-       # Use the first generator to freeze the group tasks to aquire the AsyncResult for each task.
        results = regen(self._freeze_tasks(tasks1, group_id, chord, root_id, parent_id))
        # Use the second generator to replace the exhausted generator of the group tasks.
        self.tasks = regen(tasks2)

```suggestion
        # Use the first generator to freeze the group tasks to acquire the AsyncResult for each task.
```

def _freeze_group_tasks(self, _id=None, group_id=None, chord=None,
        # Create two new generators from the original generator of the group tasks (cloning the tasks).
        tasks1, tasks2 = itertools.tee(self._unroll_tasks(self.tasks))
+       # Use the first generator to freeze the group tasks to acquire the AsyncResult for each task.
        results = regen(self._freeze_tasks(tasks1, group_id, chord, root_id, parent_id))
        # Use the second generator to replace the exhausted generator of the group tasks.
        self.tasks = regen(tasks2)
codereview_new_python_data_10516
def prepare_steps(self, args, kwargs, tasks,
        chain(group(signature1, signature2), signature3) -->
        Upgrades to chord([signature1, signature2], signature3)
        The responsibility of this method is to assure that the chain is
-       correctly unpacked, and that the correct callbacks are set up along
        the way.
        Arguments:
            args (Tuple): Partial args to be prepended to the existing args.

```suggestion
        The responsibility of this method is to ensure that the chain is
```

def prepare_steps(self, args, kwargs, tasks,
        chain(group(signature1, signature2), signature3) -->
        Upgrades to chord([signature1, signature2], signature3)
        The responsibility of this method is to assure that the chain is
+       correctly unpacked, and then the correct callbacks are set up along
        the way.
        Arguments:
            args (Tuple): Partial args to be prepended to the existing args.
codereview_new_python_data_10517
def stamp(self, visitor=None, **headers):
        """
        headers = headers.copy()
        if visitor is not None:
-           headers.update(visitor.on_signature(self, **headers))
        else:
            headers["stamped_headers"] = [header for header in headers.keys() if header not in self.options]
        _merge_dictionaries(headers, self.options)

additional unit test will be useful for this part

def stamp(self, visitor=None, **headers):
        """
        headers = headers.copy()
        if visitor is not None:
+           visitor_headers = visitor.on_signature(self, **headers)
+           if "stamped_headers" not in visitor_headers:
+               visitor_headers["stamped_headers"] = list(visitor_headers.keys())
+           headers.update(visitor_headers)
        else:
            headers["stamped_headers"] = [header for header in headers.keys() if header not in self.options]
        _merge_dictionaries(headers, self.options)
codereview_new_python_data_10518
def on_stop(self) -> None:
    def on_apply(
        self,
        target: TargetFunction,
-       args: tuple | None = None,
        kwargs: dict[str, Any] | None = None,
        callback: Callable[..., Any] | None = None,
        accept_callback: Callable[..., Any] | None = None,

```suggestion
        args: tuple[Any, ...] | None = None,
```

def on_stop(self) -> None:
    def on_apply(
        self,
        target: TargetFunction,
+       args: tuple[Any, ...] | None = None,
        kwargs: dict[str, Any] | None = None,
        callback: Callable[..., Any] | None = None,
        accept_callback: Callable[..., Any] | None = None,
codereview_new_python_data_10519
def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep,
    def __reduce__(self, args=(), kwargs=None):
        kwargs = {} if not kwargs else kwargs
-       return super().__reduce__(args, dict(kwargs, url=self.url))
    def _find_path(self, url):
        if not url:

```suggestion
        return super().__reduce__(args, {kwargs, url=self.url})
```

def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep,
    def __reduce__(self, args=(), kwargs=None):
        kwargs = {} if not kwargs else kwargs
+       return super().__reduce__(args, {**kwargs, 'url': self.url})
    def _find_path(self, url):
        if not url:
codereview_new_python_data_10521
def chordal_cycle_graph(p, create_using=None):
def paley_graph(p, create_using=None):
-   """Returns the Paley $\\frac{(p-1)}{2}-$regular graph on $p$ nodes.
    The returned graph is a graph on $ \\mathbb{Z}/p\\mathbb{Z}$ with edges between $x$ and $y$
    if and only if $x-y$ is a nonzero square in $\\mathbb{Z}/p\\mathbb{Z}$.

```suggestion
    """Returns the Paley $\\frac{(p-1)}{2}$ -regular graph on $p$ nodes.
```

def chordal_cycle_graph(p, create_using=None):
def paley_graph(p, create_using=None):
+   """Returns the Paley $\\frac{(p-1)}{2}$ -regular graph on $p$ nodes.
    The returned graph is a graph on $ \\mathbb{Z}/p\\mathbb{Z}$ with edges between $x$ and $y$
    if and only if $x-y$ is a nonzero square in $\\mathbb{Z}/p\\mathbb{Z}$.
codereview_new_python_data_10522
"""Functions for computing and measuring community structure. -.. warning:: The functions in this class are not imported into the top-level -:mod:`networkx` namespace. - -They can be imported using the :mod:`networkx.algorithms.community` module, -then accessing the functions as attributes of ``community``. For example:: >>> from networkx.algorithms import community >>> G = nx.barbell_graph(5, 1) Users can now access these function via e.g. `nx.community.girvan_newman` without a new import statement. Should we advertise that more in places like this? We should go through the codebase and update claims like these now that we are using lazy_loading. But that (and maybe this comment) belong in a different PR. """Functions for computing and measuring community structure. +The functions in this class are not imported into the top-level +:mod:`networkx` namespace. You can access these functions by importing +the :mod:`networkx.algorithms.community` module, then accessing the +functions as attributes of ``community``. For example:: >>> from networkx.algorithms import community >>> G = nx.barbell_graph(5, 1)
codereview_new_python_data_10523
def test_claw(self):
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_edgeless_graph(self):
-       # This graph has 2 nodes and 0 edges
        G = nx.Graph()
-       G_nodes = [0, 1]
-       G.add_nodes_from(G_nodes)
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_non_line_graph(self):

Just a nit, but this could be streamlined a bit
```suggestion
        G = nx.Graph()
        G.add_nodes_from([0, 1])
```

def test_claw(self):
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_edgeless_graph(self):
        G = nx.Graph()
+       G.add_nodes_from([0, 1])
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_non_line_graph(self):
codereview_new_python_data_10524
def test_claw(self):
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_edgeless_graph(self):
-       # This graph has 2 nodes and 0 edges
        G = nx.Graph()
-       G_nodes = [0, 1]
-       G.add_nodes_from(G_nodes)
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_non_line_graph(self):

Just another minor suggestion - using `pytest.raises` as a context manager is generally more readable (gets rid of signature forwarding). We can also match the exception message to ensure we're hitting the expected exception branch
```suggestion
        with pytest.raises(nx.NetworkXError, match=".*edgeless graph"):
            nx.inverse_line_graph(G)
```

def test_claw(self):
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_edgeless_graph(self):
        G = nx.Graph()
+       G.add_nodes_from([0, 1])
        pytest.raises(nx.NetworkXError, nx.inverse_line_graph, G)
    def test_non_line_graph(self):
codereview_new_python_data_10525
def test_edge_cases_directed_edge_swap():
    graph = nx.path_graph(4, create_using=nx.DiGraph)
    with pytest.raises(nx.NetworkXAlgorithmError):
        nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1)
-   graph = nx.DiGraph()
-   edges = [(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]
-   graph.add_edges_from(edges)
    with pytest.raises(nx.NetworkXAlgorithmError):
        nx.directed_edge_swap(graph, nswap=2, max_tries=20, seed=1)

Just a nit, but we can save a few lines by creating the graph directly from the edgelist:
```suggestion
    graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)])
```

def test_edge_cases_directed_edge_swap():
    graph = nx.path_graph(4, create_using=nx.DiGraph)
    with pytest.raises(nx.NetworkXAlgorithmError):
        nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1)
+   graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)])
    with pytest.raises(nx.NetworkXAlgorithmError):
        nx.directed_edge_swap(graph, nswap=2, max_tries=20, seed=1)
codereview_new_python_data_10526
def laplacian_spectrum(G, weight="weight"):
    Examples
    --------
-   The multiplicity of O as an eigenvalue of the laplacian matrix is equal
    to the number of connected components of G.
    >>> import numpy as np

```suggestion
    The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal
```

def laplacian_spectrum(G, weight="weight"):
    Examples
    --------
+   The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal
    to the number of connected components of G.
    >>> import numpy as np
codereview_new_python_data_10527
def laplacian_matrix(G, nodelist=None, weight="weight"):
    to a block diagonal matrix where each block is the respective Laplacian
    matrix for each component.
-   >>> G = nx.graph_atlas(26) #This graph from the Graph Atlas has 2 connected components.
-   >>> print(nx.laplacian_matrix(G).todense())
-   [[ 1 -1  0  0  0]
-    [-1  2 -1  0  0]
-    [ 0 -1  1  0  0]
-    [ 0  0  0  1 -1]
-    [ 0  0  0 -1  1]]
    """
    import scipy as sp

Using `toarray` would be preferable to `todense` here - we can also then get rid of the `print` and just rely on the array repr.

def laplacian_matrix(G, nodelist=None, weight="weight"):
    to a block diagonal matrix where each block is the respective Laplacian
    matrix for each component.
+   >>> G = nx.Graph([(1,2), (2, 3), (4, 5)])
+   >>> nx.laplacian_matrix(G).toarray()
+   array([[ 1, -1,  0,  0,  0],
+          [-1,  2, -1,  0,  0],
+          [ 0, -1,  1,  0,  0],
+          [ 0,  0,  0,  1, -1],
+          [ 0,  0,  0, -1,  1]])
    """
    import scipy as sp
codereview_new_python_data_10528
def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
    if len(G) < 4:
        raise nx.NetworkXError("Graph has less than four nodes.")
    if len(G.edges) < 2:
-       raise nx.NetworkXError("Graph has less than two edges")
    # Instead of choosing uniformly at random from a generated edge list,
    # this algorithm chooses nonuniformly from the set of nodes with
    # probability weighted by degree.

Grammatical nit - it wouldn't hurt to update the fewer-than-4-nodes exception message as well. Not a blocker though!
```suggestion
        raise nx.NetworkXError("Graph has fewer than two edges")
```

def lattice_reference(G, niter=5, D=None, connectivity=True, seed=None):
    if len(G) < 4:
        raise nx.NetworkXError("Graph has less than four nodes.")
    if len(G.edges) < 2:
+       raise nx.NetworkXError("Graph has fewer than two edges")
    # Instead of choosing uniformly at random from a generated edge list,
    # this algorithm chooses nonuniformly from the set of nodes with
    # probability weighted by degree.
codereview_new_python_data_10529
def test_omega():
    for o in omegas:
        assert -1 <= o <= 1
-
-def test_graph_no_edges():
    G = nx.Graph()
    G.add_nodes_from([0, 1, 2, 3])
-   pytest.raises(nx.NetworkXError, nx.random_reference, G)
-   pytest.raises(nx.NetworkXError, nx.lattice_reference, G)

```suggestion
```
The tests will need separation by two blank lines.

def test_omega():
    for o in omegas:
        assert -1 <= o <= 1
+@pytest.mark.parametrize("f", (nx.random_reference, nx.lattice_reference))
+def test_graph_no_edges(f):
    G = nx.Graph()
    G.add_nodes_from([0, 1, 2, 3])
+   with pytest.raises(nx.NetworkXError, match="Graph has less than 2 edges"):
+       f(G)
codereview_new_python_data_10530
def test_degree_seq_c4():
    assert degrees == sorted(d for n, d in G.degree())
-def test_no_edges():
    G = nx.DiGraph()
    G.add_nodes_from([0, 1, 2])
-   pytest.raises(nx.NetworkXError, nx.directed_edge_swap, G)
-   G = nx.Graph()
-   G.add_nodes_from([0, 1, 2, 3])
-   pytest.raises(nx.NetworkXError, nx.double_edge_swap, G)
def test_less_than_3_edges():
-   G = nx.DiGraph()
-   G.add_edges_from([(0, 1), (1, 2)])
-   pytest.raises(nx.NetworkXError, nx.directed_edge_swap, G)

Just another minor suggestion: using `pytest.raises` as a context manager with the `match=` check is a really nice feature that allows us to better test we're hitting the exact exception branch that we expect.

def test_degree_seq_c4():
    assert degrees == sorted(d for n, d in G.degree())
+def test_fewer_than_4_nodes():
    G = nx.DiGraph()
    G.add_nodes_from([0, 1, 2])
+   with pytest.raises(nx.NetworkXError, match=".*fewer than four nodes."):
+       nx.directed_edge_swap(G)
def test_less_than_3_edges():
+   G = nx.DiGraph([(0, 1), (1, 2)])
+   G.add_nodes_from([3, 4])
+   with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"):
+       nx.directed_edge_swap(G)
+
+   G = nx.Graph()
+   G.add_nodes_from([0, 1, 2, 3])
+   with pytest.raises(nx.NetworkXError, match=".*fewer than 2 edges"):
+       nx.double_edge_swap(G)
codereview_new_python_data_10531
G = nx.path_graph(20)  # An example graph
center_node = 5  # Or any other node to be in the center
edge_nodes = set(G) - {center_node}
-pos = nx.circular_layout(
-    G.subgraph(edge_nodes)
-)  # Ensures the nodes around the circle are evenly distributed
pos[center_node] = np.array([0, 0])  # Or off-center - whatever the user needs
nx.draw(G, pos, with_labels=True)

Minor formatting nit
```suggestion
# Ensures the nodes around the circle are evenly distributed
pos = nx.circular_layout(G.subgraph(edge_nodes))
```

G = nx.path_graph(20)  # An example graph
center_node = 5  # Or any other node to be in the center
edge_nodes = set(G) - {center_node}
+# Ensures the nodes around the circle are evenly distributed
+pos = nx.circular_layout(G.subgraph(edge_nodes))
pos[center_node] = np.array([0, 0])  # Or off-center - whatever the user needs
nx.draw(G, pos, with_labels=True)
codereview_new_python_data_10532
def test_multigraph_weighted_default_weight(self):
        G = nx.MultiDiGraph([(1, 2), (2, 3)])  # Unweighted edges
        G.add_weighted_edges_from([(1, 3, 1), (1, 3, 5), (1, 3, 2)])
-       # Default value for default weight is 0
        assert nx.dag_longest_path(G) == [1, 3]
        assert nx.dag_longest_path(G, default_weight=3) == [1, 2, 3]

```suggestion
        # Default value for default weight is 1
```

def test_multigraph_weighted_default_weight(self):
        G = nx.MultiDiGraph([(1, 2), (2, 3)])  # Unweighted edges
        G.add_weighted_edges_from([(1, 3, 1), (1, 3, 5), (1, 3, 2)])
+       # Default value for default weight is 1
        assert nx.dag_longest_path(G) == [1, 3]
        assert nx.dag_longest_path(G, default_weight=3) == [1, 2, 3]
codereview_new_python_data_10533
def compute_v_structures(G):
    Returns
    -------
    vstructs : iterator of tuples
-       The v structures within the graph. Each set has a 3-tuple with the
        parent, collider, and other parent.
    Notes

```suggestion
        The v structures within the graph. Each v structure is a 3-tuple with the
        parent, collider, and other parent.
```

def compute_v_structures(G):
    Returns
    -------
    vstructs : iterator of tuples
+       The v structures within the graph. Each v structure is a 3-tuple with the
        parent, collider, and other parent.
    Notes
codereview_new_python_data_10534
def test_hardest_prob(self):
def test_strategy_saturation_largest_first(self):
    def color_remaining_nodes(G, colored_vertices):
        color_assignments = []
-       aux_colored_vertices = {
-           key: value for key, value in colored_vertices.items()
-       }
        scratch_iterator = nx.algorithms.coloring.greedy_coloring.strategy_saturation_largest_first(
            G, aux_colored_vertices
        )

Is this different than `colored_vertices.copy()`? If not, the copy version is more readable.

def test_hardest_prob(self):
def test_strategy_saturation_largest_first(self):
    def color_remaining_nodes(G, colored_vertices):
        color_assignments = []
+       aux_colored_vertices = colored_vertices.copy()
+
        scratch_iterator = nx.algorithms.coloring.greedy_coloring.strategy_saturation_largest_first(
            G, aux_colored_vertices
        )
codereview_new_python_data_10535
def assign_labels(G1, G2, mapped_nodes=None):
    return G1, G2
-def get_labes(G1, G2):
    return nx.get_node_attributes(G1, "label"), nx.get_node_attributes(G2, "label")

```suggestion
def get_labels(G1, G2):
```

def assign_labels(G1, G2, mapped_nodes=None):
    return G1, G2
+def get_labels(G1, G2):
    return nx.get_node_attributes(G1, "label"), nx.get_node_attributes(G2, "label")
codereview_new_python_data_10536
def _cut_PT(u, v, graph_params, state_params):
    ):
        return True
-   if len(T1.intersection(G1_nbh)) != len(T2.intersection(G2_nbh)) or len(
-       T1_out.intersection(G1_nbh)
-   ) != len(T2_out.intersection(G2_nbh)):
        return True
    return False

```suggestion
    if len(T1.intersection(G1_nbh)) != len(T2.intersection(G2_nbh)):
        return True
    if len(T1_out.intersection(G1_nbh)) != len(T2_out.intersection(G2_nbh)):
        return True
```
The ugly black-caused spacing is unfortunate. This might make it more readable. Your choice...

def _cut_PT(u, v, graph_params, state_params):
    ):
        return True
+   if len(T1.intersection(G1_nbh)) != len(T2.intersection(G2_nbh)):
+       return True
+   if len(T1_out.intersection(G1_nbh)) != len(T2_out.intersection(G2_nbh)):
        return True
    return False
codereview_new_python_data_10537
def test_updating(self):
        m, m_rev, T1, T1_tilde, T2, T2_tilde = sparams
        # Add node to the mapping
-       m.update({4: self.mapped[4]})
-       m_rev.update({self.mapped[4]: 4})
        _update_Tinout(4, self.mapped[4], gparams, sparams)
        assert T1 == {3, 5, 9}

Just a nit, but it'd be slightly more canonical to use `__setitem__` directly:
```suggestion
        m[4] = self.mapped[4]
        m_rev[self.mapped[4]] = 4
```
ditto other instances - not critically important though!

def test_updating(self):
        m, m_rev, T1, T1_tilde, T2, T2_tilde = sparams
        # Add node to the mapping
+       m[4] = self.mapped[4]
+       m_rev[self.mapped[4]] = 4
        _update_Tinout(4, self.mapped[4], gparams, sparams)
        assert T1 == {3, 5, 9}
codereview_new_python_data_10573
# Script to run automated C++ tests.
#
-# Types of automated tests.
#   1. Requires credentials, permissions, and AWS resources.
#   2. Requires credentials and permissions.
#   3. Does not require credentials (mocked if necessary).

```suggestion
# Types of automated tests:
```

# Script to run automated C++ tests.
#
+# Types of automated tests:
#   1. Requires credentials, permissions, and AWS resources.
#   2. Requires credentials and permissions.
#   3. Does not require credentials (mocked if necessary).
codereview_new_python_data_10575
def hello_stepfunctions(stepfunctions_client):
    """
    Use the AWS SDK for Python (Boto3) to create an AWS Step Functions client and list
-   the state machines in your account. This list may be empty if you have not created
    any state machines.
    This example uses the default settings specified in your shared credentials
    and config files.

may --> might
have not --> haven't

def hello_stepfunctions(stepfunctions_client):
    """
    Use the AWS SDK for Python (Boto3) to create an AWS Step Functions client and list
+   the state machines in your account. This list might be empty if you haven't created
    any state machines.
    This example uses the default settings specified in your shared credentials
    and config files.
codereview_new_python_data_10629
async def get_operation_link(self, request: web.Request):
        'description': 'UUID of the link object to retrieve results of.'
    }])
    @aiohttp_apispec.querystring_schema(BaseGetOneQuerySchema)
-   @aiohttp_apispec.response_schema(LinkResultSchema(partial=True),
                                     description='Contains a dictionary with the requested link and its results dictionary.')
    async def get_operation_link_result(self, request: web.Request):
        operation_id = request.match_info.get('id')

Is there a case where we'd want `partial=True`?
```suggestion
    @aiohttp_apispec.response_schema(LinkResultSchema(),
```

async def get_operation_link(self, request: web.Request):
        'description': 'UUID of the link object to retrieve results of.'
    }])
    @aiohttp_apispec.querystring_schema(BaseGetOneQuerySchema)
+   @aiohttp_apispec.response_schema(LinkResultSchema(),
                                     description='Contains a dictionary with the requested link and its results dictionary.')
    async def get_operation_link_result(self, request: web.Request):
        operation_id = request.match_info.get('id')
codereview_new_python_data_10630
def __getitem__(self, key):
        # loc['level_one_key', 'column_name']. It's possible for both to be valid
        # when we have a multiindex on axis=0, and it seems pandas uses
        # interpretation 1 if that's possible. Do the same.
        try:
-           return self._helper_for__getitem__(
-               key, *self._parse_row_and_column_locators((key, slice(None)))
-           )
        except KeyError:
            pass
        return self._helper_for__getitem__(

```suggestion
        locators = self._parse_row_and_column_locators((key, slice(None)))
        try:
            return self._helper_for__getitem__(key, *locators)
        except KeyError:
            pass
```
? as otherwise we risk masking errors in `_parse_row_and_column_locators()`

def __getitem__(self, key):
        # loc['level_one_key', 'column_name']. It's possible for both to be valid
        # when we have a multiindex on axis=0, and it seems pandas uses
        # interpretation 1 if that's possible. Do the same.
+       locators = self._parse_row_and_column_locators((key, slice(None)))
        try:
+           return self._helper_for__getitem__(key, *locators)
        except KeyError:
            pass
        return self._helper_for__getitem__(
codereview_new_python_data_10631
def get_positions_from_labels(self, row_loc, col_loc):
            if axis_loc.stop is None or not is_number(axis_loc.stop):
                slice_stop = axis_loc.stop
            else:
-               slice_stop = axis_loc.stop - (
-                   0 if axis_loc.step is None else axis_loc.step
-               )
            axis_lookup = axis_labels.slice_indexer(
                axis_loc.start,
                slice_stop,

```suggestion
                slice_stop = axis_loc.stop - axis_loc.step
```
`axis_loc.step` cannot be None here as you've checked for that in condition above

def get_positions_from_labels(self, row_loc, col_loc):
            if axis_loc.stop is None or not is_number(axis_loc.stop):
                slice_stop = axis_loc.stop
            else:
+               slice_stop = axis_loc.stop - axis_loc.step
            axis_lookup = axis_labels.slice_indexer(
                axis_loc.start,
                slice_stop,
codereview_new_python_data_10632
def concat(
        for obj in list_of_objs
        if (
            isinstance(obj, (Series, pandas.Series))
-           or (
-               isinstance(obj, (DataFrame, Series))
-               and obj._query_compiler.lazy_execution
-           )
            or sum(obj.shape) > 0
        )
    ]

I'm still not sure if a separate condition for Series is needed here. We could continue the [discussion](https://github.com/modin-project/modin/pull/5673/files#r1116179037) here. A small summary: you claim that pandas does not filter empty series at all. I believe that empty series are not filtered only in the case when all objects for concatenation are Series according to https://github.com/pandas-dev/pandas/blob/main/pandas/core/reshape/concat.py#L476.

def concat(
        for obj in list_of_objs
        if (
            isinstance(obj, (Series, pandas.Series))
+           or (isinstance(obj, DataFrame) and obj._query_compiler.lazy_execution)
            or sum(obj.shape) > 0
        )
    ]
codereview_new_python_data_10633
def concat(
        for obj in list_of_objs
        if (
            isinstance(obj, (Series, pandas.Series))
-           or (
-               isinstance(obj, (DataFrame, Series))
-               and obj._query_compiler.lazy_execution
-           )
            or sum(obj.shape) > 0
        )
    ]

```suggestion
            isinstance(obj, DataFrame)
```

def concat(
        for obj in list_of_objs
        if (
            isinstance(obj, (Series, pandas.Series))
+           or (isinstance(obj, DataFrame) and obj._query_compiler.lazy_execution)
            or sum(obj.shape) > 0
        )
    ]
codereview_new_python_data_10634
def isin(self, values, ignore_indices=False, **kwargs):  # noqa: PR02
        Boolean mask for self of whether an element at the corresponding
        position is contained in `values`.
        """
-       # We drop `shape_hint` argument that may be passed from the API layer.
-       # BaseQC doesn't need to know how to handle it.
        shape_hint = kwargs.pop("shape_hint", None)
        if isinstance(values, type(self)) and ignore_indices:
            # Pandas logic is that it ignores indexing if 'values' is a 1D object

Should we remove the comment above?

def isin(self, values, ignore_indices=False, **kwargs):  # noqa: PR02
        Boolean mask for self of whether an element at the corresponding
        position is contained in `values`.
        """
        shape_hint = kwargs.pop("shape_hint", None)
        if isinstance(values, type(self)) and ignore_indices:
            # Pandas logic is that it ignores indexing if 'values' is a 1D object
codereview_new_python_data_10635
def stack(self, level, dropna):
    # These operations are operations that apply a function to every partition.
    def isin(self, values, ignore_indices=False, shape_hint=None):
        if isinstance(values, type(self)):
-           # HACK: if we won't cast to pandas then the execution engine will try to
-           # # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `values` as a query compiler
            # and broadcast accordingly.

```suggestion
            # HACK: if we don't cast to pandas, then the execution engine will try to
            # propagate the distributed Series to workers and most likely would have
```

def stack(self, level, dropna):
    # These operations are operations that apply a function to every partition.
    def isin(self, values, ignore_indices=False, shape_hint=None):
        if isinstance(values, type(self)):
+           # HACK: if we don't cast to pandas, then the execution engine will try to
+           # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `values` as a query compiler
            # and broadcast accordingly.
codereview_new_python_data_10636
def func(df) -> np.ndarray:
            ser = ser.astype("category", copy=False)
            return ser.cat.codes.to_frame(name=MODIN_UNNAMED_SERIES_LABEL)
-       res = self._modin_frame.apply_full_axis(
-           axis=0,
-           func=func,
-           new_index=self._modin_frame._index_cache,
-           new_columns=[MODIN_UNNAMED_SERIES_LABEL],
-           new_row_lengths=self._modin_frame._row_lengths_cache,
-           new_column_widths=self._modin_frame._column_widths_cache,
        )
        return self.__constructor__(res, shape_hint="column")

I'm still -1 about introducing these parameters as already explained in [this comment](https://github.com/modin-project/modin/pull/5587#discussion_r1094484673). I still believe that we can precompute new row/column lengths cache based only on the `new_index` and `new_columns` arguments.

def func(df) -> np.ndarray:
            ser = ser.astype("category", copy=False)
            return ser.cat.codes.to_frame(name=MODIN_UNNAMED_SERIES_LABEL)
+       res = self._modin_frame.fold(
+           axis=0, func=func, new_columns=[MODIN_UNNAMED_SERIES_LABEL]
        )
        return self.__constructor__(res, shape_hint="column")
codereview_new_python_data_10637
def map(self, arg, na_action=None):  # noqa: PR01, RT01, D200
        """
        if isinstance(arg, type(self)):
            # HACK: if we won't cast to pandas then the execution engine will try to
-            # # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `arg` as a query compiler
            # and broadcast accordingly.
```suggestion
            # propagate the distributed Series to workers and most likely would have
```
def map(self, arg, na_action=None):  # noqa: PR01, RT01, D200
        """
        if isinstance(arg, type(self)):
            # HACK: if we won't cast to pandas then the execution engine will try to
+            # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `arg` as a query compiler
            # and broadcast accordingly.
codereview_new_python_data_10638
def map(self, arg, na_action=None):  # noqa: PR01, RT01, D200
        Map values of Series according to input correspondence.
        """
        if isinstance(arg, type(self)):
-            # HACK: if we won't cast to pandas then the execution engine will try to
            # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `arg` as a query compiler
```suggestion
            # HACK: if we don't cast to pandas, then the execution engine will try to
```
def map(self, arg, na_action=None):  # noqa: PR01, RT01, D200
        Map values of Series according to input correspondence.
        """
        if isinstance(arg, type(self)):
+            # HACK: if we don't cast to pandas, then the execution engine will try to
            # propagate the distributed Series to workers and most likely would have
            # some performance problems.
            # TODO: A better way of doing so could be passing this `arg` as a query compiler
codereview_new_python_data_10639
def _set_item(df, row_loc):
            new_columns=self._modin_frame._columns_cache,
            keep_partitioning=False,
        )
-        return self.__constructor__(new_modin_frame, shape_hint="column")
    # END __setitem__ methods
```suggestion
        return self.__constructor__(new_modin_frame)
```
No need for this.
def _set_item(df, row_loc):
            new_columns=self._modin_frame._columns_cache,
            keep_partitioning=False,
        )
+        return self.__constructor__(new_modin_frame)
    # END __setitem__ methods
codereview_new_python_data_10640
def broadcast_axis_partitions(
        num_splits = NPartitions.get()
        preprocessed_map_func = cls.preprocess_func(apply_func)
        left_partitions = cls.axis_partition(left, axis)
-        if right is None:
-            right_partitions = None
-        else:
-            right_partitions = cls.axis_partition(right, axis)
-            if len(left_partitions) != len(right_partitions):
-                raise RuntimeError(
-                    "Partitioning of the left and right frames is different."
-                )
        # For mapping across the entire axis, we don't maintain partitioning because we
        # may want to line to partitioning up with another BlockPartitions object. Since
        # we don't need to maintain the partitioning, this gives us the opportunity to
```suggestion
        right_partitions = None if right is None else cls.axis_partition(right, axis)
```
def broadcast_axis_partitions(
        num_splits = NPartitions.get()
        preprocessed_map_func = cls.preprocess_func(apply_func)
        left_partitions = cls.axis_partition(left, axis)
+        right_partitions = None if right is None else cls.axis_partition(right, axis)
        # For mapping across the entire axis, we don't maintain partitioning because we
        # may want to line to partitioning up with another BlockPartitions object. Since
        # we don't need to maintain the partitioning, this gives us the opportunity to
codereview_new_python_data_10641
def materialize(cls, obj_id):
    @classmethod
    def put(cls, data, **kwargs):
        """
-        Put a data into the object store.
        Parameters
        ----------
        data : object
            Data to be put.
        Returns
        -------
        unidist.ObjectRef
            A reference to `data`.
        """
-        return unidist.put(data, **kwargs)
@unidist.remote
```suggestion
        Put data into the object store.
```
def materialize(cls, obj_id):
    @classmethod
    def put(cls, data, **kwargs):
        """
+        Put data into the object store.
        Parameters
        ----------
        data : object
            Data to be put.
+        **kwargs : dict
+            Additional keyword arguments (mostly for compatibility).
        Returns
        -------
        unidist.ObjectRef
            A reference to `data`.
        """
+        return unidist.put(data)
@unidist.remote
codereview_new_python_data_10642
def materialize(cls, obj_id):
    @classmethod
    def put(cls, data, **kwargs):
        """
-        Put a data into the object store.
        Parameters
        ----------
        data : object
            Data to be put.
        Returns
        -------
        unidist.ObjectRef
            A reference to `data`.
        """
-        return unidist.put(data, **kwargs)
@unidist.remote
`unidist.put` doesn't take any kwargs.
def materialize(cls, obj_id):
    @classmethod
    def put(cls, data, **kwargs):
        """
+        Put data into the object store.
        Parameters
        ----------
        data : object
            Data to be put.
+        **kwargs : dict
+            Additional keyword arguments (mostly for compatibility).
        Returns
        -------
        unidist.ObjectRef
            A reference to `data`.
        """
+        return unidist.put(data)
@unidist.remote
codereview_new_python_data_10643
def wait(self):
        wait([self._data])
    # If unidist has not been initialized yet by Modin,
-    # unidist itself handles initialization when calling `UnidistWrapper.put`.
    _iloc = execution_wrapper.put(PandasDataframePartition._iloc)
    def mask(self, row_labels, col_labels):
```suggestion
    # unidist itself handles initialization when calling `unidist.put`.
```
unidist doesn't know anything about UnidistWrapper. Also, let's add a comment similar to the one above for Ray.
def wait(self):
        wait([self._data])
    # If unidist has not been initialized yet by Modin,
+    # it will be initialized when calling `UnidistWrapper.put`.
    _iloc = execution_wrapper.put(PandasDataframePartition._iloc)
    def mask(self, row_labels, col_labels):
codereview_new_python_data_10644
# If unidist has not been initialized yet by Modin,
-# unidist itself handles initialization when calling `UnidistWrapper.put`.
_DEPLOY_AXIS_FUNC = UnidistWrapper.put(PandasDataframeAxisPartition.deploy_axis_func)
_DRAIN = UnidistWrapper.put(PandasDataframeAxisPartition.drain)
```suggestion
# unidist itself handles initialization when calling `unidist.put`.
```
# If unidist has not been initialized yet by Modin,
+# it will be initialized when calling `UnidistWrapper.put`.
_DEPLOY_AXIS_FUNC = UnidistWrapper.put(PandasDataframeAxisPartition.deploy_axis_func)
_DRAIN = UnidistWrapper.put(PandasDataframeAxisPartition.drain)
codereview_new_python_data_10645
def wait(self):
        wait([self._data])
    # If unidist has not been initialized yet by Modin,
-    # it will be initialized when calling `UnidistWrapper.put`.
    _iloc = execution_wrapper.put(PandasDataframePartition._iloc)
    def mask(self, row_labels, col_labels):
```suggestion
    # unidist itself handles initialization when calling `unidist.put`,
    # which is called inside of ``UnidistWrapper.put``.
```
Looks clearer to me because it might not be obvious to a Modin developer how `UnidistWrapper.put` initializes unidist (there is no init call).
def wait(self):
        wait([self._data])
    # If unidist has not been initialized yet by Modin,
+    # unidist itself handles initialization when calling `unidist.put`,
+    # which is called inside of `UnidistWrapper.put`.
    _iloc = execution_wrapper.put(PandasDataframePartition._iloc)
    def mask(self, row_labels, col_labels):
codereview_new_python_data_10646
def mean(x1, axis=None, dtype=None, out=None, keepdims=None, *, where=True):
# Maximum and minimum are ufunc's in NumPy, which means that our array's __array_ufunc__
-# implementation will automatically handle this, so we can just use NumPy's maximum/minimum
-# since that will route to our array's ufunc.
def maximum(
    x1, x2, out=None, where=True, casting="same_kind", order="K", dtype=None, subok=True
):
This comment seems outdated now.
def mean(x1, axis=None, dtype=None, out=None, keepdims=None, *, where=True):
# Maximum and minimum are ufunc's in NumPy, which means that our array's __array_ufunc__
+# implementation will automatically handle this. We still need the function though, so that
+# if the operands are modin.pandas objects, we can convert them to arrays, but after that
+# we can just use NumPy's maximum/minimum since that will route to our array's ufunc.
def maximum(
    x1, x2, out=None, where=True, casting="same_kind", order="K", dtype=None, subok=True
):
codereview_new_python_data_10647
def isscalar(e):
less_equal = _dispatch_logic("less_equal")
equal = _dispatch_logic("equal")
not_equal = _dispatch_logic("not_equal")
-array_equal = _dispatch_logic("array_equal")
We don't define `array_equal` anywhere. Should this be `equal`?
def isscalar(e):
less_equal = _dispatch_logic("less_equal")
equal = _dispatch_logic("equal")
not_equal = _dispatch_logic("not_equal")
codereview_new_python_data_10648
def func(df) -> np.ndarray:
            # if the dfs have categorical columns
            # so we intentionaly restore the right dtype.
            # TODO: revert the change when https://github.com/pandas-dev/pandas/issues/51362 is fixed.
-            ser = df.astype("category", copy=False).iloc[:, 0]
            return ser.cat.codes
        res = self._modin_frame.apply_full_axis(axis=0, func=func)
We've already raised a similar issue in modin, we should probably link it here as well: #2513
def func(df) -> np.ndarray:
            # if the dfs have categorical columns
            # so we intentionaly restore the right dtype.
            # TODO: revert the change when https://github.com/pandas-dev/pandas/issues/51362 is fixed.
+            ser = df.iloc[:, 0]
+            if ser.dtype != "category":
+                ser = ser.astype("category", copy=False)
            return ser.cat.codes
        res = self._modin_frame.apply_full_axis(axis=0, func=func)
codereview_new_python_data_10649
def func(df) -> np.ndarray:
            # if the dfs have categorical columns
            # so we intentionaly restore the right dtype.
            # TODO: revert the change when https://github.com/pandas-dev/pandas/issues/51362 is fixed.
-            ser = df.astype("category", copy=False).iloc[:, 0]
            return ser.cat.codes
        res = self._modin_frame.apply_full_axis(axis=0, func=func)
```suggestion
            ser = df.iloc[:, 0]
            if ser.dtype != "category":
                ser = ser.astype("category", copy=False)
```
- should be faster to cast a single column rather than the whole frame
- let's not do extra-casting if not necessary
def func(df) -> np.ndarray:
            # if the dfs have categorical columns
            # so we intentionaly restore the right dtype.
            # TODO: revert the change when https://github.com/pandas-dev/pandas/issues/51362 is fixed.
+            ser = df.iloc[:, 0]
+            if ser.dtype != "category":
+                ser = ser.astype("category", copy=False)
            return ser.cat.codes
        res = self._modin_frame.apply_full_axis(axis=0, func=func)
codereview_new_python_data_10650
def _compute_duplicated(df):
            )
            return result
-        if len(self._modin_frame.column_widths) > 1:
            # if the number of columns (or column partitions) we are checking for duplicates is larger than 1,
            # we must first hash them to generate a single value that can be compared across rows.
            hashed_modin_frame = self._modin_frame.reduce(
`column_widths` may not be available at this point, meaning that this line would trigger its computation. Can we use `._partitions.shape` here? It seems to have the same result.
```suggestion
        if self._modin_frame._partitions.shape[1] > 1:
```
def _compute_duplicated(df):
            )
            return result
+        if self._modin_frame._partitions.shape[1] > 1:
            # if the number of columns (or column partitions) we are checking for duplicates is larger than 1,
            # we must first hash them to generate a single value that can be compared across rows.
            hashed_modin_frame = self._modin_frame.reduce(
codereview_new_python_data_10651
def __init__(
        self._dtypes = dtypes
        self._validate_axes_lengths()
-        if all(obj is not None for obj in (index, columns, row_lengths, column_widths)):
-            # this hint allows to filter empty partitions out without triggering metadata computation
-            # in order to avoid useless computation over empty partitions
-            compute_metadata = True
-        else:
-            compute_metadata = False
-        self._filter_empties(compute_metadata=compute_metadata)
    def _validate_axes_lengths(self):
        """Validate that labels are split correctly if split is known."""
I'm not getting it, doesn't this condition inside `._filter_empties` mean the same? https://github.com/modin-project/modin/blob/6afee33df4871578e0056dde614878b4870f8d2a/modin/core/dataframe/pandas/dataframe/dataframe.py#L471-L478 I mean, it won't trigger metadata computation anyway if `compute_metadata=False` was passed, would it?
def __init__(
        self._dtypes = dtypes
        self._validate_axes_lengths()
+        self._filter_empties(compute_metadata=False)
    def _validate_axes_lengths(self):
        """Validate that labels are split correctly if split is known."""
codereview_new_python_data_10652
def __init__(
        self._dtypes = dtypes
        self._validate_axes_lengths()
-        if all(obj is not None for obj in (index, columns, row_lengths, column_widths)):
-            # this hint allows to filter empty partitions out without triggering metadata computation
-            # in order to avoid useless computation over empty partitions
-            compute_metadata = True
-        else:
-            compute_metadata = False
-        self._filter_empties(compute_metadata=compute_metadata)
    def _validate_axes_lengths(self):
        """Validate that labels are split correctly if split is known."""
```suggestion
        self._filter_empties(compute_metadata=False)
```
def __init__(
        self._dtypes = dtypes
        self._validate_axes_lengths()
+        self._filter_empties(compute_metadata=False)
    def _validate_axes_lengths(self):
        """Validate that labels are split correctly if split is known."""
codereview_new_python_data_10653
def _reset(df, *axis_lengths, partition_idx):
        self._modin_frame.apply_full_axis(
            axis=1,
            func=_reset,
-            other=None,
            enumerate_partitions=True,
            new_columns=new_columns,
            sync_labels=False,
```suggestion
```
The `apply_full_axis` method doesn't have an `other` parameter, so we should remove it from the call.
def _reset(df, *axis_lengths, partition_idx):
        self._modin_frame.apply_full_axis(
            axis=1,
            func=_reset,
            enumerate_partitions=True,
            new_columns=new_columns,
            sync_labels=False,
codereview_new_python_data_10654
def func(df, **kw):
        return pandas.DataFrame()
    result = qc._modin_frame.apply_full_axis(
-        1, func, other=None, new_index=[], new_columns=[], enumerate_partitions=True
    )
    result.to_pandas()
```suggestion
        1, func, new_index=[], new_columns=[], enumerate_partitions=True
```
def func(df, **kw):
        return pandas.DataFrame()
    result = qc._modin_frame.apply_full_axis(
+        1, func, new_index=[], new_columns=[], enumerate_partitions=True
    )
    result.to_pandas()
codereview_new_python_data_10655
def func(df, **kw):
        return pandas.DataFrame()
    result = qc._modin_frame.apply_full_axis(
-        1, func, other=None, new_index=[], new_columns=[], enumerate_partitions=True
    )
    result.to_pandas()
```suggestion
        1, func, new_index=[], new_columns=[], enumerate_partitions=True
```
def func(df, **kw):
        return pandas.DataFrame()
    result = qc._modin_frame.apply_full_axis(
+        1, func, new_index=[], new_columns=[], enumerate_partitions=True
    )
    result.to_pandas()
codereview_new_python_data_10656
def test_indexing_duplicate_axis(data):
        "series_of_integers",
    ],
)
-def test_set_index(data, key_func):
-    eval_general(*create_test_dfs(data), lambda df: df.set_index(key_func(df)))
@pytest.mark.parametrize("index", ["a", ["a", ("b", "")]])
If we are not testing the `drop==False` parameter, then let's start.
def test_indexing_duplicate_axis(data):
        "series_of_integers",
    ],
)
+@pytest.mark.parametrize(
+    "drop_kwargs",
+    [{"drop": True}, {"drop": False}, {}],
+    ids=["drop_True", "drop_False", "no_drop_param"],
+)
+def test_set_index(data, key_func, drop_kwargs, request):
+    if (
+        "list_of_index_and_first_column_name" in request.node.name
+        and "drop_False" in request.node.name
+    ):
+        pytest.xfail(
+            reason="KeyError: https://github.com/modin-project/modin/issues/5636"
+        )
+    eval_general(
+        *create_test_dfs(data), lambda df: df.set_index(key_func(df), **drop_kwargs)
+    )
@pytest.mark.parametrize("index", ["a", ["a", ("b", "")]])
codereview_new_python_data_10657
def files(self):
        try:
            files = self.dataset.files
        except AttributeError:
-            # compatibility with 4.0.1 <= pyarrow < 8.0.0
            files = self.dataset._dataset.files
        self._files = self._get_files(files)
        return self._files
Tested the code locally for the following `pyarrow` versions: 4.0.1, 5.0.0, 6.0.0, 7.0.0, 8.0.0.
def files(self):
        try:
            files = self.dataset.files
        except AttributeError:
+            # compatibility at least with 3.0.0 <= pyarrow < 8.0.0
            files = self.dataset._dataset.files
        self._files = self._get_files(files)
        return self._files
codereview_new_python_data_10658
def merge_partitioning(left, right, axis=1):
    -------
    int
    """
-    # Avoiding circular imports from pandas query compiler
-    from modin.core.storage_formats.pandas.utils import compute_chunksize
    lshape = left._row_lengths_cache if axis == 0 else left._column_widths_cache
    rshape = right._row_lengths_cache if axis == 0 else right._column_widths_cache
## Module imports itself
The module 'modin.core.storage_formats.pandas.utils' imports itself. [Show more details](https://github.com/modin-project/modin/security/code-scanning/582)
def merge_partitioning(left, right, axis=1):
    -------
    int
    """
    lshape = left._row_lengths_cache if axis == 0 else left._column_widths_cache
    rshape = right._row_lengths_cache if axis == 0 else right._column_widths_cache
codereview_new_python_data_10659
def getitem_column_array(self, key, numeric=False):
            new_modin_frame = self._modin_frame.take_2d_labels_or_positional(
                col_labels=key
            )
-        return self.__constructor__(new_modin_frame, shape_hint)
    def getitem_row_array(self, key):
        return self.__constructor__(
```suggestion
        return self.__constructor__(new_modin_frame, shape_hint=shape_hint)
```
If a parameter is one of the kwargs, I prefer to pass it as a keyword arg.
def getitem_column_array(self, key, numeric=False):
            new_modin_frame = self._modin_frame.take_2d_labels_or_positional(
                col_labels=key
            )
+        return self.__constructor__(new_modin_frame, shape_hint=shape_hint)
    def getitem_row_array(self, key):
        return self.__constructor__(
codereview_new_python_data_10660
def reindex(self, axis, labels, **kwargs):
    def reset_index(self, **kwargs):
        if self._modin_frame._index_cache is None:
-            def _reset(df, *columns_idx, partition_idx):
                _kw = dict(kwargs)
                if len(columns_idx) > 1 and partition_idx == 0:
                    old_cols = columns[0].append(columns[1:])
```suggestion
            def _reset(df, *columns, partition_idx):
```
def reindex(self, axis, labels, **kwargs):
    def reset_index(self, **kwargs):
        if self._modin_frame._index_cache is None:
+            def _reset(df, *columns, partition_idx):
                _kw = dict(kwargs)
                if len(columns_idx) > 1 and partition_idx == 0:
                    old_cols = columns[0].append(columns[1:])
codereview_new_python_data_10661
def broadcast_apply_full_axis(
            the provided hints in order to save time on syncing them.
        pass_cols_to_partitions : bool, default: False
            Whether pass columns into applied `func` or not.
-            Note that `func` must be able to obtain `*columns` arg.
        Returns
        -------
```suggestion
            Note that `func` must be able to obtain `df, *columns`.
```
def broadcast_apply_full_axis(
            the provided hints in order to save time on syncing them.
        pass_cols_to_partitions : bool, default: False
            Whether pass columns into applied `func` or not.
+            Note that `func` must be able to obtain `df, *columns`.
        Returns
        -------
codereview_new_python_data_10662
def broadcast_axis_partitions(
            Note that `apply_func` must be able to accept `partition_idx` kwarg.
        lengths : list of ints, default: None
            The list of lengths to shuffle the object.
-        apply_func_args : bool, optional
-            Whether pass extra args to `func` or not.
-            Note that `func` must be able to obtain `df, *args`.
        **kwargs : dict
            Additional options that could be used by different engines.
```suggestion
        apply_func_args : list-like, optional
            Positional arguments to pass to the `func`.
```
def broadcast_axis_partitions(
            Note that `apply_func` must be able to accept `partition_idx` kwarg.
        lengths : list of ints, default: None
            The list of lengths to shuffle the object.
+        apply_func_args : list-like, optional
+            Positional arguments to pass to the `func`.
        **kwargs : dict
            Additional options that could be used by different engines.
codereview_new_python_data_10663
def broadcast_apply_full_axis(
        kw = {}
        if dtypes == "copy":
            kw["dtypes"] = self._dtypes
-        elif dtypes is not None and new_columns is not None:
            columns = new_columns if new_columns is not None else self.columns
            kw["dtypes"] = pandas.Series(
                [np.dtype(dtypes)] * len(columns), index=columns
This branch will continue to trigger the calculation of the columns, but this is not important for `pivot_table`, since we will not get into this branch.
def broadcast_apply_full_axis(
        kw = {}
        if dtypes == "copy":
            kw["dtypes"] = self._dtypes
+        elif dtypes is not None:
            columns = new_columns if new_columns is not None else self.columns
            kw["dtypes"] = pandas.Series(
                [np.dtype(dtypes)] * len(columns), index=columns
codereview_new_python_data_10664
def broadcast_apply_full_axis(
        kw = {}
        if dtypes == "copy":
            kw["dtypes"] = self._dtypes
-        elif dtypes is not None and new_columns is not None:
            columns = new_columns if new_columns is not None else self.columns
            kw["dtypes"] = pandas.Series(
                [np.dtype(dtypes)] * len(columns), index=columns
Why do we need the double check for `new_columns`?
```suggestion
        elif dtypes is not None:
```
def broadcast_apply_full_axis(
        kw = {}
        if dtypes == "copy":
            kw["dtypes"] = self._dtypes
+        elif dtypes is not None:
            columns = new_columns if new_columns is not None else self.columns
            kw["dtypes"] = pandas.Series(
                [np.dtype(dtypes)] * len(columns), index=columns
codereview_new_python_data_10665
def broadcast_apply_full_axis(
        kw = {}
        if dtypes == "copy":
            kw["dtypes"] = self._dtypes
-        elif dtypes is not None and new_columns is not None:
            columns = new_columns if new_columns is not None else self.columns
            kw["dtypes"] = pandas.Series(
                [np.dtype(dtypes)] * len(columns), index=columns
`self.columns` generally are not equal to new cols
```suggestion
            columns = new_columns if new_columns is not None else compute_new_columns()
```
def broadcast_apply_full_axis(
        kw = {}
        if dtypes == "copy":
            kw["dtypes"] = self._dtypes
+        elif dtypes is not None:
            columns = new_columns if new_columns is not None else self.columns
            kw["dtypes"] = pandas.Series(
                [np.dtype(dtypes)] * len(columns), index=columns
codereview_new_python_data_10666
def broadcast_apply_full_axis(
            Setting it to True disables shuffling data from one partition to another.
        synchronize : boolean, default: True
            Synchronize external indexes (`new_index`, `new_columns`) with internal indexes.
        Returns
        -------
always better to add an example for better clarity :)
```suggestion
            Synchronize external indexes (`new_index`, `new_columns`) with internal indexes.
            This could be used when you're certain that the indices in partitions are equal to
            the provided hints in order to save time on syncing them.
```
def broadcast_apply_full_axis(
            Setting it to True disables shuffling data from one partition to another.
        synchronize : boolean, default: True
            Synchronize external indexes (`new_index`, `new_columns`) with internal indexes.
+            This could be used when you're certain that the indices in partitions are equal to
+            the provided hints in order to save time on syncing them.
        Returns
        -------