codereview_new_python_data_5097
def _analyse_name_as_type(name, pos, env):
    if global_entry and global_entry.is_type:
        type = global_entry.type
        if (not env.in_c_type_context and
-               name == 'int' and type == Builtin.int_type):
            # While we still support Python2 this needs to be downgraded
            # to a generic Python object to include both int and long
            type = py_object_type

```suggestion
                name == 'int' and type is Builtin.int_type):
```

def _analyse_name_as_type(name, pos, env):
    if global_entry and global_entry.is_type:
        type = global_entry.type
        if (not env.in_c_type_context and
+               name == 'int' and type is Builtin.int_type):
            # While we still support Python2 this needs to be downgraded
            # to a generic Python object to include both int and long
            type = py_object_type
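The point of the suggestion is that `is` tests object identity while `==` can be overloaded. A standalone sketch of the difference; `LooseType` is a hypothetical class invented for the illustration, not part of the code under review:

```python
# `==` can be overloaded; `is` always tests object identity.
class LooseType:
    def __eq__(self, other):
        return True  # compares equal to everything

a, b = LooseType(), LooseType()
print(a == b)   # True  -- via the overloaded __eq__
print(a is b)   # False -- distinct objects
print(a is a)   # True  -- the same object
```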
codereview_new_python_data_5098
def py_version_hex(major, minor=0, micro=0, release_level=0, release_serial=0):
    return (major << 24) | (minor << 16) | (micro << 8) | (release_level << 4) | (release_serial)

# there's a few places where it's useful to iterate over all of these
-used_macros_and_types = [
-    ('__Pyx_CyFunction_USED', [cyfunction_type_cname]),
-    ('__Pyx_FusedFunction_USED', [fusedfunction_type_cname]),
-    ('__Pyx_Generator_USED', ['__pyx_GeneratorType']),
-    ('__Pyx_IterableCoroutine_USED', ['__pyx_IterableCoroutineType']),
-    ('__Pyx_Coroutine_USED', ['__pyx_CoroutineType', '__pyx_CoroutineAwaitType'])
]

This isn't a dict. I think we can simplify this by duplicating the "USED" entries for each type cname that they guard. Why not, it's cheap. I could even imagine reversing this and putting the type cname first (and then having the option to turn it into a dict), because it seems more reasonable to look for the guard of a type than for the types that a guard protects.

def py_version_hex(major, minor=0, micro=0, release_level=0, release_serial=0):
    return (major << 24) | (minor << 16) | (micro << 8) | (release_level << 4) | (release_serial)

# there's a few places where it's useful to iterate over all of these
+used_types_and_macros = [
+    (cyfunction_type_cname, '__Pyx_CyFunction_USED'),
+    (fusedfunction_type_cname, '__Pyx_FusedFunction_USED'),
+    ('__pyx_GeneratorType', '__Pyx_Generator_USED'),
+    ('__pyx_IterableCoroutineType', '__Pyx_IterableCoroutine_USED'),
+    ('__pyx_CoroutineAwaitType', '__Pyx_Coroutine_USED'),
+    ('__pyx_CoroutineType', '__Pyx_Coroutine_USED'),
]
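With the type cname first and the shared guard duplicated per type, the flat list converts to a dict in one call, as the reviewer anticipates. A sketch using string literals as stand-ins for the module-level cname variables:

```python
used_types_and_macros = [
    ("__pyx_GeneratorType", "__Pyx_Generator_USED"),
    ("__pyx_CoroutineType", "__Pyx_Coroutine_USED"),
    ("__pyx_CoroutineAwaitType", "__Pyx_Coroutine_USED"),
]

# Looking up the guard of a type is now a plain dict access:
guard_for_type = dict(used_types_and_macros)
print(guard_for_type["__pyx_CoroutineAwaitType"])  # __Pyx_Coroutine_USED
```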
codereview_new_python_data_5099
def __init__(self, stream=None):

    def empty(self):
        if self.stream.tell():
            return False
-        return all(child.empty() for child in self.prepended_children)

    def getvalue(self):
        content = []

I think we can avoid the overhead of a generator expression here.

```suggestion
        return all([child.empty() for child in self.prepended_children]) if self.prepended_children else True
```

def __init__(self, stream=None):

    def empty(self):
        if self.stream.tell():
            return False
+        return all([child.empty() for child in self.prepended_children]) if self.prepended_children else True

    def getvalue(self):
        content = []
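In this codebase a list comprehension compiles to a tight loop while a generator expression carries generator setup and resume overhead; in plain CPython the winner depends on input size and on short-circuiting. A rough way to compare the two forms, with `children` and `empty` as hypothetical stand-ins for the real objects (returning `True` so neither variant short-circuits):

```python
import timeit

children = list(range(100))  # stand-in for prepended_children

def empty(child):
    return True  # stand-in for child.empty()

gen = timeit.timeit(lambda: all(empty(c) for c in children), number=20_000)
lst = timeit.timeit(lambda: all([empty(c) for c in children]), number=20_000)
print(f"genexp: {gen:.3f}s  listcomp: {lst:.3f}s")
```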
codereview_new_python_data_5100
def __init__(self, stream=None):

    def empty(self):
        if self.stream.tell():
            return False
-        return all(child.empty() for child in self.prepended_children)

    def getvalue(self):
        content = []

You can declare this as a `cpdef` method in the pxd file.

def __init__(self, stream=None):

    def empty(self):
        if self.stream.tell():
            return False
+        return all([child.empty() for child in self.prepended_children]) if self.prepended_children else True

    def getvalue(self):
        content = []
codereview_new_python_data_5102
def py_operation_function(self, code):

class PowNode(NumBinopNode):
    #  '**' operator.

-    cpow = None
    cpow_false_changed_result_type = False  # was the result type affected by cpow==False

    def _check_cpow(self, env):

```suggestion
    is_cpow = None
```

def py_operation_function(self, code):

class PowNode(NumBinopNode):
    #  '**' operator.

+    is_cpow = None
    cpow_false_changed_result_type = False  # was the result type affected by cpow==False

    def _check_cpow(self, env):
codereview_new_python_data_5104
def py_operation_function(self, code):

class PowNode(NumBinopNode):
    #  '**' operator.

-    cpow = None
    cpow_false_changed_result_type = False  # was the result type affected by cpow==False

    def _check_cpow(self, env):

Cautious proposal for a clearer name - "soft complex" is IMO quite self-descriptive (and appears in several places), so why not use it for naming?

```suggestion
    returns_soft_complex = False  # was the result type affected by is_cpow==False
```

def py_operation_function(self, code):

class PowNode(NumBinopNode):
    #  '**' operator.

+    is_cpow = None
    cpow_false_changed_result_type = False  # was the result type affected by cpow==False

    def _check_cpow(self, env):
codereview_new_python_data_5105
class SoftCComplexType(CComplexType):

    def __init__(self):
        super(SoftCComplexType, self).__init__(c_double_type)

-    def declaration_code(self, entity_code,
-            for_display = 0, dll_linkage = None, pyrex = 0):
        base_result = super(SoftCComplexType, self).declaration_code(
            entity_code,
            for_display=for_display,
            dll_linkage=dll_linkage,
-            pyrex=pyrex
        )
        if for_display:
            return "soft %s" % base_result

```suggestion
    def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
```

class SoftCComplexType(CComplexType):

    def __init__(self):
        super(SoftCComplexType, self).__init__(c_double_type)

+    def declaration_code(self, entity_code, for_display=0, dll_linkage=None, pyrex=0):
        base_result = super(SoftCComplexType, self).declaration_code(
            entity_code,
            for_display=for_display,
            dll_linkage=dll_linkage,
+            pyrex=pyrex,
        )
        if for_display:
            return "soft %s" % base_result
codereview_new_python_data_5106
def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
-        if not isinstance(self.sequence, SimpleCallNode):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":

```suggestion
        if not (isinstance(self.sequence, SimpleCallNode) and len(self.sequence.args) == 1):
```

I'm just going to add an extra check so that it matches what we expect.

def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
+        if not (isinstance(self.sequence, SimpleCallNode) and
+                len(self.sequence.args) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":
codereview_new_python_data_5107
def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
        if not (isinstance(self.sequence, SimpleCallNode) and
-                len(self.sequence.args) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":

```suggestion
                self.sequence.args_tuple and len(self.sequence.args_tuple) == 1):
```

def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
        if not (isinstance(self.sequence, SimpleCallNode) and
+                self.sequence.args_tuple and len(self.sequence.args_tuple) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":
codereview_new_python_data_5108
def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
        if not (isinstance(self.sequence, SimpleCallNode) and
-                self.sequence.args_tuple and len(self.sequence.args_tuple) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":

```suggestion
                self.sequence.arg_tuple and len(self.sequence.arg_tuple) == 1):
```

def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
        if not (isinstance(self.sequence, SimpleCallNode) and
+                self.sequence.arg_tuple and len(self.sequence.arg_tuple) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":
codereview_new_python_data_5109
def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
        if not (isinstance(self.sequence, SimpleCallNode) and
-                self.sequence.arg_tuple and len(self.sequence.arg_tuple) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":

```suggestion
                self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
```

def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
        if not (isinstance(self.sequence, SimpleCallNode) and
+                self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
            return False
        func = self.sequence.function
        if func.is_name and func.name == "reversed":
codereview_new_python_data_5110
def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
-        if not (isinstance(self.sequence, SimpleCallNode) and
                self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
            return False
        func = self.sequence.function

```suggestion
        if not (isinstance(self.sequence, SimpleCallNode) and self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
```

def is_reversed_cpp_iteration(self):

        This supports C++ classes with reverse_iterator implemented.
        """
+        if not (isinstance(self.sequence, SimpleCallNode) and self.sequence.arg_tuple and len(self.sequence.arg_tuple.args) == 1):
            return False
        func = self.sequence.function
codereview_new_python_data_5112
# tag: openmp

from cython.parallel import parallel
-from cython.cimports.openmp import omp_set_dynamic

num_threads = cython.declare(cython.int)
omp_set_dynamic(1)
with cython.nogil, parallel():
-    num_threads = openmp.omp_get_num_threads()
    # ...

Presumably this needs to be part of `from cython.cimports.openmp import omp_set_dynamic`

# tag: openmp

from cython.parallel import parallel
+from cython.cimports.openmp import omp_set_dynamic, omp_get_num_threads

num_threads = cython.declare(cython.int)
omp_set_dynamic(1)
with cython.nogil, parallel():
+    num_threads = omp_get_num_threads()
    # ...
codereview_new_python_data_5113
def create_args_parser():
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        epilog="""Environment variables:
-  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless of modification times and changes.
-  Environment variables accepted by setuptools are supported:
  https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
    )

I think it's fine to add `RawDescriptionHelpFormatter` into the game, but then we should manually wrap the text to avoid overly long lines.

```suggestion
  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
                      of modification times and changes.

  Environment variables accepted by setuptools are supported to configure the C compiler and build:
  https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
```

def create_args_parser():
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
        epilog="""Environment variables:
+  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
+                      of modification times and changes.
+
+  Environment variables accepted by setuptools are supported to configure the C compiler and build:
  https://setuptools.pypa.io/en/latest/userguide/ext_modules.html#compiler-and-linker-options"""
    )
codereview_new_python_data_5114
def create_args_parser():
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
-        epilog="""Environment variables:
  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
                      of modification times and changes.

  Environment variables accepted by setuptools are supported to configure the C compiler and build:

```suggestion
        epilog="""\
Environment variables:
  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
```

def create_args_parser():
    parser = ArgumentParser(
        formatter_class=RawDescriptionHelpFormatter,
+        epilog="""\
+Environment variables:
  CYTHON_FORCE_REGEN: if set to 1, forces cythonize to regenerate the output files regardless
                      of modification times and changes.

  Environment variables accepted by setuptools are supported to configure the C compiler and build:
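A minimal self-contained illustration of why the `\` matters: `RawDescriptionHelpFormatter` prints the epilog verbatim, so the backslash suppresses the leading newline and the manual wrapping survives, whereas the default formatter would reflow everything into one paragraph. The `demo`/`DEMO_FORCE` names are invented for the example:

```python
from argparse import ArgumentParser, RawDescriptionHelpFormatter

parser = ArgumentParser(
    prog="demo",
    formatter_class=RawDescriptionHelpFormatter,
    epilog="""\
Environment variables:
  DEMO_FORCE: if set to 1, forces regeneration of output files
              regardless of modification times.""",
)
parser.print_help()  # epilog is printed exactly as written above
```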
codereview_new_python_data_5115
def add(self, e):

    def __bool__(self):
        return bool(self._set)

-# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.

We still need `__nonzero__` in Py2.

```suggestion
    def __bool__(self):
        return bool(self._set)

    __nonzero__ = __bool__
```

def add(self, e):

    def __bool__(self):
        return bool(self._set)
+
+    __nonzero__ = __bool__

# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
codereview_new_python_data_5116
def __bool__(self):

    __nonzero__ = __bool__
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):

```suggestion
    __nonzero__ = __bool__

```

def __bool__(self):

    __nonzero__ = __bool__
+
# Class decorator that adds a metaclass and recreates the class with it.
# Copied from 'six'.
def add_metaclass(metaclass):
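A self-contained sketch of the Py2/Py3 truthiness pattern these two records are about; `Accumulator` is a hypothetical stand-in for the reviewed class:

```python
class Accumulator:
    def __init__(self):
        self._set = set()

    def add(self, e):
        self._set.add(e)

    def __bool__(self):          # Python 3 truthiness hook
        return bool(self._set)

    __nonzero__ = __bool__       # Python 2 spelling of the same hook

acc = Accumulator()
print(bool(acc))  # False -- empty
acc.add(1)
print(bool(acc))  # True
```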
codereview_new_python_data_5117
def build_hex_version(version_string):

def write_depfile(target, source, dependencies):
-    src_base_dir, _ = os.path.split(source)
    if not src_base_dir.endswith(os.sep):
        src_base_dir += os.sep
    # paths below the base_dir are relative, otherwise absolute

I'd also rename `mainfile` to `target_file` to distinguish it from `source`.

```suggestion
def write_depfile(mainfile, source_file, dependencies):
    src_base_dir = os.path.dirname(source_file)
```

def build_hex_version(version_string):

def write_depfile(target, source, dependencies):
+    src_base_dir = os.path.dirname(source)
    if not src_base_dir.endswith(os.sep):
        src_base_dir += os.sep
    # paths below the base_dir are relative, otherwise absolute
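The suggested `os.path.dirname` is equivalent to taking the first element of `os.path.split` without the throwaway tuple unpacking (the path below is hypothetical):

```python
import os.path

source = "pkg/subdir/module.pyx"            # hypothetical path
base_dir, _ = os.path.split(source)         # old spelling
print(base_dir)                             # pkg/subdir
print(os.path.dirname(source))              # pkg/subdir -- same result
assert os.path.dirname(source) == base_dir
```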
codereview_new_python_data_5119
def get_param(option):

        sys.exit(1)
    if len(sources) == 0 and not options.show_version:
        bad_usage()
-    if options.embed and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using --embed\n")
        sys.exit(1)

```suggestion
    if Options.embed and len(sources) > 1:
```

Suspect that wants to stay capitalized (although it probably doesn't help to have two variables only differing by a capital letter)

def get_param(option):

        sys.exit(1)
    if len(sources) == 0 and not options.show_version:
        bad_usage()
+    if Options.embed and len(sources) > 1:
        sys.stderr.write(
            "cython: Only one source file allowed when using --embed\n")
        sys.exit(1)
codereview_new_python_data_5120
def generate_result_code(self, code):

        flags.append('CO_VARARGS')
    if self.def_node.starstar_arg:
        flags.append('CO_VARKEYWORDS')
-    if self.def_node.is_generator and self.def_node.is_coroutine:
        flags.append('CO_COROUTINE')
-    if self.def_node.is_asyncgen:
-        flags.append('CO_ASYNC_GENERATOR')

    code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
        self.result_code,

I think it should be this:

```suggestion
    if self.def_node.is_asyncgen:
        flags.append('CO_ASYNC_GENERATOR')
    elif self.def_node.is_coroutine:
        flags.append('CO_COROUTINE')
    elif self.def_node.is_generator:
        flags.append('CO_GENERATOR')
```

def generate_result_code(self, code):

        flags.append('CO_VARARGS')
    if self.def_node.starstar_arg:
        flags.append('CO_VARKEYWORDS')
+    if self.def_node.is_asyncgen:
+        flags.append('CO_ASYNC_GENERATOR')
+    elif self.def_node.is_coroutine:
        flags.append('CO_COROUTINE')
+    elif self.def_node.is_generator:
+        flags.append('CO_GENERATOR')

    code.putln("%s = (PyObject*)__Pyx_PyCode_New(%d, %d, %d, %d, 0, %s, %s, %s, %s, %s, %s, %s, %s, %s, %d, %s); %s" % (
        self.result_code,
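The `if`/`elif` chain matches how CPython itself sets exactly one of these flags per function kind, which can be verified directly:

```python
import inspect

async def coro():
    pass

async def agen():
    yield 1

def gen():
    yield 1

for func in (coro, agen, gen):
    flags = func.__code__.co_flags
    print(
        func.__name__,
        bool(flags & inspect.CO_ASYNC_GENERATOR),
        bool(flags & inspect.CO_COROUTINE),
        bool(flags & inspect.CO_GENERATOR),
    )
# coro: only CO_COROUTINE; agen: only CO_ASYNC_GENERATOR; gen: only CO_GENERATOR,
# so testing is_asyncgen / is_coroutine / is_generator with elif mirrors CPython.
```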
codereview_new_python_data_5122
def create_to_py_utility_code(self, env):

                "module_name": module_name,
                "is_flag": True,
            },
-            outer_module_scope=self.entry.scope # ensure that "name" is findable
        ))
        return True

```suggestion
            outer_module_scope=self.entry.scope  # ensure that "name" is findable
```

def create_to_py_utility_code(self, env):

                "module_name": module_name,
                "is_flag": True,
            },
+            outer_module_scope=self.entry.scope  # ensure that "name" is findable
        ))
        return True
codereview_new_python_data_5124
def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_typ

    base_type = type.base_type
    while base_type:
-        if type.base_type.is_external and not base_type.objstruct_cname == "PyTypeObject":
            # 'type' is special-cased because it is actually based on PyHeapTypeObject
            # Variable length bases are allowed if the current class doesn't grow
            code.putln("if (sizeof(%s%s) != sizeof(%s%s)) {" % (

Did you really mean `type.base_type` here or just `base_type`?

def generate_type_ready_code(entry, code, bases_tuple_cname=None, check_heap_typ

    base_type = type.base_type
    while base_type:
+        if base_type.is_external and not base_type.objstruct_cname == "PyTypeObject":
            # 'type' is special-cased because it is actually based on PyHeapTypeObject
            # Variable length bases are allowed if the current class doesn't grow
            code.putln("if (sizeof(%s%s) != sizeof(%s%s)) {" % (
codereview_new_python_data_5125
def generate_result_code(self, code):

        res = self.result()
        code.putln("%s = (PyObject *) *%s;" % (res, self.buffer_ptr_code))
        # NumPy does (occasionally) allow NULL to denote None.
-        code.putln("if (%s == NULL) %s = Py_None;" % (res, res))
        code.putln("__Pyx_INCREF((PyObject*)%s);" % res)

    def free_subexpr_temps(self, code):

I think we should declare this case as unlikely to the C compiler, just in case it can make something of it.

```suggestion
        code.putln("if (unlikely(%s == NULL)) %s = Py_None;" % (res, res))
```

def generate_result_code(self, code):

        res = self.result()
        code.putln("%s = (PyObject *) *%s;" % (res, self.buffer_ptr_code))
        # NumPy does (occasionally) allow NULL to denote None.
+        code.putln("if (unlikely(%s == NULL)) %s = Py_None;" % (res, res))
        code.putln("__Pyx_INCREF((PyObject*)%s);" % res)

    def free_subexpr_temps(self, code):
codereview_new_python_data_5126
def p_namedexpr_test(s):

    if s.sy == ':=':
        position = s.position()
        if not lhs.is_name:
-            s.error("Left-hand side of assignment expression must be an identifier")
        s.next()
        rhs = p_test(s)
        return ExprNodes.AssignmentExpressionNode(position, lhs=lhs, rhs=rhs)

Would it make sense to turn this into a non-fatal exception? We detected `:=`, so it seems safe to continue parsing from here. It's usually helpful to give more errors at a time to speed up the "fix to compile" cycle.

```suggestion
            s.error("Left-hand side of assignment expression must be an identifier", fatal=False)
```

def p_namedexpr_test(s):

    if s.sy == ':=':
        position = s.position()
        if not lhs.is_name:
+            s.error("Left-hand side of assignment expression must be an identifier", fatal=False)
        s.next()
        rhs = p_test(s)
        return ExprNodes.AssignmentExpressionNode(position, lhs=lhs, rhs=rhs)
codereview_new_python_data_5146
import lbann
import lbann.models
import lbann.modules as lm
-
class CosmoFlow(lm.Module):
    """The CosmoFlow neural network.

This file needs a numpy import: `import numpy as np`

import lbann
import lbann.models
import lbann.modules as lm
+import numpy as np

class CosmoFlow(lm.Module):
    """The CosmoFlow neural network.
codereview_new_python_data_5147
def get_distconv_environment(parallel_io=False, num_io_partitions=1):

    return {
        'DISTCONV_WS_CAPACITY_FACTOR': 0.8,
-#        'DISTCONV_OVERLAP_HALO_EXCHANGE': 1,
        'LBANN_DISTCONV_HALO_EXCHANGE': 'AL',
        'LBANN_DISTCONV_TENSOR_SHUFFLER': 'AL',
-#        'LBANN_DISTCONV_HALO_EXCHANGE': 'HYBRID',
-#        'LBANN_DISTCONV_TENSOR_SHUFFLER': 'HYBRID',
        'LBANN_DISTCONV_CONVOLUTION_FWD_ALGORITHM': 'AUTOTUNE',
        'LBANN_DISTCONV_CONVOLUTION_BWD_DATA_ALGORITHM': 'AUTOTUNE',
        'LBANN_DISTCONV_CONVOLUTION_BWD_FILTER_ALGORITHM': 'AUTOTUNE',

```suggestion
        'LBANN_DISTCONV_HALO_EXCHANGE': 'AL',
        'LBANN_DISTCONV_TENSOR_SHUFFLER': 'AL',
```

def get_distconv_environment(parallel_io=False, num_io_partitions=1):

    return {
        'DISTCONV_WS_CAPACITY_FACTOR': 0.8,
        'LBANN_DISTCONV_HALO_EXCHANGE': 'AL',
        'LBANN_DISTCONV_TENSOR_SHUFFLER': 'AL',
        'LBANN_DISTCONV_CONVOLUTION_FWD_ALGORITHM': 'AUTOTUNE',
        'LBANN_DISTCONV_CONVOLUTION_BWD_DATA_ALGORITHM': 'AUTOTUNE',
        'LBANN_DISTCONV_CONVOLUTION_BWD_FILTER_ALGORITHM': 'AUTOTUNE',
codereview_new_python_data_5149
def prepend_environment_path(key, prefix):

    #set_environment('NCCL_SOCKET_IFNAME', 'hsi')
    set_environment('MIOPEN_DEBUG_DISABLE_FIND_DB', '1')
    set_environment('MIOPEN_DISABLE_CACHE', '1')
-    prepend_environment_path('LD_LIBRARY_PATH', '/opt/rocm-5.3.0/llvm/lib:' + os.getenv("CRAY_LD_LIBRARY_PATH"))

# Optimizations for Sierra-like systems
if system in ('sierra', 'lassen', 'rzansel'):

```suggestion
    prepend_environment_path('LD_LIBRARY_PATH', os.getenv('CRAY_LD_LIBRARY_PATH'))
    prepend_environment_path('LD_LIBRARY_PATH', '/opt/rocm-5.3.0/llvm/lib')
```

def prepend_environment_path(key, prefix):

    #set_environment('NCCL_SOCKET_IFNAME', 'hsi')
    set_environment('MIOPEN_DEBUG_DISABLE_FIND_DB', '1')
    set_environment('MIOPEN_DISABLE_CACHE', '1')
+    prepend_environment_path('LD_LIBRARY_PATH', os.getenv('CRAY_LD_LIBRARY_PATH'))
+    prepend_environment_path('LD_LIBRARY_PATH', '/opt/rocm-5.3.0/llvm/lib')

# Optimizations for Sierra-like systems
if system in ('sierra', 'lassen', 'rzansel'):
codereview_new_python_data_5167
# TODO: make the path configurable
CONFIG_FILES = ["config.yaml", "config.yml"]

-def create():
    settings = Dynaconf(
        settings_files=CONFIG_FILES,
        load_dotenv=True,
        envvar_prefix="VAST",
    )
    settings.validators.register(
-        Validator("console_verbosity", default="debug"),
-        Validator("file_verbosity", default="quiet"),
-        Validator("filename", default="vast.log"),
    )
    settings.validators.validate_all()
-    return settings

@mavam I am wondering about the config namespace we should use here.

- Do we want the config to be the same as for VAST? That would mean that setting VAST_CONSOLE_VERBOSITY=debug sets the debug logging for both the bindings and the binary.
- [VAST config](https://github.com/tenzir/vast/blob/master/vast.yaml.example#L68) uses `-` and we use `_` here, should we unify this?

# TODO: make the path configurable
CONFIG_FILES = ["config.yaml", "config.yml"]
+PREFIX = "python."

+class Config:
+    def __init__(self, conf: Dynaconf):
+        self.conf = conf
+
+    def get(self, key):
+        return self.conf.get(PREFIX + key)
+
+
+def create() -> Config:
    settings = Dynaconf(
        settings_files=CONFIG_FILES,
        load_dotenv=True,
        envvar_prefix="VAST",
    )
    settings.validators.register(
+        Validator(PREFIX + "console-verbosity", default="debug"),
+        Validator(PREFIX + "file-verbosity", default="quiet"),
+        Validator(PREFIX + "log-file", default="vast.log"),
    )
    settings.validators.validate_all()
+    return Config(settings)
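A hedged sketch of how the prefixed `Config` wrapper from the updated code would be used, assuming Dynaconf's dotted-key traversal and a `config.yaml` with a `python:` section (the `info` value is invented for the example):

```python
# config.yaml (assumed):
#   python:
#     console-verbosity: info

config = create()
print(config.get("console-verbosity"))  # "info", or the "debug" default
print(config.get("log-file"))           # "vast.log" unless overridden
```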
codereview_new_python_data_5196
async def run(self):

                logger.warning(f"failed to parse MISP event as STIX: {e}")
                continue
        finally:
-            logger.info(f"terminating MISP")
            self.misp = None
            socket.setsockopt(zmq.LINGER, 0)
            socket.close()

It's terminating the MISP VAST app, not MISP itself, so I found this to be a bit misleading.

async def run(self):

                logger.warning(f"failed to parse MISP event as STIX: {e}")
                continue
        finally:
+            logger.info(f"terminating")
            self.misp = None
            socket.setsockopt(zmq.LINGER, 0)
            socket.close()
codereview_new_python_data_5197
import pyarrow as pa

-class IPAddressType(pa.PyExtensionType):
-    def __init__(self):
-        pa.PyExtensionType.__init__(self, pa.binary(16))
-
-    def __reduce__(self):
-        return IPAddressType, ()
-
-    def __arrow_ext_scalar_class__(self):
-        # TODO: we should probably write our own IP address type that supports
-        # the v4-in-v6 embedding natively.
-        return ipaddress.IPv6Address
-
-
def names(schema: pa.Schema):
    meta = schema.metadata
    return [meta[key].decode() for key in meta if key.startswith(b"VAST:name:")]

In VAST this type is called `vast.address`, and its name in code is `address_type`. I think we should name it `AddressType` to match developer expectations here.

import pyarrow as pa

def names(schema: pa.Schema):
    meta = schema.metadata
    return [meta[key].decode() for key in meta if key.startswith(b"VAST:name:")]
codereview_new_python_data_5198
import pyarrow as pa

-class IPAddressType(pa.PyExtensionType):
-    def __init__(self):
-        pa.PyExtensionType.__init__(self, pa.binary(16))
-
-    def __reduce__(self):
-        return IPAddressType, ()
-
-    def __arrow_ext_scalar_class__(self):
-        # TODO: we should probably write our own IP address type that supports
-        # the v4-in-v6 embedding natively.
-        return ipaddress.IPv6Address
-
-
def names(schema: pa.Schema):
    meta = schema.metadata
    return [meta[key].decode() for key in meta if key.startswith(b"VAST:name:")]

Not sure about the API, but don't you need to put the extension type identifier `vast.address` somewhere?

import pyarrow as pa

def names(schema: pa.Schema):
    meta = schema.metadata
    return [meta[key].decode() for key in meta if key.startswith(b"VAST:name:")]
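On the identifier question: `pa.PyExtensionType` identifies types via `__reduce__`/pickling, whereas plain `pa.ExtensionType` takes an explicit name, which is where an identifier like `vast.address` would go. A hedged sketch combining both review comments, not the code the project necessarily adopted:

```python
import pyarrow as pa

class AddressType(pa.ExtensionType):
    """16-byte storage, identified as 'vast.address'."""

    def __init__(self):
        super().__init__(pa.binary(16), "vast.address")

    def __arrow_ext_serialize__(self):
        return b""  # no parameters beyond the name itself

    @classmethod
    def __arrow_ext_deserialize__(cls, storage_type, serialized):
        return cls()

# Registration lets the identifier round-trip through IPC/Parquet.
pa.register_extension_type(AddressType())
```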
codereview_new_python_data_5238
def request_item(request, locale=None):

        template = get_template("teams/email_request_item.jinja")
        mail_body = template.render(payload)

        EmailMessage(
            subject=mail_subject,
            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
-            cc=list(
-                locale.managers_group.user_set.exclude(pk=user.pk).values_list(
-                    "email", flat=True
-                )
-            )
-            + [user.email]
-            if locale
-            else "",
            reply_to=[user.email],
        ).send()
    else:

We should use contact email address here if it is set.

def request_item(request, locale=None):

        template = get_template("teams/email_request_item.jinja")
        mail_body = template.render(payload)

+        cc = list(locale.managers_group.user_set.values_list("email", flat=True))
+        if user.profile.contact_email:
+            cc = set(cc + [user.profile.contact_email])
+
        EmailMessage(
            subject=mail_subject,
            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
+            cc=cc,
            reply_to=[user.email],
        ).send()
    else:
codereview_new_python_data_5239
def request_item(request, locale=None):

        template = get_template("teams/email_request_item.jinja")
        mail_body = template.render(payload)

        EmailMessage(
            subject=mail_subject,
            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
-            cc=list(
-                locale.managers_group.user_set.exclude(pk=user.pk).values_list(
-                    "email", flat=True
-                )
-            )
-            + [user.email]
-            if locale
-            else "",
            reply_to=[user.email],
        ).send()
    else:

We no longer need to exclude the person that makes the request.

def request_item(request, locale=None):

        template = get_template("teams/email_request_item.jinja")
        mail_body = template.render(payload)

+        cc = list(locale.managers_group.user_set.values_list("email", flat=True))
+        if user.profile.contact_email:
+            cc = set(cc + [user.profile.contact_email])
+
        EmailMessage(
            subject=mail_subject,
            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
+            cc=cc,
            reply_to=[user.email],
        ).send()
    else:
codereview_new_python_data_5240
def request_item(request, locale=None):

        template = get_template("teams/email_request_item.jinja")
        mail_body = template.render(payload)

        EmailMessage(
            subject=mail_subject,
            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
-            cc=list(
-                locale.managers_group.user_set.exclude(pk=user.pk).values_list(
-                    "email", flat=True
-                )
-            )
-            + [user.email]
-            if locale
-            else "",
            reply_to=[user.email],
        ).send()
    else:

It's possible that the person that makes the request is also a locale manager, in which case we'll list their email address twice. Let's prevent this by wrapping the entire value of `cc` in `set()`.

def request_item(request, locale=None):

        template = get_template("teams/email_request_item.jinja")
        mail_body = template.render(payload)

+        cc = list(locale.managers_group.user_set.values_list("email", flat=True))
+        if user.profile.contact_email:
+            cc = set(cc + [user.profile.contact_email])
+
        EmailMessage(
            subject=mail_subject,
            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
+            cc=cc,
            reply_to=[user.email],
        ).send()
    else:
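A standalone illustration of the duplicate-email concern, with hypothetical addresses:

```python
# A requester who is also a locale manager would otherwise appear in cc twice.
manager_emails = ["alice@example.com", "bob@example.com"]  # hypothetical
contact_email = "alice@example.com"                         # requester is a manager

cc = list(manager_emails)
if contact_email:
    cc = set(cc + [contact_email])
print(sorted(cc))  # ['alice@example.com', 'bob@example.com'] -- no duplicate
```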
codereview_new_python_data_5241
def request_item(request, locale=None):

            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
-            cc=cc,
            reply_to=[user.contact_email()],
        ).send()
    else:

Have you removed this check intentionally? I think we should keep it around to support the "New locale" requests.

def request_item(request, locale=None):

            body=mail_body,
            from_email=settings.LOCALE_REQUEST_FROM_EMAIL,
            to=settings.PROJECT_MANAGERS,
+            cc=cc if locale else "",
            reply_to=[user.contact_email()],
        ).send()
    else:
codereview_new_python_data_5242
def remove_changed_entity_locale_entries_for_repository_projects(apps, schema_editor):
-    projects = Project.objects.filter(data_source=Project.DataSource.DATABASE)
-    resources = Resource.objects.filter(project__in=projects)
-    entities = Entity.objects.filter(resource__in=resources)
-    ChangedEntityLocale.objects.filter(entity__in=entities).delete()

class Migration(migrations.Migration):

You can [follow relationships](https://docs.djangoproject.com/en/4.1/topics/db/queries/#lookups-that-span-relationships) in QuerySet lookups:

```python
ChangedEntityLocale.objects.filter(
    entity__resource__project__data_source=Project.DataSource.DATABASE
).delete()
```

def remove_changed_entity_locale_entries_for_repository_projects(apps, schema_editor):
+    ChangedEntityLocale.objects.filter(
+        entity__resource__project__data_source=Project.DataSource.DATABASE
+    ).delete()

class Migration(migrations.Migration):
codereview_new_python_data_5243
class Migration(migrations.Migration):

    operations = [
        migrations.RunPython(
            code=remove_changed_entity_locale_entries_for_repository_projects,
        ),
    ]

Sorry, just realized that we must add the `reverse_code` here - [noop](https://docs.djangoproject.com/en/4.1/ref/migration-operations/#django.db.migrations.operations.RunPython.noop) is fine. See migration [`0023_fuzzy_to_missing.py`](https://github.com/mozilla/pontoon/blob/1ae0a58ad0537eb35d80393187202fd523df8bec/pontoon/base/migrations/0027_keep_pretranslation_enabled.py) for example.

class Migration(migrations.Migration):

    operations = [
        migrations.RunPython(
            code=remove_changed_entity_locale_entries_for_repository_projects,
+            reverse_code=migrations.RunPython.noop,
        ),
    ]
codereview_new_python_data_5244
class Direction(models.TextChoices):

        default=0,
        help_text="""
        Number of native speakers. Find locale code in CLDR territoryInfo.json:
-        https://github.com/unicode-org/cldr-json/blob/main/cldr-json/cldr-core/supplemental/territoryInfo.json 
        and multiply its "_populationPercent" with the territory "_population".
        Repeat if multiple occurrences of locale code exist and sum products.
        """,

Please remove the trailing space.

class Direction(models.TextChoices):

        default=0,
        help_text="""
        Number of native speakers. Find locale code in CLDR territoryInfo.json:
+        https://github.com/unicode-org/cldr-json/blob/main/cldr-json/cldr-core/supplemental/territoryInfo.json
        and multiply its "_populationPercent" with the territory "_population".
        Repeat if multiple occurrences of locale code exist and sum products.
        """,
codereview_new_python_data_5245
from apscheduler.schedulers.blocking import BlockingScheduler

-# Read dotenv file and inject it's values into the environment
dotenv.load_dotenv(dotenv_path=os.environ.get("DOTENV_PATH"))

# Set the default Django settings module

```suggestion
# Read dotenv file and inject its values into the environment
```

from apscheduler.schedulers.blocking import BlockingScheduler

+# Read dotenv file and inject its values into the environment
dotenv.load_dotenv(dotenv_path=os.environ.get("DOTENV_PATH"))

# Set the default Django settings module
codereview_new_python_data_5246
def mark_system_users(apps, schema_editor):

def revert_mark_system_users(apps, schema_editor):
    UserProfile = apps.get_model("base", "UserProfile")
    UserProfile.objects.filter(user__email__in=system_user_emails).update(
-        system_user=None
    )

`None` -> `False` (which is the default value, set before this migration runs).

def mark_system_users(apps, schema_editor):

def revert_mark_system_users(apps, schema_editor):
    UserProfile = apps.get_model("base", "UserProfile")
    UserProfile.objects.filter(user__email__in=system_user_emails).update(
+        system_user=False
    )
codereview_new_python_data_5247
def users_with_translations_counts(

    # Assign properties to user objects.
    contributors = User.objects.filter(pk__in=user_stats.keys())

-    # Exclude system users
-    contributors = contributors.exclude(profile__system_user=True)

    # Exclude deleted users.
    contributors = contributors.filter(is_active=True)

To be consistent with the line you added above, please invert this QuerySet to use `exclude()` instead of `filter()` and flip the boolean.

Nit: Please also make sure punctuation in the comments is consistent. :)

def users_with_translations_counts(

    # Assign properties to user objects.
    contributors = User.objects.filter(pk__in=user_stats.keys())

+    # Exclude system users.
+    contributors = contributors.filter(profile__system_user=False)

    # Exclude deleted users.
    contributors = contributors.filter(is_active=True)
codereview_new_python_data_5248
def get_google_automl_translation(text, locale):

            "message": f"{e}",
        }

-    project_id = "85591518533"
    model_id = locale.google_automl_model
    location = "us-central1"
    parent = f"projects/{project_id}/locations/{location}"

This magic constant should probably be a well commented config value, yes?

def get_google_automl_translation(text, locale):

            "message": f"{e}",
        }

+    project_id = settings.GOOGLE_AUTOML_PROJECT_ID
+
+    if not project_id:
+        log.error("GOOGLE_AUTOML_PROJECT_ID not set")
+        return {
+            "status": False,
+            "message": "Bad Request: Missing Project ID.",
+        }
+
    model_id = locale.google_automl_model
    location = "us-central1"
    parent = f"projects/{project_id}/locations/{location}"
codereview_new_python_data_5249
def get_google_automl_translation(text, locale):

            "message": f"{e}",
        }

-    project_id = "85591518533"
    model_id = locale.google_automl_model
    location = "us-central1"
    parent = f"projects/{project_id}/locations/{location}"

This also looks like a magic constant. At the very least it should have a code comment explaining itself.

def get_google_automl_translation(text, locale):

            "message": f"{e}",
        }

+    project_id = settings.GOOGLE_AUTOML_PROJECT_ID
+
+    if not project_id:
+        log.error("GOOGLE_AUTOML_PROJECT_ID not set")
+        return {
+            "status": False,
+            "message": "Bad Request: Missing Project ID.",
+        }
+
    model_id = locale.google_automl_model
    location = "us-central1"
    parent = f"projects/{project_id}/locations/{location}"
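A minimal sketch of the settings side, assuming the project reads configuration from environment variables (the variable name mirroring the setting is an assumption):

```python
# settings.py (sketch)
import os

# Google AutoML project used for machine translation suggestions;
# documented here instead of hard-coding a numeric project ID at the call site.
GOOGLE_AUTOML_PROJECT_ID = os.environ.get("GOOGLE_AUTOML_PROJECT_ID", "")
```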
codereview_new_python_data_5250
def get_contributions(user, contribution_type=None):

    total = sum(contributions.values())

    return {
        "contributions": json.dumps(contributions),
        "title": f"{ intcomma(total) } contribution{ pluralize(total) } in the last year",
    }

Why does this need to get double-encoded like this? If there is a need for this, some code comment here explaining the situation would be a really good idea.

def get_contributions(user, contribution_type=None):

    total = sum(contributions.values())

    return {
+        # Encode contributions dict to make it usable in the data- attribute
        "contributions": json.dumps(contributions),
        "title": f"{ intcomma(total) } contribution{ pluralize(total) } in the last year",
    }
codereview_new_python_data_5251
class Migration(migrations.Migration):

    ]

    operations = [
-        migrations.RemoveField(
            model_name="userprofile",
-            name="matrix",
-        ),
-        migrations.AddField(
-            model_name="userprofile",
-            name="chat",
-            field=models.CharField(
-                blank=True, max_length=255, null=True, verbose_name="Chat username"
-            ),
        ),
        migrations.AlterField(
            model_name="userprofile",

Should this be renamed to "chat" instead, or is it intentional to lose the previous values? In general, this matrix -> chat part of the PR seems rather disconnected from the rest, but extracting it now into its own PR would probably be needless work.

class Migration(migrations.Migration):

    ]

    operations = [
+        migrations.RenameField(
            model_name="userprofile",
+            old_name="matrix",
+            new_name="chat",
        ),
        migrations.AlterField(
            model_name="userprofile",
codereview_new_python_data_5254
def main(args):

            if (conf.commands.reboot == 'when-changed' or
                    (conf.commands.reboot == 'when-needed' and base.reboot_needed())):
-                if os.waitstatus_to_exitcode(os.system(conf.commands.reboot_command)) != 0:
                    return 1
    except dnf.exceptions.Error as exc:
        logger.error(_('Error: %s'), ucd(exc))

Does it make sense to log an error message here? Something along the lines of "reboot command failed".

def main(args):

            if (conf.commands.reboot == 'when-changed' or
                    (conf.commands.reboot == 'when-needed' and base.reboot_needed())):
+                exit_code = os.waitstatus_to_exitcode(os.system(conf.commands.reboot_command))
+                if exit_code != 0:
+                    logger.error('Error: reboot command returned nonzero exit code: %d', exit_code)
                    return 1
    except dnf.exceptions.Error as exc:
        logger.error(_('Error: %s'), ucd(exc))
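For context: `os.system()` returns a raw wait status on POSIX, and `os.waitstatus_to_exitcode()` (Python 3.9+) converts it into the conventional exit code, which is what gets compared and logged above. A runnable illustration:

```python
import os

status = os.system("exit 3")                   # raw wait status on POSIX
exit_code = os.waitstatus_to_exitcode(status)  # Python 3.9+
print(exit_code)  # 3; a negative value would mean the process died from a signal
```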
codereview_new_python_data_5255
def format_line(group):

                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        # show skipped conflicting packages
-        if not self.conf.best and self.base._goal.actions & forward_actions:
            lines = []
            skipped_conflicts, skipped_broken = self.base._skipped_packages(
                report_problems=True, transaction=transaction)

Another condition was added to keep the existing behavior with the `--skip-broken` option. When using this option, no problem and no skipped package is reported in the output. Maybe the list of skipped packages could be useful in this context, but the current condition is also connected with reporting the problem in the output, which we don't want in this case. It would need some refactoring...

def format_line(group):

                lines.append(format_line(group))
            pkglist_lines.append((action, lines))
        # show skipped conflicting packages
+        if (not self.conf.best or not self.conf.strict) and self.base._goal.actions & forward_actions:
            lines = []
            skipped_conflicts, skipped_broken = self.base._skipped_packages(
                report_problems=True, transaction=transaction)
codereview_new_python_data_5381
def port(self) -> int:

    @port.setter
    def port(self, port: int) -> None:
-        self.data.port = port
        self._update_host_and_authority()

    def _update_host_and_authority(self) -> None:
-        val = url.hostport(
-            self.scheme,
-            self.host,
-            # test_http.py::TestRequestCore::test_port sets port = b"foo" as the port
-            0 if not isinstance(self.port, int) else self.port,
-        )

        # Update host header
        if "Host" in self.data.headers:

Let's check that `port` is an `int` in `port.setter` and raise a ValueError if that's not the case. We can then adjust the test and skip this check here :)

def port(self) -> int:

    @port.setter
    def port(self, port: int) -> None:
+        if isinstance(port, int):
+            self.data.port = port
+        else:
+            raise ValueError("Invalid port type, must be of type int.")
        self._update_host_and_authority()

    def _update_host_and_authority(self) -> None:
+        val = url.hostport(self.scheme, self.host, self.port)

        # Update host header
        if "Host" in self.data.headers:
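A self-contained sketch of the validate-in-the-setter pattern the reviewer asks for; `Request` below is a minimal stand-in, not mitmproxy's class:

```python
class Request:
    """Validation lives in the setter, so every use site can trust the value."""

    def __init__(self, port: int) -> None:
        self._port = 0
        self.port = port  # routed through the setter below

    @property
    def port(self) -> int:
        return self._port

    @port.setter
    def port(self, port: int) -> None:
        if not isinstance(port, int):
            raise ValueError("Invalid port type, must be of type int.")
        self._port = port

r = Request(8080)
try:
    r.port = b"foo"  # the case the old code worked around inline
except ValueError as e:
    print(e)
```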
codereview_new_python_data_5382
def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:

            # Implementations MUST ignore and discard any frame that has a type that is unknown.
            yield Log(f"Ignoring unknown HTTP/2 frame type: {event.frame.type}")
        elif isinstance(event, h2.events.AlternativeServiceAvailable):
-            yield Log("Temporarily ignoring an Alt-Svc header")
        else:
            raise AssertionError(f"Unexpected event: {event!r}")
        return False

Let's reduce this to DEBUG verbosity (most people will not care) and make the message a bit clearer.

```suggestion
            yield Log("Received HTTP/2 Alt-Svc frame, which will not be forwarded.", DEBUG)
```

def handle_h2_event(self, event: h2.events.Event) -> CommandGenerator[bool]:

            # Implementations MUST ignore and discard any frame that has a type that is unknown.
            yield Log(f"Ignoring unknown HTTP/2 frame type: {event.frame.type}")
        elif isinstance(event, h2.events.AlternativeServiceAvailable):
+            yield Log("Received HTTP/2 Alt-Svc frame, which will not be forwarded.", DEBUG)
        else:
            raise AssertionError(f"Unexpected event: {event!r}")
        return False
codereview_new_python_data_5383
def get(self):

        try:
            match = flowfilter.parse(self.request.arguments["filter"][0].decode())
-        except (KeyError, IndexError, ValueError):  # Key+Index: ["filter"][0], Value: parsing problem
            match = bool  # returns always true

        with BytesIO() as bio:

We should return an HTTP error if the filter is not valid (and add a test for this), so let's not just catch the ValueError here. :)

def get(self):

        try:
            match = flowfilter.parse(self.request.arguments["filter"][0].decode())
+        except ValueError:  # thrown by flowfilter.parse if filter is invalid
+            raise APIError(400, f"Invalid filter argument")
+        except (KeyError, IndexError):  # Key+Index: ["filter"][0] can fail, if it's not set
            match = bool  # returns always true

        with BytesIO() as bio:
codereview_new_python_data_5384
def _next_layer(

        # handle QUIC connections
        first_layer = context.layers[0]
        if isinstance(first_layer, quic.QuicLayer):
-            if context.client.alpn is None:
-                return None  # should never happen, as ask is called after handshake
            if context.client.alpn == b"h3" or context.client.alpn.startswith(b"h3-"):
                if first_layer.instance.mode.mode == "regular":
                    mode = HTTPMode.regular

Then let's just `assert context.client.alpn` :-)

def _next_layer(

        # handle QUIC connections
        first_layer = context.layers[0]
        if isinstance(first_layer, quic.QuicLayer):
+            assert context.client.alpn is not None  # ask is called after handshake
            if context.client.alpn == b"h3" or context.client.alpn.startswith(b"h3-"):
                if first_layer.instance.mode.mode == "regular":
                    mode = HTTPMode.regular
codereview_new_python_data_5385
def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:

            assert isinstance(spec, ReverseMode)
            self.context.server.address = spec.address
-            if (
-                spec.scheme == "https" or spec.scheme == "http3"
-                or spec.scheme == "quic" or spec.scheme == "tls" or spec.scheme == "dtls"
-            ):
                if not self.context.options.keep_host_header:
                    self.context.server.sni = spec.address[0]
                if (spec.scheme == "http3" or spec.scheme == "quic"):

```suggestion
            if spec.scheme in ("https", "http3", "quic", "tls", "dtls"):
```

def _handle_event(self, event: events.Event) -> layer.CommandGenerator[None]:

            assert isinstance(spec, ReverseMode)
            self.context.server.address = spec.address
+            if spec.scheme in ("https", "http3", "quic", "tls", "dtls"):
                if not self.context.options.keep_host_header:
                    self.context.server.sni = spec.address[0]
                if (spec.scheme == "http3" or spec.scheme == "quic"):
codereview_new_python_data_5386
def load(self, loader):

            None,
            """Set the local IP address that mitmproxy should use when connecting to upstream servers.""",
        )
-        loader.add_option(
-            "quic_connection_id_length",
-            int,
-            8,
-            """The length in bytes of local QUIC connection IDs.""",
-        )

    def running(self):
        self.is_running = True

Is there a particular reason why we want this to be configurable?

def load(self, loader):

            None,
            """Set the local IP address that mitmproxy should use when connecting to upstream servers.""",
        )

    def running(self):
        self.is_running = True
codereview_new_python_data_5387
def test_parse_specific_modes():

    assert ProxyMode.parse("socks5")
    assert ProxyMode.parse("dns").resolve_local
    assert ProxyMode.parse("dns:reverse:8.8.8.8")
-    assert ProxyMode.parse("dtls:reverse:127.0.0.1:0")

    with pytest.raises(ValueError, match="invalid port"):
        ProxyMode.parse("regular@invalid-port")

Let's use a valid destination port here in case we ever get to tightening the check.

def test_parse_specific_modes():

    assert ProxyMode.parse("socks5")
    assert ProxyMode.parse("dns").resolve_local
    assert ProxyMode.parse("dns:reverse:8.8.8.8")
+    assert ProxyMode.parse("dtls:reverse:127.0.0.1:8004")

    with pytest.raises(ValueError, match="invalid port"):
        ProxyMode.parse("regular@invalid-port")
codereview_new_python_data_5630
"""Text formatting utilities.""" import io import re from functools import partial ```suggestion """Text formatting utilities.""" from __future__ import annotations ``` """Text formatting utilities.""" +from __future__ import annotations import io import re from functools import partial
codereview_new_python_data_5631
def _stamp_headers(self, visitor_headers=None, **headers):

        else:
            headers["stamped_headers"] = stamped_headers

        for stamp in headers.keys():
-            if stamp != "stamped_headers" and stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
        headers["stamped_headers"] = list(set(headers["stamped_headers"]))

        _merge_dictionaries(headers, self.options, aggregate_duplicates=False)

```suggestion
            if stamp == "stamped_headers":
                continue

            if stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
```

Should be easier to read

def _stamp_headers(self, visitor_headers=None, **headers):

        else:
            headers["stamped_headers"] = stamped_headers

        for stamp in headers.keys():
+            if stamp == "stamped_headers":
+                continue

+            if stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
        headers["stamped_headers"] = list(set(headers["stamped_headers"]))

        _merge_dictionaries(headers, self.options, aggregate_duplicates=False)
codereview_new_python_data_5632
def _stamp_headers(self, visitor_headers=None, **headers):

        else:
            headers["stamped_headers"] = stamped_headers

        for stamp in headers.keys():
-            if stamp != "stamped_headers" and stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
        headers["stamped_headers"] = list(set(headers["stamped_headers"]))

        _merge_dictionaries(headers, self.options, aggregate_duplicates=False)

Isn't it more efficient to initialize a set before the loop, avoid the de-duplication process in the loop, and instead convert the set to a list after it has been initialized?

```python
new_stamped_headers = {stamp for stamp in itertools.chain(headers, headers["stamped_headers"]) if stamp != "stamped_headers"}
headers["stamped_headers"] = list(new_stamped_headers)
```

That way you don't need the O(n) lookup to check if the stamped header already exists in the stamped headers list and instead you'll get an O(1) lookup, since that is one of the set's properties. `itertools.chain` creates an iterator from multiple iterables.

def _stamp_headers(self, visitor_headers=None, **headers):

        else:
            headers["stamped_headers"] = stamped_headers

        for stamp in headers.keys():
+            if stamp == "stamped_headers":
+                continue

+            if stamp not in headers["stamped_headers"]:
                headers["stamped_headers"].append(stamp)
        headers["stamped_headers"] = list(set(headers["stamped_headers"]))

        _merge_dictionaries(headers, self.options, aggregate_duplicates=False)
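A runnable version of the reviewer's chain-based dedup, with a toy `headers` dict:

```python
import itertools

headers = {"stamped_headers": ["h1"], "h2": 1, "h3": 2}

# One pass over both iterables; membership tests against a set are O(1),
# versus O(n) scans of a growing list.
new_stamped = {
    stamp
    for stamp in itertools.chain(headers, headers["stamped_headers"])
    if stamp != "stamped_headers"
}
headers["stamped_headers"] = list(new_stamped)
print(sorted(headers["stamped_headers"]))  # ['h1', 'h2', 'h3']
```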
codereview_new_python_data_5633
def on_replace(self, sig):

            assert headers['header'] == 'value'

    @pytest.mark.parametrize('sig_to_replace', [
        group(signature('sig1'), signature('sig2')),
        chain(signature('sig1'), signature('sig2')),
    ])

```suggestion
        group(signature(f'sig{i}') for i in range(2)),
        group([signature('sig1'), signature('sig2')]),
        group((signature('sig1'), signature('sig2'))),
        group(signature('sig1'), signature('sig2')),
```

def on_replace(self, sig):

            assert headers['header'] == 'value'

    @pytest.mark.parametrize('sig_to_replace', [
+        group(signature(f'sig{i}') for i in range(2)),
+        group([signature('sig1'), signature('sig2')]),
+        group((signature('sig1'), signature('sig2'))),
        group(signature('sig1'), signature('sig2')),
        chain(signature('sig1'), signature('sig2')),
    ])
codereview_new_python_data_5634
def replace(self, sig):

                sig.options['stamped_headers'] = stamped_headers
                sig.options.update(stamps)

        links = sig.options['link'] if 'link' in sig.options else []
        links.extend(sig.options['link_error'] if 'link_error' in sig.options else [])

We should probably add comments here explaining what we're doing.

def replace(self, sig):

                sig.options['stamped_headers'] = stamped_headers
                sig.options.update(stamps)

+        # Collecting all of the links (callback/errback) to stamp them
        links = sig.options['link'] if 'link' in sig.options else []
        links.extend(sig.options['link_error'] if 'link_error' in sig.options else [])
codereview_new_python_data_5635
def prepare_steps(self, args, kwargs, tasks,

        chain(group(signature1, signature2), signature3)
        --> Upgrades to chord([signature1, signature2], signature3)

        The responsibility of this method is to assure that the chain is
-        correctly unpacked, and that the correct callbacks are set up along the way.

        Arguments:
            args (Tuple): Partial args to be prepended to the existing args.

```suggestion
        correctly unpacked, and then the correct callbacks are set up along the way.
```

def prepare_steps(self, args, kwargs, tasks,

        chain(group(signature1, signature2), signature3)
        --> Upgrades to chord([signature1, signature2], signature3)

        The responsibility of this method is to assure that the chain is
+        correctly unpacked, and then the correct callbacks are set up along the way.

        Arguments:
            args (Tuple): Partial args to be prepended to the existing args.
codereview_new_python_data_5636
def test_disabling_flag_allow_error_cb_on_chord_header(self, manager, subtests):

        # Double check
        assert not redis_connection.exists(body_key), 'Chord body was called when it should have not'

-        with subtests.test(msg='Confirm there only one errback was called'):
            await_redis_echo((errback_msg,), redis_key=errback_key, timeout=10)
            with pytest.raises(TimeoutError):
                await_redis_echo((errback_msg,), redis_key=errback_key, timeout=10)

Is this the right way to implement this assertion?

def test_disabling_flag_allow_error_cb_on_chord_header(self, manager, subtests):

        # Double check
        assert not redis_connection.exists(body_key), 'Chord body was called when it should have not'

+        with subtests.test(msg='Confirm only one errback was called'):
            await_redis_echo((errback_msg,), redis_key=errback_key, timeout=10)
            with pytest.raises(TimeoutError):
                await_redis_echo((errback_msg,), redis_key=errback_key, timeout=10)
codereview_new_python_data_5637
MHcCAQEEIOj98rAhc4ToQkHby+Iegvhm3UBx+3TwpfNza+2Vn8d7oAoGCCqGSM49
AwEHoUQDQgAEhlUYUzS49td6FPnmzbKrdl3u0K83oYwakTb4pJmpO0M/lzvHbdC8
FgXqr9Pwws1YJIFPFoRGm+3xcv6Sw5ny9A==
------END EC PRIVATE KEY-----"""
\ No newline at end of file

```suggestion
-----END EC PRIVATE KEY-----"""
```

MHcCAQEEIOj98rAhc4ToQkHby+Iegvhm3UBx+3TwpfNza+2Vn8d7oAoGCCqGSM49
AwEHoUQDQgAEhlUYUzS49td6FPnmzbKrdl3u0K83oYwakTb4pJmpO0M/lzvHbdC8
FgXqr9Pwws1YJIFPFoRGm+3xcv6Sw5ny9A==
+-----END EC PRIVATE KEY-----"""
codereview_new_python_data_5638
def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep,

    def __reduce__(self, args=(), kwargs=None):
        kwargs = {} if not kwargs else kwargs
-        return super().__reduce__(args, dict(kwargs, url=self.url))

    def _find_path(self, url):
        if not url:

```suggestion
        return super().__reduce__(args, {**kwargs, 'url': self.url})
```

With this change it works. Is this preferable, or should I go with the first option? I usually prefer avoiding a string literal if possible.

def __init__(self, url=None, open=open, unlink=os.unlink, sep=os.sep,

    def __reduce__(self, args=(), kwargs=None):
        kwargs = {} if not kwargs else kwargs
+        return super().__reduce__(args, {**kwargs, 'url': self.url})

    def _find_path(self, url):
        if not url:
codereview_new_python_data_5639
def test_spectral_bisection():
    pytest.importorskip("scipy")
    G = nx.barbell_graph(3, 0)
    C = nx.spectral_bisection(G)
-    assert C == [{0, 1, 2}, {3, 4, 5}]

    mapping = dict(enumerate("badfec"))
    G = nx.relabel_nodes(G, mapping)
    C = nx.spectral_bisection(G)
-    assert C == [
        {mapping[0], mapping[1], mapping[2]},
        {mapping[3], mapping[4], mapping[5]},
-    ]

def check_eigenvector(A, l, x):

Does this need scipy or numpy? The function seems to use numpy.

```suggestion
    pytest.importorskip("numpy")
```

def test_spectral_bisection():
    pytest.importorskip("scipy")
    G = nx.barbell_graph(3, 0)
    C = nx.spectral_bisection(G)
+    assert C == ({0, 1, 2}, {3, 4, 5})

    mapping = dict(enumerate("badfec"))
    G = nx.relabel_nodes(G, mapping)
    C = nx.spectral_bisection(G)
+    assert C == (
        {mapping[0], mapping[1], mapping[2]},
        {mapping[3], mapping[4], mapping[5]},
+    )

def check_eigenvector(A, l, x):
codereview_new_python_data_5640
def spectral_bisection(

    import numpy as np

    v = nx.fiedler_vector(G, weight=weight)
-    node_list = list(G)
-    x, y = np.where(v < 0)[0], np.where(v >= 0)[0]
-
-    return [
-        {node_list[i] for i in x},
-        {node_list[i] for i in y},
-    ]

Maybe:

```suggestion
    nodes = np.array(list(G))
    pos_vals = v >= 0

    return [set(nodes[~pos_vals]), set(nodes[pos_vals])]
```

This should be more performant and is (at least I think) no less readable.

def spectral_bisection(

    import numpy as np

    v = nx.fiedler_vector(G, weight=weight)
+    nodes = np.array(list(G))
+    pos_vals = v >= 0
+
+    return [set(nodes[~pos_vals]), set(nodes[pos_vals])]
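A runnable illustration of the suggested boolean-mask partition, with an invented six-node vector standing in for the Fiedler vector:

```python
import numpy as np

nodes = np.array(list("abcdef"))
v = np.array([-0.4, -0.1, -0.2, 0.3, 0.1, 0.2])  # hypothetical Fiedler vector

pos_vals = v >= 0               # one vectorized comparison
left = set(nodes[~pos_vals])    # nodes with negative entries
right = set(nodes[pos_vals])    # nodes with non-negative entries
print(left, right)  # {'a', 'b', 'c'} {'d', 'e', 'f'}
```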
codereview_new_python_data_5641
def chordal_cycle_graph(p, create_using=None):

def paley_graph(p, create_using=None):
-    """Returns the Paley $\\frac{(p-1)}{2}-$regular graph on $p$ nodes.

    The returned graph is a graph on $ \\mathbb{Z}/p\\mathbb{Z}$ with edges between $x$ and $y$
    if and only if $x-y$ is a nonzero square in $\\mathbb{Z}/p\\mathbb{Z}$.

```suggestion
    The returned graph is a graph on $\\mathbb{Z}/p\\mathbb{Z}$ with edges between $x$ and $y$
```

def chordal_cycle_graph(p, create_using=None):

def paley_graph(p, create_using=None):
+    """Returns the Paley $\\frac{(p-1)}{2}$ -regular graph on $p$ nodes.

    The returned graph is a graph on $ \\mathbb{Z}/p\\mathbb{Z}$ with edges between $x$ and $y$
    if and only if $x-y$ is a nonzero square in $\\mathbb{Z}/p\\mathbb{Z}$.
codereview_new_python_data_5642
def to_latex_raw(

    if not isinstance(pos, dict):
        pos = nx.get_node_attributes(G, pos)
    if not pos:
-        # circular layout with radius 1
-        pos = {n: f"({round(360.0 * i / len(G), 3)}:10)" for i, n in enumerate(G)}
    for node in G:
        if node not in pos:
            raise nx.NetworkXError(f"node {node} has no specified pos {pos}")

Thanks for this @dimpase! Would it make sense to reduce the radius here? I tried this out and something like a `nx.path_graph(4)` compiles to a huge graph which doesn't fit on a single page.

```suggestion
        pos = {n: f"({round(360.0 * i / len(G), 3)}:{default_radius})" for i, n in enumerate(G)}
```

I don't know the right `default_radius` here.

def to_latex_raw(

    if not isinstance(pos, dict):
        pos = nx.get_node_attributes(G, pos)
    if not pos:
+        # circular layout with radius 2
+        pos = {n: f"({round(360.0 * i / len(G), 3)}:2)" for i, n in enumerate(G)}
    for node in G:
        if node not in pos:
            raise nx.NetworkXError(f"node {node} has no specified pos {pos}")
codereview_new_python_data_5643
def _dict_product(d1, d2):
    return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)}

-# Generators for producing graph products
def _node_product(G, H):
    for u, v in product(G, H):
        yield ((u, v), _dict_product(G.nodes[u], H.nodes[v]))

Maybe this isn't a typo. We are indeed "producting" graph products.

def _dict_product(d1, d2):
    return {k: (d1.get(k), d2.get(k)) for k in set(d1) | set(d2)}

+# Generators for producting graph products
def _node_product(G, H):
    for u, v in product(G, H):
        yield ((u, v), _dict_product(G.nodes[u], H.nodes[v]))
codereview_new_python_data_5644
def could_be_isomorphic(G1, G2):

    -----
    Checks for matching degree, triangle, and number of cliques sequences.
    The triangle sequence contains the number of triangles each node is part of.
-    The clique sequence contains for each node the size of the maximal clique involving that node.
    """

```suggestion
    The clique sequence contains for each node the number of maximal cliques involving that node.
```

Usually, a node will be part of more than one maximal clique. A maximal clique cannot add any nodes while remaining a clique. That is different from the maximum clique for a node, which is the largest of all the maximal cliques. This function checks the number of maximal cliques in which this node participates.

def could_be_isomorphic(G1, G2):

    -----
    Checks for matching degree, triangle, and number of cliques sequences.
    The triangle sequence contains the number of triangles each node is part of.
+    The clique sequence contains for each node the number of maximal cliques involving that node.
    """
codereview_new_python_data_5645
def barbell_graph(m1, m2, create_using=None):

    Notes
    -----
-    For $m1 > 2$ and $m2 >= 0$.
    Two identical complete graphs $K_{m1}$ form the
    left and right bells, and are connected by a path $P_{m2}$.

```suggestion
```

I think we can remove this bit, the information is captured in the Parameters section.

def barbell_graph(m1, m2, create_using=None):

    Notes
    -----
    Two identical complete graphs $K_{m1}$ form the
    left and right bells, and are connected by a path $P_{m2}$.
codereview_new_python_data_5646
def _empty_generator():

def _all_simple_paths_graph(G, source, targets, cutoff):
-    visited = dict.fromkeys([source], True)
    stack = [iter(G[source])]
    while stack:
        children = stack[-1]

Would this be more readable?

```suggestion
    visited = {source: True}
```

def _empty_generator():

def _all_simple_paths_graph(G, source, targets, cutoff):
+    visited = {source: True}
    stack = [iter(G[source])]
    while stack:
        children = stack[-1]
codereview_new_python_data_5647
def is_simple_path(G, nodes): False """ - assert isinstance(nodes, list), "Object passed as `nodes` must be a list." - - # The empty list is not a valid path. Could also return - # NetworkXPointlessConcept here. - if not nodes: - return False - # check that all nodes in the list are in the graph, if at least one # is not in the graph, then this is not a simple path if not all(n in G for n in nodes): return False -1 on this assertion - the docstring is clear on the expected type, and assertions are really intended to check that scenarios that should not ever exist are not hit. def is_simple_path(G, nodes): False """ # check that all nodes in the list are in the graph, if at least one # is not in the graph, then this is not a simple path + # Also, the empty list is not a valid path. if not all(n in G for n in nodes): return False
codereview_new_python_data_5648
def is_simple_path(G, nodes): False """ - assert isinstance(nodes, list), "Object passed as `nodes` must be a list." - - # The empty list is not a valid path. Could also return - # NetworkXPointlessConcept here. - if not nodes: - return False - # check that all nodes in the list are in the graph, if at least one # is not in the graph, then this is not a simple path if not all(n in G for n in nodes): return False Similarly, I'm -1 on this too. It makes sense with the above, but not by itself. def is_simple_path(G, nodes): False """ # check that all nodes in the list are in the graph, if at least one # is not in the graph, then this is not a simple path + # Also, the empty list is not a valid path. if not all(n in G for n in nodes): return False
codereview_new_python_data_5649
def draw_networkx_edges( alpha : float or array of floats (default=None) The edge transparency. This can be a single alpha value, - in which case it will be applied to all the nodes of color. Otherwise, if it is an array, the elements of alpha will be applied to the colors in order (cycling through alpha multiple times if necessary). Since this is in `draw_networkx_edges`, shouldn't it specify the alpha value of the edges? In which case maybe wording should be something like: ```suggestion in which case it will be applied to all specified edges. Otherwise, ``` def draw_networkx_edges( alpha : float or array of floats (default=None) The edge transparency. This can be a single alpha value, + in which case it will be applied to all specified edges. Otherwise, if it is an array, the elements of alpha will be applied to the colors in order (cycling through alpha multiple times if necessary).
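A usage sketch of the array form being documented here (requires matplotlib; the alpha values are arbitrary):
```python
import matplotlib.pyplot as plt
import networkx as nx

G = nx.path_graph(4)  # three edges
pos = nx.spring_layout(G, seed=1)
nx.draw_networkx_nodes(G, pos)
# one alpha per edge, applied to the edges in order as the docstring describes
nx.draw_networkx_edges(G, pos, alpha=[0.2, 0.6, 1.0])
plt.show()
```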
codereview_new_python_data_5650
def test_dls_labeled_edges_depth_1(self): edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) forward = [(u, v) for (u, v, d) in edges if d == "forward"] assert forward == [(5, 5), (5, 4), (5, 6)] - # Note: reverse-depth_limit edge types were not reported before gh-6239 assert edges == [ (5, 5, "forward"), (5, 4, "forward"), ```suggestion # Note: reverse-depth_limit edge types were not reported before gh-6240 ``` Just a tiny nit pick, #6239 is the issue, 6240 is where this behavior will be added. def test_dls_labeled_edges_depth_1(self): edges = list(nx.dfs_labeled_edges(self.G, source=5, depth_limit=1)) forward = [(u, v) for (u, v, d) in edges if d == "forward"] assert forward == [(5, 5), (5, 4), (5, 6)] + # Note: reverse-depth_limit edge types were not reported before gh-6240 assert edges == [ (5, 5, "forward"), (5, 4, "forward"),
codereview_new_python_data_5651
def test_empty_numpy(self): nx.eigenvector_centrality_numpy(nx.Graph()) def test_zero_nstart(self): - G = nx.Graph() - G.add_nodes_from([1, 2, 3]) - G.add_edges_from([(1, 2), (1, 3), (2, 3)]) - with pytest.raises(nx.NetworkXException): nx.eigenvector_centrality(G, nstart={v: 0 for v in G}) ```suggestion G = nx.Graph([(1, 2), (1, 3), (2, 3)]) ``` Just a more compact setup of the graph... Thanks for this coverage improvement. def test_empty_numpy(self): nx.eigenvector_centrality_numpy(nx.Graph()) def test_zero_nstart(self): + G = nx.Graph([(1, 2), (1, 3), (2, 3)]) + with pytest.raises( + nx.NetworkXException, match="initial vector cannot have all zero values" + ): nx.eigenvector_centrality(G, nstart={v: 0 for v in G})
codereview_new_python_data_5652
def test_path_projected_graph(self): P = bipartite.projected_graph(G, [0, 2]) assert nodes_equal(list(P), [0, 2]) assert edges_equal(list(P.edges()), [(0, 2)]) - with pytest.raises(nx.NetworkXError): - G = nx.MultiGraph() - G.add_edge(0, 1) bipartite.projected_graph(G, [0]) def test_path_projected_properties_graph(self): The best-practice here would be to only have the code that you expect to raise the warning within the context, the reason being that you can accidentally hit a NetworkXError from a branch you don't expect and would be none the wiser. Of course, using the `match=` kwarg helps with this as well, guaranteeing you're hitting the exact branch you think you're testing! ```suggestion G = nx.MultiGraph([(0, 1)]) with pytest.raises(nx.NetworkXError, match="not defined"): bipartite.projected_graph(G, [0]) ``` def test_path_projected_graph(self): P = bipartite.projected_graph(G, [0, 2]) assert nodes_equal(list(P), [0, 2]) assert edges_equal(list(P.edges()), [(0, 2)]) + G = nx.MultiGraph([(0, 1)]) + with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"): bipartite.projected_graph(G, [0]) def test_path_projected_properties_graph(self):
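A minimal sketch of the scoping point made above: only the call under test sits inside the `raises` block, so a failure during setup cannot masquerade as the expected exception.
```python
import pytest
import networkx as nx
from networkx.algorithms import bipartite


def test_projected_graph_rejects_multigraphs():
    # setup outside the block: if this line raised NetworkXError itself,
    # pytest would report an error rather than a spuriously passing test
    G = nx.MultiGraph([(0, 1)])
    with pytest.raises(nx.NetworkXError, match="not defined for multigraphs"):
        bipartite.projected_graph(G, [0])
```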
codereview_new_python_data_5653
def test_path_weighted_projected_graph(self): def test_digraph_conversion(self): G = nx.DiGraph() - edges = [(0, 1, 1), (1, 2, 1), (2, 3, 1), (3, 4, 2)] - G.add_weighted_edges_from(edges) P = bipartite.overlap_weighted_projected_graph(G, [1, 3]) assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0} assert len(P.nodes()) == 2 Any reason to use weighted edges here? AFAICT edge weights in the original bipartite graph are not considered in the algorithm. The weights in the output graph are the result of the algorithm. That actually might be a bit confusing as the node attributes are copied over from the input to the projected graph, but the edge attributes are not (and if there are weighted edges, the meaning of the "weight" attribute changes between the input and output graphs). def test_path_weighted_projected_graph(self): def test_digraph_conversion(self): G = nx.DiGraph() + edges = [(0, 1), (1, 2), (2, 3), (3, 4)] + G.add_edges_from(edges) P = bipartite.overlap_weighted_projected_graph(G, [1, 3]) assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0} assert len(P.nodes()) == 2
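A sketch (not from the review) demonstrating that input edge weights don't affect the result; the projection weight is the Jaccard overlap of the neighborhoods, so changing the input weights leaves it unchanged:
```python
import networkx as nx
from networkx.algorithms import bipartite

# two "top" nodes sharing both "bottom" neighbors
B = nx.Graph([("a", 1), ("a", 2), ("b", 1), ("b", 2)])
P1 = bipartite.overlap_weighted_projected_graph(B, ["a", "b"])

for u, v in B.edges:  # same structure, very different input weights
    B[u][v]["weight"] = 100
P2 = bipartite.overlap_weighted_projected_graph(B, ["a", "b"])

# identical output weights: |{1,2} & {1,2}| / |{1,2} | {1,2}| == 1.0 either way
assert P1["a"]["b"]["weight"] == P2["a"]["b"]["weight"] == 1.0
```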
codereview_new_python_data_5654
def test_path_weighted_projected_graph(self):
        assert edges_equal(list(P.edges()), [(0, 2)])
        P[0][2]["weight"] = 1

-    def test_digraph_conversion(self):
        G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
        P = bipartite.overlap_weighted_projected_graph(G, [1, 3])
        assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0}

Maybe a better test name would be something like:
```suggestion
    def test_digraph_weighted_projection(self):
```

def test_path_weighted_projected_graph(self):
        assert edges_equal(list(P.edges()), [(0, 2)])
        P[0][2]["weight"] = 1

+    def test_digraph_weighted_projection(self):
        G = nx.DiGraph([(0, 1), (1, 2), (2, 3), (3, 4)])
        P = bipartite.overlap_weighted_projected_graph(G, [1, 3])
        assert nx.get_edge_attributes(P, "weight") == {(1, 3): 1.0}
codereview_new_python_data_5655
def test_is_frozen(self):
        assert G.frozen

    def test_node_attributes_are_still_mutable_on_frozen_graph(self):
-        G = nx.freeze(self.G)
        node = G.nodes[0]
        node["node_attribute"] = True
        assert node["node_attribute"] == True

    def test_edge_attributes_are_still_mutable_on_frozen_graph(self):
-        G = nx.freeze(self.G)
        edge = G.edges[(0, 1)]
        edge["edge_attribute"] = True
        assert edge["edge_attribute"] == True

The tests look good, but let's use a fresh graph instance instead of the class instance to prevent potential crosstalk with other tests, e.g.:
```suggestion
    def test_node_attributes_are_still_mutable_on_frozen_graph(self):
        G = nx.freeze(nx.path_graph(3))
        G.nodes[0]["node_attribute"] = True
        assert G.nodes[0]["node_attribute"] == True
```
The same for the edge attr test as well.

def test_is_frozen(self):
        assert G.frozen

    def test_node_attributes_are_still_mutable_on_frozen_graph(self):
+        G = nx.freeze(nx.path_graph(3))
        node = G.nodes[0]
        node["node_attribute"] = True
        assert node["node_attribute"] == True

    def test_edge_attributes_are_still_mutable_on_frozen_graph(self):
+        G = nx.freeze(nx.path_graph(3))
        edge = G.edges[(0, 1)]
        edge["edge_attribute"] = True
        assert edge["edge_attribute"] == True
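The behavior these tests pin down can be seen in isolation; a sketch of what `nx.freeze` does and does not prevent:
```python
import networkx as nx

G = nx.freeze(nx.path_graph(3))

try:
    G.add_edge(2, 3)  # structural changes are blocked ...
except nx.NetworkXError as err:
    print(err)

# ... but attribute dicts remain ordinary mutable dicts
G.nodes[0]["color"] = "red"
G.edges[0, 1]["weight"] = 2
assert G.nodes[0]["color"] == "red"
assert G.edges[0, 1]["weight"] == 2
```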
codereview_new_python_data_5656
def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0): normalized : bool If True (default) normalize by the embededness of the nodes (u and v). alpha, b, c : float - If normalized is True (default), try out different values of alpha, b and, c to obtain maximum - performance of normalized dispersion. Returns ------- In this case I think the best thing to do would be to explicitly describe how the parameters are used in the embeddedness normalization. def dispersion(G, u=None, v=None, normalized=True, alpha=1.0, b=0.0, c=0.0): normalized : bool If True (default) normalize by the embededness of the nodes (u and v). alpha, b, c : float + Parameters for the normalization procedure. When `normalized` is True, + the dispersion value is normalized by:: + + result = ((dispersion + b) ** alpha) / (embeddedness + c) + + as long as the denominator is nonzero. Returns -------
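A worked check of the documented formula (not from the review): on a 4-cycle, nodes 0 and 2 have embeddedness 2 (common neighbors 1 and 3) and one dispersed pair, so with the defaults `alpha=1.0, b=0.0, c=0.0` the normalized score reduces to raw dispersion over embeddedness.
```python
import networkx as nx

G = nx.cycle_graph(4)  # 0 and 2 share the two non-adjacent nodes 1 and 3

raw = nx.dispersion(G, 0, 2, normalized=False)  # one dispersed pair: (1, 3)
norm = nx.dispersion(G, 0, 2)                   # defaults: alpha=1.0, b=0.0, c=0.0

embeddedness = 2  # number of common neighbors of 0 and 2
assert raw == 1
assert norm == ((raw + 0.0) ** 1.0) / (embeddedness + 0.0) == 0.5
```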
codereview_new_python_data_5657
def test_edge_cases_directed_edge_swap(): graph = nx.path_graph(4, create_using=nx.DiGraph) with pytest.raises(nx.NetworkXAlgorithmError): nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) - graph = nx.DiGraph() - edges = [(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)] - graph.add_edges_from(edges) with pytest.raises(nx.NetworkXAlgorithmError): nx.directed_edge_swap(graph, nswap=2, max_tries=20, seed=1) Generally looks good - let's add a `match=` to the `raises` context managers to ensure we are indeed catching the exceptions we expect for these cases. def test_edge_cases_directed_edge_swap(): graph = nx.path_graph(4, create_using=nx.DiGraph) with pytest.raises(nx.NetworkXAlgorithmError): nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) + graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) with pytest.raises(nx.NetworkXAlgorithmError): nx.directed_edge_swap(graph, nswap=2, max_tries=20, seed=1)
codereview_new_python_data_5658
def test_directed_edge_swap(): def test_edge_cases_directed_edge_swap(): e = ( - f"Maximum number of swap attempts \\(11\\) exceeded " - f"before desired swaps achieved \\(4\\)." ) - graph = nx.path_graph(4, create_using=nx.DiGraph) with pytest.raises(nx.NetworkXAlgorithmError, match=e): nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) with pytest.raises(nx.NetworkXAlgorithmError, match=e): - nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) def test_double_edge_swap(): IIUC, the exception raised here is actually a bit misleading as the edge swapping is impossible for this case. We can still add the test for the sake of coverage, but let's also add a comment explaining it a bit. In principle, someone could (in a perfect world) adjust the `directed_edge_swap` function to detect when an edge swap isn't possible, and report a better error. If/when this happens, we'd want to be able to identify this test case and know that it's safe to delete! def test_directed_edge_swap(): def test_edge_cases_directed_edge_swap(): + # Tests cases when swaps are impossible, either too few edges exist, or self loops/cycles are unavoidable + # TODO: Rewrite function to explicitly check for impossible swaps and raise error e = ( + "Maximum number of swap attempts \\(11\\) exceeded " + "before desired swaps achieved \\(\\d\\)." ) + graph = nx.DiGraph([(0, 1), (2, 3)]) with pytest.raises(nx.NetworkXAlgorithmError, match=e): nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) with pytest.raises(nx.NetworkXAlgorithmError, match=e): + nx.directed_edge_swap(graph, nswap=1, max_tries=10, seed=1) def test_double_edge_swap():
codereview_new_python_data_5659
def test_directed_edge_swap(): def test_edge_cases_directed_edge_swap(): e = ( - f"Maximum number of swap attempts \\(11\\) exceeded " - f"before desired swaps achieved \\(4\\)." ) - graph = nx.path_graph(4, create_using=nx.DiGraph) with pytest.raises(nx.NetworkXAlgorithmError, match=e): nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) with pytest.raises(nx.NetworkXAlgorithmError, match=e): - nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) def test_double_edge_swap(): Thanks @Mjh9122 - just one last note: no need to use f-strings here since there is no formatted insertion going on. def test_directed_edge_swap(): def test_edge_cases_directed_edge_swap(): + # Tests cases when swaps are impossible, either too few edges exist, or self loops/cycles are unavoidable + # TODO: Rewrite function to explicitly check for impossible swaps and raise error e = ( + "Maximum number of swap attempts \\(11\\) exceeded " + "before desired swaps achieved \\(\\d\\)." ) + graph = nx.DiGraph([(0, 1), (2, 3)]) with pytest.raises(nx.NetworkXAlgorithmError, match=e): nx.directed_edge_swap(graph, nswap=4, max_tries=10, seed=1) graph = nx.DiGraph([(0, 0), (0, 1), (1, 0), (2, 3), (3, 2)]) with pytest.raises(nx.NetworkXAlgorithmError, match=e): + nx.directed_edge_swap(graph, nswap=1, max_tries=10, seed=1) def test_double_edge_swap():
codereview_new_python_data_5660
def laplacian_spectrum(G, weight="weight"): Examples -------- - The multiplicity of O as an eigenvalue of the laplacian matrix is equal to the number of connected components of G. >>> import numpy as np IMO it'd make the example more clear if the reader knew what the graph was (I doubt many have the graph atlas memorized :joy: ): ```suggestion >>> G = nx.Graph() # Create a graph with 5 nodes and 3 connected components >>> G.add_nodes_from(range(5)) >>> G.add_edges_from([(0, 2), (3, 4)]) ``` def laplacian_spectrum(G, weight="weight"): Examples -------- + The multiplicity of 0 as an eigenvalue of the laplacian matrix is equal to the number of connected components of G. >>> import numpy as np
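The claim is easy to check numerically with the reviewer's suggested graph; a sketch counting near-zero eigenvalues:
```python
import numpy as np
import networkx as nx

# 5 nodes, 3 connected components: {0, 2}, {3, 4} and the isolated node 1
G = nx.Graph()
G.add_nodes_from(range(5))
G.add_edges_from([(0, 2), (3, 4)])

eigenvalues = nx.laplacian_spectrum(G)
n_zero = np.count_nonzero(np.isclose(eigenvalues, 0))
assert n_zero == nx.number_connected_components(G) == 3
```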
codereview_new_python_data_5661
def laplacian_matrix(G, nodelist=None, weight="weight"): to a block diagonal matrix where each block is the respective Laplacian matrix for each component. - >>> G = nx.graph_atlas(26) #This graph from the Graph Atlas has 2 connected components. - >>> print(nx.laplacian_matrix(G).todense()) - [[ 1 -1 0 0 0] - [-1 2 -1 0 0] - [ 0 -1 1 0 0] - [ 0 0 0 1 -1] - [ 0 0 0 -1 1]] """ import scipy as sp Similar comment here to the other PR - let's use explicit examples for the input Graph def laplacian_matrix(G, nodelist=None, weight="weight"): to a block diagonal matrix where each block is the respective Laplacian matrix for each component. + >>> G = nx.Graph([(1,2), (2, 3), (4, 5)]) + >>> nx.laplacian_matrix(G).toarray() + array([[ 1, -1, 0, 0, 0], + [-1, 2, -1, 0, 0], + [ 0, -1, 1, 0, 0], + [ 0, 0, 0, 1, -1], + [ 0, 0, 0, -1, 1]]) """ import scipy as sp
codereview_new_python_data_5662
def fiedler_vector( Given a connected graph the signs of the values in the Fiedler vector can be used to partition the graph into two components. - >>> G = nx.cycle_graph(4) - >>> print(nx.fiedler_vector(G, normalized=True, seed = 1)) - [-0.69141345 -0.1481467 0.69141344 0.14814671] - - The connected components are {0,1} and {2,3}. """ import numpy as np ```suggestion >>> G = nx.barbell_graph(5, 0) >>> nx.fiedler_vector(G, normalized=True, seed=1) array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899, 0.26072899, 0.32864129, 0.32864129, 0.32864129, 0.32864129]) >>> nx.fiedler_vector(G, normalized=True, seed=1) array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899, 0.26072899, 0.32864129, 0.32864129, 0.32864129, 0.32864129]) The connected components are the two 5-node cliques of the barbell graph. ``` def fiedler_vector( Given a connected graph the signs of the values in the Fiedler vector can be used to partition the graph into two components. + >>> G = nx.barbell_graph(5, 0) + >>> nx.fiedler_vector(G, normalized=True, seed=1) + array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899, + 0.26072899, 0.32864129, 0.32864129, 0.32864129, 0.32864129]) + >>> nx.fiedler_vector(G, normalized=True, seed=1) + array([-0.32864129, -0.32864129, -0.32864129, -0.32864129, -0.26072899, + 0.26072899, 0.32864129, 0.32864129, 0.32864129, 0.32864129]) + + The connected components are the two 5-node cliques of the barbell graph. """ import numpy as np
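A sketch of actually using the signs to recover the partition described above. Since the overall sign of an eigenvector is arbitrary, the check only asserts that the negative entries pick out one of the two cliques:
```python
import networkx as nx

G = nx.barbell_graph(5, 0)
v = nx.fiedler_vector(G, normalized=True, seed=1)

negative_side = {n for n, val in zip(G, v) if val < 0}
assert negative_side in ({0, 1, 2, 3, 4}, {5, 6, 7, 8, 9})  # one clique per sign
```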
codereview_new_python_data_5663
def algebraic_connectivity( Examples -------- For undirected graphs algebraic connectivity can tell us if a graph is connected or not - G is connected iff :math: `algebraic\_connectivity(G)>0` >>> #if G is a complete grpah then G is connected >>> G = nx.complete_graph(5) Just a nit here but I think this is more readable (in the docstring at least) as an inline literal rather than trying to format it as math: ```suggestion `G` is connected iff ``algebraic_connectivity(G) > 0``: ``` def algebraic_connectivity( Examples -------- For undirected graphs algebraic connectivity can tell us if a graph is connected or not + `G` is connected iff ``algebraic_connectivity(G) > 0``: >>> #if G is a complete grpah then G is connected >>> G = nx.complete_graph(5)
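A runnable version of the connectivity check being documented. The algebraic connectivity of a disconnected graph is 0 by definition (the second-smallest Laplacian eigenvalue); the sketch assumes `algebraic_connectivity` handles disconnected input by returning 0 rather than raising:
```python
import networkx as nx

# connected graph: strictly positive algebraic connectivity
assert nx.algebraic_connectivity(nx.complete_graph(5)) > 0

# disconnected graph: algebraic connectivity is (numerically) zero
disconnected = nx.Graph([(0, 1), (2, 3)])
assert abs(nx.algebraic_connectivity(disconnected)) < 1e-7
```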
codereview_new_python_data_5664
def test_omega(): for o in omegas: assert -1 <= o <= 1 - -def test_graph_no_edges(): G = nx.Graph() G.add_nodes_from([0, 1, 2, 3]) - pytest.raises(nx.NetworkXError, nx.random_reference, G) - pytest.raises(nx.NetworkXError, nx.lattice_reference, G) Test looks good, just a few minor suggestions to make it even better! ```suggestion @pytest.mark.parametrize("f", (nx.random_reference, nx.lattice_reference)) def test_graph_no_edges(f): G = nx.Graph() G.add_nodes_from([0, 1, 2, 3]) with pytest.raises(nx.NetworkXError, match="Graph has less than 2 edges"): f(G) ``` def test_omega(): for o in omegas: assert -1 <= o <= 1 +@pytest.mark.parametrize("f", (nx.random_reference, nx.lattice_reference)) +def test_graph_no_edges(f): G = nx.Graph() G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match="Graph has less than 2 edges"): + f(G)
codereview_new_python_data_5665
def double_edge_swap(G, nswap=1, max_tries=100, seed=None): ------ NetworkXError If `G` is directed, or - If nswap > max_tries, or If there are fewer than 4 nodes or 2 edges in `G`. NetworkXAlgorithmError If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made ```suggestion If `nswap` > `max_tries`, or ``` def double_edge_swap(G, nswap=1, max_tries=100, seed=None): ------ NetworkXError If `G` is directed, or + If `nswap` > `max_tries`, or If there are fewer than 4 nodes or 2 edges in `G`. NetworkXAlgorithmError If the number of swap attempts exceeds `max_tries` before `nswap` swaps are made
codereview_new_python_data_5666
def test_degree_seq_c4(): assert degrees == sorted(d for n, d in G.degree()) -def test_no_edges(): G = nx.DiGraph() G.add_nodes_from([0, 1, 2]) - pytest.raises(nx.NetworkXError, nx.directed_edge_swap, G) - G = nx.Graph() - G.add_nodes_from([0, 1, 2, 3]) - pytest.raises(nx.NetworkXError, nx.double_edge_swap, G) def test_less_than_3_edges(): - G = nx.DiGraph() - G.add_edges_from([(0, 1), (1, 2)]) - pytest.raises(nx.NetworkXError, nx.directed_edge_swap, G) ```suggestion G = nx.DiGraph([(0, 1), (1, 2)]) with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"): nx.directed_edge_swap(G) ``` Note that the below wording for `match=` is predicated on the assumption that we update 'less' -> 'fewer' in the exception message. def test_degree_seq_c4(): assert degrees == sorted(d for n, d in G.degree()) +def test_fewer_than_4_nodes(): G = nx.DiGraph() G.add_nodes_from([0, 1, 2]) + with pytest.raises(nx.NetworkXError, match=".*fewer than four nodes."): + nx.directed_edge_swap(G) def test_less_than_3_edges(): + G = nx.DiGraph([(0, 1), (1, 2)]) + G.add_nodes_from([3, 4]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 3 edges"): + nx.directed_edge_swap(G) + + G = nx.Graph() + G.add_nodes_from([0, 1, 2, 3]) + with pytest.raises(nx.NetworkXError, match=".*fewer than 2 edges"): + nx.double_edge_swap(G)
codereview_new_python_data_5667
def eulerian_path(G, source=None, keys=False): def eulerize(G): """Transforms a graph into an Eulerian graph. - If `G` is Eulerian the result is `G`, otherwise the result is a smallest (in terms of the number of edges) multigraph whose underlying simple graph is `G`. Parameters Should this perhaps mention that the result is always a multigraph, i.e. something like "If `G` is Eulerian the result is `G` as a MultiGraph..."? def eulerian_path(G, source=None, keys=False): def eulerize(G): """Transforms a graph into an Eulerian graph. + If `G` is Eulerian the result is `G` as a MultiGraph, otherwise the result is a smallest (in terms of the number of edges) multigraph whose underlying simple graph is `G`. Parameters
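A sketch of both halves of that sentence: the result is always a multigraph, it is Eulerian, and its underlying simple graph is the input:
```python
import networkx as nx

G = nx.path_graph(3)  # not Eulerian: nodes 0 and 2 have odd degree
H = nx.eulerize(G)

assert isinstance(H, nx.MultiGraph)
assert nx.is_eulerian(H)
# the underlying simple graph is unchanged
assert {tuple(sorted(e)) for e in nx.Graph(H).edges} == {(0, 1), (1, 2)}
```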
codereview_new_python_data_5668
def test_find_cliques_trivial(self): assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] - def test_make_max_clique_graph_2(self): - "Test the create_using parameter" G = nx.Graph() G.add_edges_from([(1, 2), (3, 1), (4, 1), (5, 6)]) assert sorted(nx.make_max_clique_graph(G, nx.Graph)) == [0, 1, 2, 3] It's a best-practice to have informative test names, as that makes it easier to parse if/when something fails. ```suggestion def test_make_max_clique_graph_create_using(self): ``` def test_find_cliques_trivial(self): assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] + def test_make_max_clique_graph_create_using(self): G = nx.Graph() G.add_edges_from([(1, 2), (3, 1), (4, 1), (5, 6)]) assert sorted(nx.make_max_clique_graph(G, nx.Graph)) == [0, 1, 2, 3]
codereview_new_python_data_5669
def test_find_cliques_trivial(self): assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] - def test_make_max_clique_graph_2(self): - "Test the create_using parameter" G = nx.Graph() G.add_edges_from([(1, 2), (3, 1), (4, 1), (5, 6)]) assert sorted(nx.make_max_clique_graph(G, nx.Graph)) == [0, 1, 2, 3] Just a nit, but might as well simplify this and put it all on one line! ```suggestion G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)]) ``` def test_find_cliques_trivial(self): assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] + def test_make_max_clique_graph_create_using(self): G = nx.Graph() G.add_edges_from([(1, 2), (3, 1), (4, 1), (5, 6)]) assert sorted(nx.make_max_clique_graph(G, nx.Graph)) == [0, 1, 2, 3]
codereview_new_python_data_5670
def test_find_cliques_trivial(self): assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] - def test_make_max_clique_graph_2(self): - "Test the create_using parameter" G = nx.Graph() G.add_edges_from([(1, 2), (3, 1), (4, 1), (5, 6)]) assert sorted(nx.make_max_clique_graph(G, nx.Graph)) == [0, 1, 2, 3] This check isn't very specific - it basically just checks that there are 4 nodes in the max_clique_graph, but doesn't test anything else about it. What do we expect the output graph of `max_clique_graph` to look like in this case? def test_find_cliques_trivial(self): assert sorted(nx.find_cliques(G)) == [] assert sorted(nx.find_cliques_recursive(G)) == [] + def test_make_max_clique_graph_create_using(self): G = nx.Graph() G.add_edges_from([(1, 2), (3, 1), (4, 1), (5, 6)]) assert sorted(nx.make_max_clique_graph(G, nx.Graph)) == [0, 1, 2, 3]
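To the question above: for this graph the expected output is a triangle plus an isolated node. A sketch (not from the review) spelling that out without depending on the clique enumeration order:
```python
import networkx as nx

G = nx.Graph([(1, 2), (3, 1), (4, 1), (5, 6)])
# maximal cliques: {1, 2}, {1, 3}, {1, 4} and {5, 6}
assert sorted(map(sorted, nx.find_cliques(G))) == [[1, 2], [1, 3], [1, 4], [5, 6]]

H = nx.make_max_clique_graph(G)
# one node per maximal clique; cliques sharing a node are joined, so the
# three cliques containing node 1 form a triangle and {5, 6} is isolated
assert sorted(H) == [0, 1, 2, 3]
assert H.number_of_edges() == 3
assert sorted(d for _, d in H.degree) == [0, 2, 2, 2]
```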
codereview_new_python_data_5671
def all_pairs_node_connectivity(G, nbunch=None, cutoff=None): Examples -------- - >>> # A 3 node cycle with one extra node attached has connectivity 2 between all - >>> # nodes in the cycle and connectivity 1 between the extra node and the rest >>> G = nx.cycle_graph(3) >>> G.add_edge(2, 3) >>> nx.all_pairs_node_connectivity(G) Just a quick note: you can actually include text normally in the `Examples` section, as long as there is a blank line between "normal" text and the executable examples: ```suggestion A 3 node cycle with one extra node attached has connectivity 2 between all nodes in the cycle and connectivity 1 between the extra node and the rest: ``` def all_pairs_node_connectivity(G, nbunch=None, cutoff=None): Examples -------- + A 3 node cycle with one extra node attached has connectivity 2 between all + nodes in the cycle and connectivity 1 between the extra node and the rest: + >>> G = nx.cycle_graph(3) >>> G.add_edge(2, 3) >>> nx.all_pairs_node_connectivity(G)
codereview_new_python_data_5672
def test_bidirectional_shortest_path(self): @pytest.mark.parametrize( ("src", "tgt"), ( - (8, 3), - (3, 8), - (8, 10), - (8, 8), ), ) def test_bidirectional_shortest_path_src_tgt_not_in_graph(self, src, tgt): ```suggestion (8, 3), # source not in graph (3, 8), # target not in graph (8, 10), # neither source nor target in graph (8, 8), # src == tgt, neither in graph - tests order of input checks ``` def test_bidirectional_shortest_path(self): @pytest.mark.parametrize( ("src", "tgt"), ( + (8, 3), # source not in graph + (3, 8), # target not in graph + (8, 10), # neither source nor target in graph + (8, 8), # src == tgt, neither in graph - tests order of input checks ), ) def test_bidirectional_shortest_path_src_tgt_not_in_graph(self, src, tgt):
codereview_new_python_data_5673
G = nx.path_graph(20) # An example graph center_node = 5 # Or any other node to be in the center edge_nodes = set(G) - {center_node} -pos = nx.circular_layout( - G.subgraph(edge_nodes) -) # Ensures the nodes around the circle are evenly distributed pos[center_node] = np.array([0, 0]) # Or off-center - whatever the user needs nx.draw(G, pos, with_labels=True) Again, just a nit - the original comment made more sense in the context of the PR discussion; this might read a bit better. NBD either way! ```suggestion pos[center_node] = np.array([0, 0]) # manually specify node position ``` G = nx.path_graph(20) # An example graph center_node = 5 # Or any other node to be in the center edge_nodes = set(G) - {center_node} +# Ensures the nodes around the circle are evenly distributed +pos = nx.circular_layout(G.subgraph(edge_nodes)) pos[center_node] = np.array([0, 0]) # Or off-center - whatever the user needs nx.draw(G, pos, with_labels=True)
codereview_new_python_data_5674
def test_add_edge(self): assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} G = self.Graph() with pytest.raises(ValueError): - G.add_edges(None, "anything") def test_add_edge_conflicting_key(self): G = self.Graph() There is a typo here, it should be `add_edge` not `add_edges` :) def test_add_edge(self): assert G.adj == {0: {1: {0: {}}}, 1: {0: {0: {}}}} G = self.Graph() with pytest.raises(ValueError): + G.add_edge(None, "anything") def test_add_edge_conflicting_key(self): G = self.Graph()
codereview_new_python_data_5675
preflow_push, shortest_augmenting_path, ) - -flow_funcs_without_cutoff = { - preflow_push, -} flow_funcs = { boykov_kolmogorov, can this be imported from `maxflow.py` so that there is a single source of truth? preflow_push, shortest_augmenting_path, ) +from networkx.algorithms.flow.maxflow import flow_funcs_without_cutoff flow_funcs = { boykov_kolmogorov,