Columns: id — string (length 30–32); content — string (length 139–2.8k)
codereview_new_python_data_7262
# Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). import functools import sys import types -# Copied from https://github.com/python/cpython/blob/951303fd855838d47765dcd05471e14311dc9fdd/Lib/inspect.py def get_annotations(obj, *, globals=None, locals=None, eval_str=False): """Compute the annotations dict for an object. obj may be a callable, class, or module. Passing in an object of any other type raises TypeError. Returns a dict. get_annotations() returns a Hmm this feels a bit naughty... # Copyright 2023 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). +# +# Copyright exception for `get_annotations()` which is in the public domain. No warranties. +# Copied from https://github.com/python/cpython/blob/951303fd855838d47765dcd05471e14311dc9fdd/Lib/inspect.py + import functools import sys import types def get_annotations(obj, *, globals=None, locals=None, eval_str=False): """Compute the annotations dict for an object. obj may be a callable, class, or module. Passing in an object of any other type raises TypeError. Returns a dict. get_annotations() returns a
codereview_new_python_data_7263
-# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md). -# Licensed under the Apache License, Version 2.0 (see LICENSE). - -"""Linter & formatter for Python. - -See https://www.pantsbuild.org/docs/python-linters-and-formatters and https://github.com/charliermarsh/ruff -""" - -from pants.backend.python.lint.ruff import rules as ruff_rules -from pants.backend.python.lint.ruff import skip_field, subsystem - - -def rules(): - return (*ruff_rules.rules(), *skip_field.rules(), *subsystem.rules()) I think we should remove this file, so the only backend is the experimental one
codereview_new_python_data_7264
class OptionHelpInfo: removal_version: If deprecated: The version at which this option is to be removed. removal_hint: If deprecated: The removal hint message registered for this option. choices: If this option has a constrained set of choices, a tuple of the stringified choices. - fromfile: Whether the option value should be loaded from a file specified with the `@filepath`. """ display_args: tuple[str, ...] I think the file source is optional, indicated with the leading `@`.. (i.e. "could be loaded" rather than "should") class OptionHelpInfo: removal_version: If deprecated: The version at which this option is to be removed. removal_hint: If deprecated: The removal hint message registered for this option. choices: If this option has a constrained set of choices, a tuple of the stringified choices. + fromfile: Whether the option value will be loaded from a file specified with the `@filepath`. """ display_args: tuple[str, ...]
codereview_new_python_data_7265
async def map_module_to_address( # the one with the closest common ancestor to the requester. for val in type_to_closest_providers.values(): providers = val[1] providers_with_closest_common_ancestor: list[ModuleProvider] = [] closest_common_ancestor_len = 0 for provider in providers: This doesn't actually check the "if we have more than one provider left" condition, but probably should to avoid the `for provider in providers` loop entirely? async def map_module_to_address( # the one with the closest common ancestor to the requester. for val in type_to_closest_providers.values(): providers = val[1] + if len(providers) < 2: + continue providers_with_closest_common_ancestor: list[ModuleProvider] = [] closest_common_ancestor_len = 0 for provider in providers:
codereview_new_python_data_7266
class ShellCommandWorkdirField(StringField): help = softwrap( "Sets the current working directory of the command, relative to the project root. If not " "set, use the project root.\n\n" - "The following special values are supported:\n\n" - "* `.`: the location of the `BUILD` file that defines this target.\n" - "* `/`: the project root.\n" - "\n" - "Directories may be specified relative to either of these special values." ) I'm not sure if the relative terminology here is helpful or confusing together with the `/` as a "special value". How about describing it as the path is rooted at the project root and that relative paths are relative to the location of the BULD file, and that paths are implicitly rooted unless starting with a `.`? class ShellCommandWorkdirField(StringField): help = softwrap( "Sets the current working directory of the command, relative to the project root. If not " "set, use the project root.\n\n" + "To specify the location of the `BUILD` file, use `.`. Values beginning with `.` are " + "relative to the location of the `BUILD` file.\n\n" + "To specify the project/build root, use `/` or the empty string." )
codereview_new_python_data_7267
class PythonDistributionEntryPointsField(NestedDictStringToStringField, AsyncFie class PythonDistributionOutputPathField(StringField, AsyncFieldMixin): help = softwrap( """ - The path to the output distribution, relative the dist directtory. - If undefined, this default to the empty path, i.e. the output goes at the top - level of dist/ """ ) alias = "output_path" - - def value_or_default(self) -> str: - return self.value or "" @dataclass(frozen=True) The distdir is [configurable](https://github.com/pantsbuild/pants), so it's best not to hardcode its default path here. Unfortunately there is no way of getting the actual value here, so I recommend saying "the top level of the dist dir". Also, above, you have a typo: directtory should be directory. class PythonDistributionEntryPointsField(NestedDictStringToStringField, AsyncFie class PythonDistributionOutputPathField(StringField, AsyncFieldMixin): help = softwrap( """ + The path to the output distribution, relative the dist directory. + If undefined, this defaults to the empty path, i.e. the output goes at the top + level of the dist dir. """ ) alias = "output_path" + default = "" @dataclass(frozen=True)
codereview_new_python_data_7268
class PythonDistributionEntryPointsField(NestedDictStringToStringField, AsyncFie class PythonDistributionOutputPathField(StringField, AsyncFieldMixin): help = softwrap( """ - The path to the output distribution, relative the dist directtory. - If undefined, this default to the empty path, i.e. the output goes at the top - level of dist/ """ ) alias = "output_path" - - def value_or_default(self) -> str: - return self.value or "" @dataclass(frozen=True) I believe that if you set `default = ""` on the class then `self.value` will do the right thing, and you won't need this function. class PythonDistributionEntryPointsField(NestedDictStringToStringField, AsyncFie class PythonDistributionOutputPathField(StringField, AsyncFieldMixin): help = softwrap( """ + The path to the output distribution, relative the dist directory. + If undefined, this defaults to the empty path, i.e. the output goes at the top + level of the dist dir. """ ) alias = "output_path" + default = "" @dataclass(frozen=True)
codereview_new_python_data_7269
class DistBuildRequest: input: Digest working_directory: str # Relpath within the input digest. build_time_source_roots: tuple[str, ...] # Source roots for 1st party build-time deps. - output_path: str # Location of the output file within dist dir. target_address_spec: str | None = None # Only needed for logging etc. wheel_config_settings: FrozenDict[str, tuple[str, ...]] | None = None Clarify in the comment that this is the location of the output *directory* within dist dir. class DistBuildRequest: input: Digest working_directory: str # Relpath within the input digest. build_time_source_roots: tuple[str, ...] # Source roots for 1st party build-time deps. + output_path: str # Location of the output directory within dist dir. target_address_spec: str | None = None # Only needed for logging etc. wheel_config_settings: FrozenDict[str, tuple[str, ...]] | None = None
codereview_new_python_data_7270
class PythonDistributionEntryPointsField(NestedDictStringToStringField, AsyncFie class PythonDistributionOutputPathField(StringField, AsyncFieldMixin): help = softwrap( """ - The path to the output distribution, relative the dist directory. If undefined, this defaults to the empty path, i.e. the output goes at the top level of the dist dir. ```suggestion The path to the directory to write the distribution file to, relative the dist directory. ``` class PythonDistributionEntryPointsField(NestedDictStringToStringField, AsyncFie class PythonDistributionOutputPathField(StringField, AsyncFieldMixin): help = softwrap( """ + The path to the directory to write the distribution file to, relative the dist directory. If undefined, this defaults to the empty path, i.e. the output goes at the top level of the dist dir.
codereview_new_python_data_7271
async def run_pep517_build(request: DistBuildRequest, python_setup: PythonSetup) ), ) - # This is the setuptools dist directory, not Pants's, so we harcode to dist/. dist_dir = "dist" backend_shim_name = "backend_shim.py" backend_shim_path = os.path.join(request.working_directory, backend_shim_name) ```suggestion # This is the setuptools dist directory, not Pants's, so we hardcode to dist/. ``` async def run_pep517_build(request: DistBuildRequest, python_setup: PythonSetup) ), ) + # This is the setuptools dist directory, not Pants's, so we hardcode to dist/. dist_dir = "dist" backend_shim_name = "backend_shim.py" backend_shim_path = os.path.join(request.working_directory, backend_shim_name)
codereview_new_python_data_7272
class YamlSourceField(SingleSourceField): class YamlSourcesGeneratingSourcesField(MultipleSourcesField): uses_source_roots = False - default = ("*.yaml",) help = generate_multiple_sources_field_help_message( - "Example: `sources=['example.yaml', 'examples_*.yaml', '!ignore_me.yaml']`" ) maybe include `*.yml` in the defaults as well? class YamlSourceField(SingleSourceField): class YamlSourcesGeneratingSourcesField(MultipleSourcesField): uses_source_roots = False + default = ("*.yaml", "*.yml",) help = generate_multiple_sources_field_help_message( + "Example: `sources=['example.yaml', 'example.yml', 'examples_*.yaml', '!ignore_me.yaml']`" )
codereview_new_python_data_7273
class YamlSourceField(SingleSourceField): - expected_file_extensions = (".yaml",) uses_source_roots = False ```suggestion expected_file_extensions = (".yaml", ".yml") ``` class YamlSourceField(SingleSourceField): + expected_file_extensions = (".yaml", ".yml") uses_source_roots = False
codereview_new_python_data_7274
def find_nearest_ancestor_file(files: set[str], dir: str, config_file: str) -> s dir = os.path.dirname(dir) @rule async def gather_config_files( request: YamllintConfigFilesRequest, yamllint: Yamllint You stole this from `scalafmt` :wink: Please move it to a utility and both can use it. def find_nearest_ancestor_file(files: set[str], dir: str, config_file: str) -> s dir = os.path.dirname(dir) +# @TODO: This logic is very similar, but not identical to the one for scalafmt. It should be generalized and shared. @rule async def gather_config_files( request: YamllintConfigFilesRequest, yamllint: Yamllint
codereview_new_python_data_7275
def opt_out(cls, tgt: Target) -> bool: class Shunit2TestRequest(TestRequest): tool_subsystem = Shunit2 field_set_type = Shunit2FieldSet - supports_debug = False @dataclass(frozen=True) ```suggestion supports_debug = True ``` def opt_out(cls, tgt: Target) -> bool: class Shunit2TestRequest(TestRequest): tool_subsystem = Shunit2 field_set_type = Shunit2FieldSet + supports_debug = True @dataclass(frozen=True)
codereview_new_python_data_7276
See https://www.pantsbuild.org/docs/python-backend. """ -from pants.backend.codegen import export_codegen_goal from pants.backend.python import target_types_rules from pants.backend.python.dependency_inference import rules as dependency_inference_rules from pants.backend.python.goals import ( whops, dangling import.. See https://www.pantsbuild.org/docs/python-backend. """ from pants.backend.python import target_types_rules from pants.backend.python.dependency_inference import rules as dependency_inference_rules from pants.backend.python.goals import (
codereview_new_python_data_7277
class ShellCommandOutputFilesField(StringSequenceField): class ShellCommandOutputDirectoriesField(StringSequenceField): alias = "output_directories" help = softwrap( """ - Specify full directories of output from the shell command to capture. For files, use `output_files`. At least one of `output_files` and `output_directories` must be specified. We should also mention all descendants are captured (just in case there was any confusion) class ShellCommandOutputFilesField(StringSequenceField): class ShellCommandOutputDirectoriesField(StringSequenceField): alias = "output_directories" + required = False + default = () help = softwrap( """ + Specify full directories (including recursive descendants) of output to capture from the + shell command. For files, use `output_files`. At least one of `output_files` and `output_directories` must be specified.
codereview_new_python_data_7278
async def _prepare_process_request_from_target(shell_command: Target) -> ShellCo command = shell_command[ShellCommandCommandField].value if not command: - raise ValueError(f"Missing `command` line in `{description}.") # Prepare `input_digest`: Currently uses transitive targets per old behaviour, but # this will probably change soon, per #17345. Small typo: ```suggestion raise ValueError(f"Missing `command` line in {description}.") ``` async def _prepare_process_request_from_target(shell_command: Target) -> ShellCo command = shell_command[ShellCommandCommandField].value if not command: + raise ValueError(f"Missing `command` line in {description}.") # Prepare `input_digest`: Currently uses transitive targets per old behaviour, but # this will probably change soon, per #17345.
codereview_new_python_data_7279
class Get(Generic[_Output], Awaitable[_Output]): The long form supports providing type information to the rule engine that it could not otherwise infer from the input variable [1]. Likewise, the short form must use inline construction of the - input in order to convey the input type to the engine. The dict form supports providing more than - one input to the engine for the Get request. [1] The engine needs to determine all rule and Get input and output types statically before executing any rules. Since Gets are declared inside function bodies, the only way to extract this Actually, I've recently found out, that this dict syntax also allows `Get` requests without inputs, `Get(BashBinary, {})` so we could get rid of "wrapper rules" like these for instance: https://github.com/pantsbuild/pants/blob/9fc72ca66c77866a9e346cb3a188f2e91dbb1b4b/src/python/pants/core/util_rules/system_binaries.py#L882-L894 Maybe would be worth mentioning here at some point.. (not planning on spending the CI cycle to tweak it now) class Get(Generic[_Output], Awaitable[_Output]): The long form supports providing type information to the rule engine that it could not otherwise infer from the input variable [1]. Likewise, the short form must use inline construction of the + input in order to convey the input type to the engine. The dict form supports providing zero or + more inputs to the engine for the Get request. [1] The engine needs to determine all rule and Get input and output types statically before executing any rules. Since Gets are declared inside function bodies, the only way to extract this
codereview_new_python_data_7280
class ShellRunInSandboxTarget(Target): ShellCommandExecutionDependenciesField, ShellCommandOutputDependenciesField, ShellCommandLogOutputField, - ShellCommandOutputsField, RunInSandboxSourcesField, ShellCommandTimeoutField, ShellCommandToolsField, Should this be using the new style from #17744? ```suggestion ShellCommandOutputFilesField, ShellCommandOutputDirectoriesField, ``` class ShellRunInSandboxTarget(Target): ShellCommandExecutionDependenciesField, ShellCommandOutputDependenciesField, ShellCommandLogOutputField, + ShellCommandOutputFilesField, + ShellCommandOutputDirectoriesField, RunInSandboxSourcesField, ShellCommandTimeoutField, ShellCommandToolsField,
codereview_new_python_data_7281
async def run_in_sandbox_request( runnable_address_str = shell_command[RunInSandboxRunnableField].value if not runnable_address_str: - raise Exception(f"Must supply a `runnable` for {description}.") runnable_address = await Get( Address, ```suggestion raise Exception(f"Must supply a value for `runnable` for {description}.") ``` async def run_in_sandbox_request( runnable_address_str = shell_command[RunInSandboxRunnableField].value if not runnable_address_str: + raise Exception(f"Must supply a value for `runnable` for {description}.") runnable_address = await Get( Address,
codereview_new_python_data_7282
class RunInSandboxBehavior(Enum): """Defines the behavhior of rules that act on a `RunFieldSet` subclass with regards to use in the sandbox. - This is used to automatically generate rules used to fulfil `experimental_run_in_sandbox` targets. """ ```suggestion This is used to automatically generate rules used to fulfill `experimental_run_in_sandbox` ``` class RunInSandboxBehavior(Enum): """Defines the behavhior of rules that act on a `RunFieldSet` subclass with regards to use in the sandbox. + This is used to automatically generate rules used to fulfill `experimental_run_in_sandbox` targets. """
codereview_new_python_data_7283
def _run_in_sandbox_behavior_rule(cls: type[RunFieldSet]) -> Iterable: @rule(_param_type_overrides={"request": cls}) async def not_supported(request: RunFieldSet) -> RunInSandboxRequest: raise NotImplementedError( - "Running this target type with in the sandbox is not yet supported." ) @rule(_param_type_overrides={"request": cls}) ```suggestion "Running this target type within the sandbox is not yet supported." ``` def _run_in_sandbox_behavior_rule(cls: type[RunFieldSet]) -> Iterable: @rule(_param_type_overrides={"request": cls}) async def not_supported(request: RunFieldSet) -> RunInSandboxRequest: raise NotImplementedError( + "Running this target type within the sandbox is not yet supported." ) @rule(_param_type_overrides={"request": cls})
codereview_new_python_data_7284
async def do_export( description, dest, post_processing_cmds=[ PostProcessingCommand(["rmdir", output_path]), PostProcessingCommand(["ln", "-s", venv_abspath, output_path]), ], export creates an empty directory for us when the digest gets written. We have to remove that before creating the symlink in its place. async def do_export( description, dest, post_processing_cmds=[ + # export creates an empty directory for us when the digest gets written. + # We have to remove that before creating the symlink in its place. PostProcessingCommand(["rmdir", output_path]), PostProcessingCommand(["ln", "-s", venv_abspath, output_path]), ],
codereview_new_python_data_7285
async def _create_python_source_run_request( *chrooted_source_roots, ] extra_env = { - **complete_pex_environment.environment_dict(python_configured=True), "PEX_EXTRA_SYS_PATH": os.pathsep.join(source_roots), } Why is it true that pex.python is not None always here? async def _create_python_source_run_request( *chrooted_source_roots, ] extra_env = { + **complete_pex_environment.environment_dict(python_configured=venv_pex.python is not None), "PEX_EXTRA_SYS_PATH": os.pathsep.join(source_roots), }
codereview_new_python_data_7286
def run_pants_help_all() -> dict[str, Any]: "pants.backend.shell", "pants.backend.shell.lint.shellcheck", "pants.backend.shell.lint.shfmt", - "pants.tools.fmt.preamble", ] argv = [ "./pants", Seems odd that this is not under `pants.backend` when absolutely everything else is. Why? This is a backend, even if not a language-specific one. def run_pants_help_all() -> dict[str, Any]: "pants.backend.shell", "pants.backend.shell.lint.shellcheck", "pants.backend.shell.lint.shfmt", + "pants.backend.tools.fmt.preamble", ] argv = [ "./pants",
codereview_new_python_data_7287
async def create_python_source_debug_adapter_request( python_setup: PythonSetup, ) -> RunDebugAdapterRequest: debugpy_pex = await Get( Pex, PexRequest, debugpy.to_pex_request( After all this it seems like `Pex` and not `VenvPex` here deserves a comment why this is OK. It's implied by line 77 if you know how `PEX_PATH` combines with a `--venv` PEX, but IIUC, most of the troublems in all this was exactly not understanding all that. async def create_python_source_debug_adapter_request( python_setup: PythonSetup, ) -> RunDebugAdapterRequest: debugpy_pex = await Get( + # NB: We fold the debugpy PEX into the normally constructed VenvPex so that debugpy is in the + # venv, but isn't the main entrypoint. Then we use PEX_* env vars to dynamically have debugpy + # be invoked in that VenvPex. Hence, a vanilla Pex. Pex, PexRequest, debugpy.to_pex_request(
codereview_new_python_data_7288
# Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations import os import textwrap from typing import Iterable, Optional ```suggestion import dataclasses import os ``` # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import annotations +import dataclasses import os import textwrap from typing import Iterable, Optional
codereview_new_python_data_7289
async def _create_python_source_run_request( ), ) - pex_request = dataclasses.replace( - pex_request, pex_path=(*pex_request.pex_path, *pex_path) - ) complete_pex_environment = pex_env.in_workspace() venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment)) ```suggestion pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path)) ``` async def _create_python_source_run_request( ), ) + pex_request = dataclasses.replace(pex_request, pex_path=(*pex_request.pex_path, *pex_path)) complete_pex_environment = pex_env.in_workspace() venv_pex = await Get(VenvPex, VenvPexRequest(pex_request, complete_pex_environment))
codereview_new_python_data_7290
def run_test(test_target: Target) -> TestResult: fail_target = rule_runner.get_target(Address("", target_name="fail")) fail_result = run_test(fail_target) - assert fail_result.exit_code != 0 assert fail_result.stdout == "does not contain 'xyzzy'\n" why not assert the code matches the exit code from the test? or are they disconnected? def run_test(test_target: Target) -> TestResult: fail_target = rule_runner.get_target(Address("", target_name="fail")) fail_result = run_test(fail_target) + assert fail_result.exit_code == 1 assert fail_result.stdout == "does not contain 'xyzzy'\n"
codereview_new_python_data_7291
def get_action( ruleset = self.get_ruleset(target) if ruleset is None: return None, None, None - path = str(address) if address.is_file_target else address.spec_path for visibility_rule in ruleset.rules: if visibility_rule.match(path, relpath): if visibility_rule.action != DependencyRuleAction.ALLOW: @danxmoran this line here enables the use of globs like `src/python/**/my_file.py` or what you'd like to have... in case the target gen is named differently than the directory, beware that you'd need to consider that in the pattern.. `src/python/**/myfile.py:sometarget` (or with a wildcard, of course) def get_action( ruleset = self.get_ruleset(target) if ruleset is None: return None, None, None + path = address.filename if address.is_file_target else address.spec_path for visibility_rule in ruleset.rules: if visibility_rule.match(path, relpath): if visibility_rule.action != DependencyRuleAction.ALLOW:
codereview_new_python_data_7973
def foo(input, weight, bias): foo(*inps) # Autotuning checks correctness of each version - self.assertEqual(counters["inductor"]["choice_caller_benchmarked"], 14) @patches def test_mm(self): ```suggestion self.assertEqual(counters["inductor"]["choice_caller_benchmarked"], 13) ``` def foo(input, weight, bias): foo(*inps) # Autotuning checks correctness of each version + self.assertEqual(counters["inductor"]["choice_caller_benchmarked"], 13) @patches def test_mm(self):
codereview_new_python_data_7974
def __init__(self): def compile(self, graph_module: "torch.fx.GraphModule", _): assert self.captured_graph_count == 0 - self.captured_graph = gm self.captured_graph_count += 1 return graph_module ```suggestion self.captured_graph = graph_module ``` def __init__(self): def compile(self, graph_module: "torch.fx.GraphModule", _): assert self.captured_graph_count == 0 + self.captured_graph = graph_module self.captured_graph_count += 1 return graph_module
codereview_new_python_data_7975
def bitwise_or(g, self, other): if not symbolic_helper._is_bool(self): raise errors.SymbolicValueError( "ONNX export does NOT support exporting bitwise OR " - "for non-boolean input values", self, ) if not symbolic_helper._is_bool(other): raise errors.SymbolicValueError( "ONNX export does NOT support exporting bitwise OR " - "for non-boolean input values", other, ) return g.op("Or", self, other) nit: might be more helpful if the error message explicitly tells if it's `self`, or `other`. def bitwise_or(g, self, other): if not symbolic_helper._is_bool(self): raise errors.SymbolicValueError( "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values. self: ", self, ) if not symbolic_helper._is_bool(other): raise errors.SymbolicValueError( "ONNX export does NOT support exporting bitwise OR " + "for non-boolean input values. other: ", other, ) return g.op("Or", self, other)
codereview_new_python_data_7976
def _export( # Return ModelProto in binary format. return onnx_model.SerializeToString() # Return ModelProto - return model_proto def export( ```suggestion return onnx_model ``` def _export( # Return ModelProto in binary format. return onnx_model.SerializeToString() # Return ModelProto + return onnx_model def export(
codereview_new_python_data_7977
def run_model_test_with_fx_to_onnx_exporter( if hasattr(test_suite, "check_dtype"): options.check_dtype = test_suite.check_dtype - names = set([f.name for f in dataclasses.fields(options)]) keywords_to_pop = [] for k, v in kwargs.items(): if k in names: ```suggestion names = set(f.name for f in dataclasses.fields(options)) ``` def run_model_test_with_fx_to_onnx_exporter( if hasattr(test_suite, "check_dtype"): options.check_dtype = test_suite.check_dtype + names = set(f.name for f in dataclasses.fields(options)) keywords_to_pop = [] for k, v in kwargs.items(): if k in names:
codereview_new_python_data_7978
def verify_model_with_fx_to_onnx_exporter( rtol: float = 0.001, atol: float = 1e-7, acceptable_error_percentage: Optional[float] = None, - opset_version: Optional[int] = None, **_, ): if input_kwargs is None: I suppose you'd need to assign a default value if None? def verify_model_with_fx_to_onnx_exporter( rtol: float = 0.001, atol: float = 1e-7, acceptable_error_percentage: Optional[float] = None, + opset_version: Optional[int] = GLOBALS.export_onnx_opset_version, **_, ): if input_kwargs is None:
codereview_new_python_data_8348
def do_rolling_bounce(self, processor, counter, current_generation): first_other_node = first_other_processor.node second_other_node = second_other_processor.node - kafka_version_str = self.get_version_string(self.base_version_number) with first_other_node.account.monitor_log(first_other_processor.LOG_FILE) as first_other_monitor: with second_other_node.account.monitor_log(second_other_processor.LOG_FILE) as second_other_monitor: This will work, but make the check slightly weaker, since it will never check for `SNAPSHOT`, even if I am currently on a snapshot version. ```suggestion kafka_version_str = self.get_version_string(str(DEV_VERSION)) ``` def do_rolling_bounce(self, processor, counter, current_generation): first_other_node = first_other_processor.node second_other_node = second_other_processor.node + kafka_version_str = self.get_version_string(str(DEV_VERSION)) with first_other_node.account.monitor_log(first_other_processor.LOG_FILE) as first_other_monitor: with second_other_node.account.monitor_log(second_other_processor.LOG_FILE) as second_other_monitor:
codereview_new_python_data_8441
def test_upgrade_downgrade_state_updater(self, from_version, to_version, upgrade # rolling bounce random.shuffle(self.processors) for p in self.processors: - p.CLEAN_NODE_ENABLED = False self.do_stop_start_bounce(p, None, second_version, counter, extra_properties_second) counter = counter + 1 What do you think about cleaning the state on the node before we bounce so that some restoration is going on? I think setting `p.CLEAN_NODE_ENABLED` to `True` would achieve this. def test_upgrade_downgrade_state_updater(self, from_version, to_version, upgrade # rolling bounce random.shuffle(self.processors) for p in self.processors: + p.CLEAN_NODE_ENABLED = True self.do_stop_start_bounce(p, None, second_version, counter, extra_properties_second) counter = counter + 1
codereview_new_python_data_8458
def prepare_for(processor, version, extra_properties): else: processor.set_version(version) - def do_stop_start_bounce(self, processor, upgrade_from, new_version, counter, extra_properties=None): if extra_properties is None: extra_properties = {} nit: empty line def prepare_for(processor, version, extra_properties): else: processor.set_version(version) + def do_stop_start_bounce(self, processor, upgrade_from, new_version, counter, extra_properties = None): if extra_properties is None: extra_properties = {}
codereview_new_python_data_8608
def main(): jobs = list(pachClient.list_job(p.pipeline.name)) for j in jobs: - started = float((str(j.started.seconds) + "." + str(j.started.nanos))) - finished = float((str(j.finished.seconds) + "." + str(j.finished.nanos))) - elapsed = finished - started rows_to_insert = [{u"pachydermVersion": version, u"workloadName": "test", u"jobId": str(j.job.id), u"pipeline": j.job.pipeline.name, u"totalDatums": str(j.data_total), u"datumsProcessed": str(j.data_processed), - u"executionTime": str(elapsed)}] errors = client.insert_rows_json(table, rows_to_insert) if errors == []: I'm not sure this is completely right. For example, if nanos is 10(an int32, so no leading 0s), and seconds is 50 you get 50.10 seconds, but .10 seconds is way more than 10 nanos. The protobuf timestamp data type has an example with Python's Timestamp class which seems more reliable [https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Timestamp) def main(): jobs = list(pachClient.list_job(p.pipeline.name)) for j in jobs: rows_to_insert = [{u"pachydermVersion": version, u"workloadName": "test", u"jobId": str(j.job.id), u"pipeline": j.job.pipeline.name, u"totalDatums": str(j.data_total), u"datumsProcessed": str(j.data_processed), + u"startTime": j.started, u"startTime": j.finished}] errors = client.insert_rows_json(table, rows_to_insert) if errors == []:
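The timestamp issue flagged in the row above can be illustrated without any Pachyderm or BigQuery code. This is a minimal, self-contained sketch with made-up `seconds`/`nanos` values; it only shows why string-concatenating the two protobuf Timestamp fields misplaces the decimal point:

```python
# Hypothetical protobuf Timestamp fields: 50 seconds and 10 nanoseconds.
seconds, nanos = 50, 10

# String concatenation treats the nanosecond count as fractional-second digits.
wrong = float(f"{seconds}.{nanos}")   # 50.1 seconds -- off by eight orders of magnitude
# Converting nanoseconds to seconds keeps the value correct.
right = seconds + nanos * 1e-9        # 50.00000001 seconds

print(wrong, right)
```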
codereview_new_python_data_8689
def test_list_tables_with_schema(self): with self.backend.execute_sql_query("DROP SCHEMA IF EXISTS orange_tests CASCADE"): pass - @dbt.run_on(["postgres"]) def test_nan_frequency(self): ar = np.random.random((4, 3)) ar[:2, 1:] = np.nan It should be also tested on mssql ```suggestion @dbt.run_on(["postgres", "mssql"]) ``` def test_list_tables_with_schema(self): with self.backend.execute_sql_query("DROP SCHEMA IF EXISTS orange_tests CASCADE"): pass + @dbt.run_on(["postgres", "mssql"]) def test_nan_frequency(self): ar = np.random.random((4, 3)) ar[:2, 1:] = np.nan
codereview_new_python_data_8690
def checksum(self, include_metas=True): return np.nan def __get_nan_frequency(self, columns): - nans = 0 - for column in columns: - col_field = column.to_sql() - query = self._sql_query(["COUNT(*)"], - filters=[f"{col_field} IS NULL OR {col_field} = 'NaN'"]) - with self.backend.execute_sql_query(query) as get_nans: - nans += get_nans.fetchall()[0][0] - return nans / (len(self) * len(columns)) def get_nan_frequency_attribute(self): return self.__get_nan_frequency(self.domain.attributes) Checking NULL is sufficient. Checking `"nan"` even fail on MSSQL when column is float. ```suggestion query = self._sql_query(["COUNT(*)"], filters=[f"{col_field} IS NULL"]) ``` def checksum(self, include_metas=True): return np.nan def __get_nan_frequency(self, columns): + query = self._sql_query([" + ".join([f"COUNT(*) - COUNT({col.to_sql()})" + for col in columns])]) + with self.backend.execute_sql_query(query) as cur: + nans = cur.fetchone() + return nans[0] / (len(self) * len(columns)) def get_nan_frequency_attribute(self): return self.__get_nan_frequency(self.domain.attributes)
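The suggested query relies on the fact that SQL's `COUNT(column)` skips NULLs, so `COUNT(*) - COUNT(column)` gives the per-column NULL count. Below is a standalone sketch using the stdlib `sqlite3` module (not Orange's SQL backend) to verify that idea:

```python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE t (a REAL, b REAL)")
con.executemany("INSERT INTO t VALUES (?, ?)", [(1.0, None), (None, None), (3.0, 4.0)])

# COUNT(col) ignores NULLs, so COUNT(*) - COUNT(col) counts them.
nulls = con.execute(
    "SELECT (COUNT(*) - COUNT(a)) + (COUNT(*) - COUNT(b)) FROM t"
).fetchone()[0]
n_rows = con.execute("SELECT COUNT(*) FROM t").fetchone()[0]
print(nulls / (n_rows * 2))  # 0.5 -- three NULL cells out of six
```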
codereview_new_python_data_8691
def test_missing_one_row_data(self): self.send_signal(widget.Inputs.data, None) self.assertFalse(widget.Warning.missing_values.is_shown()) def test_single_row_data(self): widget = self.widget with self.iris.unlocked(): Could you add `@_patch_recompute_som` before this function, so it won't waste time by actually running SOM? The time saved is negligible, but it's trivial to do (because the function is already there) and other tests in this module are decorated like this, so let's keep consistency. def test_missing_one_row_data(self): self.send_signal(widget.Inputs.data, None) self.assertFalse(widget.Warning.missing_values.is_shown()) + @_patch_recompute_som def test_single_row_data(self): widget = self.widget with self.iris.unlocked():
codereview_new_python_data_8692
def __eq__(self, other): return super().__eq__(other) and self.columns == other.columns def __hash__(self): - return super().__hash__() ^ hash(self.columns) class HasClass(Filter): Why xor and not putting it within one `hash()`? def __eq__(self, other): return super().__eq__(other) and self.columns == other.columns def __hash__(self): + return hash((super().__hash__(), hash(self.columns))) class HasClass(Filter):
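The reviewer's preference for a single `hash()` over XOR has a concrete reason: XOR is symmetric and self-cancelling, so it produces systematic collisions that a tuple hash avoids. A generic Python illustration (not the Orange filter classes from the row above):

```python
a, b = hash("columns"), hash("negate")

print(a ^ b == b ^ a)                         # True  -- swapping the two parts collides
print(hash("x") ^ hash("x"))                  # 0     -- equal parts always cancel out
print(hash(("x", "y")) == hash(("y", "x")))   # almost certainly False
```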
codereview_new_python_data_8693
def has_missing_class(self): """Return `True` if there are any missing class values.""" return bn.anynan(self._Y) - def get_nan_frequency_attribute(self): - if self.X.size == 0: return 0 - vals = self.X if not sp.issparse(self.X) else self.X.data - return np.isnan(vals).sum() / self.X.size def get_nan_frequency_class(self): - if self.Y.size == 0: - return 0 - return np.isnan(self._Y).sum() / self._Y.size def checksum(self, include_metas=True): # TODO: zlib.adler32 does not work for numpy arrays with dtype object ``` >>> sp.csc_matrix([[1, 2, 0], [0, 0, 1]]).size 3 ``` This code assumes it's 6, right? def has_missing_class(self): """Return `True` if there are any missing class values.""" return bn.anynan(self._Y) + @staticmethod + def __get_nan_frequency(data): + if data.size == 0: return 0 + dense = data if not sp.issparse(data) else data.data + return np.isnan(dense).sum() / np.prod(data.shape) + + def get_nan_frequency_attribute(self): + return self.__get_nan_frequency(self.X) def get_nan_frequency_class(self): + return self.__get_nan_frequency(self.Y) def checksum(self, include_metas=True): # TODO: zlib.adler32 does not work for numpy arrays with dtype object
codereview_new_python_data_8694
def test_details(self): f'Target: —' with patch.object(Table, "get_nan_frequency_attribute") as mock: self.assertEqual(details, format_summary_details(data)) - self.assertFalse(mock.called) data = None self.assertEqual('', format_summary_details(data)) I should have the same objection as above. But these tests are already written the wrong (imho) way, so ... let it stay. def test_details(self): f'Target: —' with patch.object(Table, "get_nan_frequency_attribute") as mock: self.assertEqual(details, format_summary_details(data)) + mock.assert_not_called() data = None self.assertEqual('', format_summary_details(data))
codereview_new_python_data_8695
def test_details(self): f'Target: —' with patch.object(Table, "get_nan_frequency_attribute") as mock: self.assertEqual(details, format_summary_details(data)) - self.assertFalse(mock.called) data = None self.assertEqual('', format_summary_details(data)) The usual way to test this is `mock.assert_not_called()` because it gives a better diagnostic message - if you test for `False`, it just tells you that `True` is not `False`... def test_details(self): f'Target: —' with patch.object(Table, "get_nan_frequency_attribute") as mock: self.assertEqual(details, format_summary_details(data)) + mock.assert_not_called() data = None self.assertEqual('', format_summary_details(data))
codereview_new_python_data_8696
from Orange.widgets.visualize.owbarplot import OWBarPlot from Orange.tests.test_dasktable import open_as_dask -Table.LOCKING = False - class TestOWBarPlot(WidgetTest, WidgetOutputsTestMixin): @classmethod The locking line can be removed. from Orange.widgets.visualize.owbarplot import OWBarPlot from Orange.tests.test_dasktable import open_as_dask class TestOWBarPlot(WidgetTest, WidgetOutputsTestMixin): @classmethod
codereview_new_python_data_8697
def check_multi_assignment_from_iterable( ) -> None: rvalue_type = get_proper_type(rvalue_type) if self.type_is_iterable(rvalue_type) and isinstance( - rvalue_type, (Instance, CallableType, TypeType) ): item_type = self.iterable_item_type(rvalue_type) for lv in lvalues: Maybe we should also support `Overloaded`? def check_multi_assignment_from_iterable( ) -> None: rvalue_type = get_proper_type(rvalue_type) if self.type_is_iterable(rvalue_type) and isinstance( + rvalue_type, (Instance, CallableType, TypeType, Overloaded) ): item_type = self.iterable_item_type(rvalue_type) for lv in lvalues:
codereview_new_python_data_8698
def translate_stmt_list( and self.type_ignores and min(self.type_ignores) < self.get_lineno(stmts[0]) ): - if ignores := self.type_ignores[min(self.type_ignores)]: joined_ignores = ", ".join(ignores) self.fail( ( We can't use walrus just yet; we still support 3.7 def translate_stmt_list( and self.type_ignores and min(self.type_ignores) < self.get_lineno(stmts[0]) ): + ignores = self.type_ignores[min(self.type_ignores)] + if ignores: joined_ignores = ", ".join(ignores) self.fail( (
codereview_new_python_data_8699
def get_extension() -> type[Extension]: extension_class: type[Extension] if not use_setuptools: - from distutils.core import Extension as extension_class else: - from setuptools import Extension as extension_class return extension_class ```suggestion import distutils.core extension_class = distutils.core.Extension ``` Just a hunch def get_extension() -> type[Extension]: extension_class: type[Extension] if not use_setuptools: + import distutils.core + extension_class = distutils.core.Extension else: + import setuptools + extension_class = setuptools.Extension return extension_class
codereview_new_python_data_8700
def is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]: - return all(item is Extension for item in items) def find_package_data(base, globs, root="mypy"): ```suggestion return all(isinstance(item, Extension) for item in items) ``` def is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]: + return all(isinstance(item, Extension) for item in items) def find_package_data(base, globs, root="mypy"):
codereview_new_python_data_8701
def _get_bool_arg(self, name: str, default: bool) -> bool: if value is not None: return value else: - self._api.fail(f'"{name}" argument must be True or False.', expression) return default Style nit: Leave out the period at the end of the message, for consistency. def _get_bool_arg(self, name: str, default: bool) -> bool: if value is not None: return value else: + self._api.fail(f'"{name}" argument must be True or False', expression) return default
codereview_new_python_data_8702
def find_dataclass_transform_spec(node: Node | None) -> DataclassTransformSpec | if isinstance(node, TypeInfo): # Search all parent classes to see if any are decorated with `typing.dataclass_transform` for base in node.mro[1:]: - if base.defn.dataclass_transform_spec is not None: - return base.defn.dataclass_transform_spec # Check if there is a metaclass that is decorated with `typing.dataclass_transform` metaclass_type = node.metaclass_type - if ( - metaclass_type is not None - and metaclass_type.type.defn.dataclass_transform_spec is not None - ): - return metaclass_type.type.defn.dataclass_transform_spec return None Should we search through the MRO of the metaclass as well? def find_dataclass_transform_spec(node: Node | None) -> DataclassTransformSpec | if isinstance(node, TypeInfo): # Search all parent classes to see if any are decorated with `typing.dataclass_transform` for base in node.mro[1:]: + if base.dataclass_transform_spec is not None: + return base.dataclass_transform_spec # Check if there is a metaclass that is decorated with `typing.dataclass_transform` metaclass_type = node.metaclass_type + if metaclass_type is not None and metaclass_type.type.dataclass_transform_spec is not None: + return metaclass_type.type.dataclass_transform_spec return None
codereview_new_python_data_8703
def upload_dist(dist: Path, dry_run: bool = True) -> None: def upload_to_pypi(version: str, dry_run: bool = True) -> None: - assert re.match(r"v?1\.[0-9]+\.[0-9](\+\S+)?$", version) if "dev" in version: assert dry_run, "Must use --dry-run with dev versions of mypy" if version.startswith("v"): Maybe use `[1-9]` instead of 1 for the major version? def upload_dist(dist: Path, dry_run: bool = True) -> None: def upload_to_pypi(version: str, dry_run: bool = True) -> None: + assert re.match(r"v?[1-9]\.[0-9]+\.[0-9](\+\S+)?$", version) if "dev" in version: assert dry_run, "Must use --dry-run with dev versions of mypy" if version.startswith("v"):
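The suggested `[1-9]` matters once the major version moves past 1; with a literal `1` the assertion would start rejecting valid releases. A quick check of both patterns:

```python
import re

old = r"v?1\.[0-9]+\.[0-9](\+\S+)?$"
new = r"v?[1-9]\.[0-9]+\.[0-9](\+\S+)?$"

print(bool(re.match(old, "2.0.0")))  # False -- a future 2.x release would fail the assert
print(bool(re.match(new, "2.0.0")))  # True
print(bool(re.match(new, "1.4.1")))  # True  -- current-style versions still match
```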
codereview_new_python_data_8704
def check_state() -> None: - if not os.path.isfile("pyproject.toml") and not os.path.isdir("mypy"): sys.exit("error: The current working directory must be the mypy repository root") out = subprocess.check_output(["git", "status", "-s", os.path.join("mypy", "typeshed")]) if out: While we're here, shouldn't this be `or`? ```suggestion if not os.path.isfile("pyproject.toml") or not os.path.isdir("mypy"): ``` def check_state() -> None: + if not os.path.isfile("pyproject.toml") or not os.path.isdir("mypy"): sys.exit("error: The current working directory must be the mypy repository root") out = subprocess.check_output(["git", "status", "-s", os.path.join("mypy", "typeshed")]) if out:
codereview_new_python_data_8706
def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value: and isinstance(callee.node, TypeInfo) and callee.node.is_newtype ): - # Calls to a NewType type is a no-op at runtime. return builder.accept(expr.args[0]) if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication): ```suggestion # A call to a NewType type is a no-op at runtime. ``` grammar nit def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value: and isinstance(callee.node, TypeInfo) and callee.node.is_newtype ): + # A call to a NewType type is a no-op at runtime. return builder.accept(expr.args[0]) if isinstance(callee, IndexExpr) and isinstance(callee.analyzed, TypeApplication):
codereview_new_python_data_8707
def analyze_unbound_tvar(self, t: Type) -> tuple[str, TypeVarLikeExpr] | None: # It's bound by our type variable scope return None return unbound.name, sym.node - if sym and sym.fullname in {"typing.Unpack", "typing_extensions.Unpack"}: inner_t = unbound.args[0] if not isinstance(inner_t, UnboundType): return None ```suggestion if sym and sym.fullname in ("typing.Unpack", "typing_extensions.Unpack"): ``` Nit, but this is what most other similar checks look like and I suspect mypyc handles it better. It's likely also faster in non-compiled Python to use a tuple here. def analyze_unbound_tvar(self, t: Type) -> tuple[str, TypeVarLikeExpr] | None: # It's bound by our type variable scope return None return unbound.name, sym.node + if sym and sym.fullname in ("typing.Unpack", "typing_extensions.Unpack"): inner_t = unbound.args[0] if not isinstance(inner_t, UnboundType): return None
codereview_new_python_data_8708
def _verify_exported_names( object_path + ["__all__"], ( "names exported from the stub do not correspond to the names exported at runtime. " - "This is probably due to things being missing from the stub, or an inaccurate `__all__` in the stub" ), # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very # relevant here, and it makes for a prettier error message ```suggestion "This is probably due to things being missing from the stub or an inaccurate `__all__` in the stub" ``` def _verify_exported_names( object_path + ["__all__"], ( "names exported from the stub do not correspond to the names exported at runtime. " + "This is probably due to things being missing from the stub or an inaccurate `__all__` in the stub" ), # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very # relevant here, and it makes for a prettier error message
codereview_new_python_data_8709
def _belongs_to_runtime(r: types.ModuleType, attr: str) -> bool: else { m for m in dir(runtime) - if not is_probably_private(m) and _belongs_to_runtime(runtime, m) } ) # Check all things declared in module's __all__, falling back to our best guess This comment still seems accurate and helpful def _belongs_to_runtime(r: types.ModuleType, attr: str) -> bool: else { m for m in dir(runtime) + if not is_probably_private(m) + # Do our best to filter out objects that originate from different modules, + # since in the absence of __all__, + # we don't have a sure-fire way to detect re-exports at runtime + and _belongs_to_runtime(runtime, m) } ) # Check all things declared in module's __all__, falling back to our best guess
codereview_new_python_data_8710
def _belongs_to_runtime(r: types.ModuleType, attr: str) -> bool: for m in dir(runtime) if not is_probably_private(m) # Filter out objects that originate from other modules (best effort). Note that in the - # absence of __all__, we don't have a way to detect re-exports at runtime and _belongs_to_runtime(runtime, m) } ) Nit: I'd prefer "surefire way" or "perfect way" here, since arguably we do have _a_ way of detecting re-exports (using `symtable` to parse the source code), it just isn't 100% reliable (doesn't work for `*` imports; and you can't always get the source code) def _belongs_to_runtime(r: types.ModuleType, attr: str) -> bool: for m in dir(runtime) if not is_probably_private(m) # Filter out objects that originate from other modules (best effort). Note that in the + # absence of __all__, we don't have a way to detect explicit re-exports at runtime and _belongs_to_runtime(runtime, m) } )
codereview_new_python_data_8711
def _find_module(self, id: str, use_typeshed: bool) -> ModuleSearchResult: # In namespace mode, register a potential namespace package if self.options and self.options.namespace_packages: if ( - fscache.exists_case(base_path, dir_prefix) and not fscache.isfile_case(base_path, dir_prefix) - and not has_init ): near_misses.append((base_path, dir_prefix)) ```suggestion not has_init and fscache.exists_case(base_path, dir_prefix) and not fscache.isfile_case(base_path, dir_prefix) ``` def _find_module(self, id: str, use_typeshed: bool) -> ModuleSearchResult: # In namespace mode, register a potential namespace package if self.options and self.options.namespace_packages: if ( + not has_init + and fscache.exists_case(base_path, dir_prefix) and not fscache.isfile_case(base_path, dir_prefix) ): near_misses.append((base_path, dir_prefix))
codereview_new_python_data_8712
def build_mypy(target_dir: str) -> None: env = os.environ.copy() env["CC"] = "clang" env["MYPYC_OPT_LEVEL"] = "2" - cmd = ["python3", "setup.py", "--use-mypyc", "build_ext", "--inplace"] subprocess.run(cmd, env=env, check=True, cwd=target_dir) `python3` doesn't work on Windows; it's better to use `sys.executable` for cross-platform compatibility ```suggestion cmd = [sys.executable, "setup.py", "--use-mypyc", "build_ext", "--inplace"] ``` def build_mypy(target_dir: str) -> None: env = os.environ.copy() env["CC"] = "clang" env["MYPYC_OPT_LEVEL"] = "2" + cmd = [sys.executable, "setup.py", "--use-mypyc", "build_ext", "--inplace"] subprocess.run(cmd, env=env, check=True, cwd=target_dir)
codereview_new_python_data_8713
def typeddict_key_not_found( self, typ: TypedDictType, item_name: str, context: Context, setitem: bool = False ) -> None: """ - Handles error messages. - Note, that we differentiate in between reading a value and setting - a value. - Setting a value on a TypedDict is an 'unknown-key' error, - whereas reading it is the more serious/general 'item' error. """ if typ.is_anonymous(): self.fail( Please use common convention for docstrings. It should be like this. ```python def func() -> None: """Short description in form of "do something". After empty line, long description indented with function body. The closing quotes n a separate line. """ def typeddict_key_not_found( self, typ: TypedDictType, item_name: str, context: Context, setitem: bool = False ) -> None: """ + Handles error messages. + Note, that we differentiate in between reading a value and setting + a value. + Setting a value on a TypedDict is an 'unknown-key' error, + whereas reading it is the more serious/general 'item' error. """ if typ.is_anonymous(): self.fail(
codereview_new_python_data_8714
def _verify_exported_names( object_path + ["__all__"], ( "names exported from the stub do not correspond to the names exported at runtime. " - "This is probably due to an inaccurate `__all__` in the stub or things being missing from the stub." ), # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very # relevant here, and it makes for a prettier error message nit: re-order this hint, since it's not necessarily related to `__all__` ```suggestion "This is probably due to things being missing from the stub, or if present, an inaccurate `__all__` in the stub" ``` def _verify_exported_names( object_path + ["__all__"], ( "names exported from the stub do not correspond to the names exported at runtime. " + "This is probably due to things being missing from the stub, or if present, an inaccurate `__all__` in the stub" ), # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very # relevant here, and it makes for a prettier error message
codereview_new_python_data_8715
def visit_with_stmt(self, o: WithStmt) -> None: def visit_import(self, o: Import) -> None: for mod, alias in o.ids: - name = alias - if name is None: - name = mod - self.tracker.record_definition(name) super().visit_import(o) def visit_import_from(self, o: ImportFrom) -> None: I think you need to split `id`s with `.` (not 100% sure) def visit_with_stmt(self, o: WithStmt) -> None: def visit_import(self, o: Import) -> None: for mod, alias in o.ids: + names = mod.split(".") + if alias is not None: + names[-1] = alias + for name in names: + self.tracker.record_definition(name) super().visit_import(o) def visit_import_from(self, o: ImportFrom) -> None:
codereview_new_python_data_8716
def check_namedtuple_classdef( ) -> tuple[list[str], list[Type], dict[str, Expression], list[Statement]] | None: """Parse and validate fields in named tuple class definition. - Return a three tuple: * field names * field types * field default values ```suggestion Return a four tuple: ``` def check_namedtuple_classdef( ) -> tuple[list[str], list[Type], dict[str, Expression], list[Statement]] | None: """Parse and validate fields in named tuple class definition. + Return a four tuple: * field names * field types * field default values
codereview_new_python_data_8717
def run_case_step(self, testcase: DataDrivenTestCase, incremental_step: int) -> env=env, ) if sys.version_info >= (3, 12): proc.wait(timeout=30) output = proc.communicate()[0].decode("utf8") outlines = output.splitlines() Hmm, why does this fix the hanging test? def run_case_step(self, testcase: DataDrivenTestCase, incremental_step: int) -> env=env, ) if sys.version_info >= (3, 12): + # TODO: testDecorators1 hangs on 3.12, remove this once fixed proc.wait(timeout=30) output = proc.communicate()[0].decode("utf8") outlines = output.splitlines()
codereview_new_python_data_8718
def __eq__(self, other: object) -> bool: return ( frozenset(self.items.keys()) == frozenset(other.items.keys()) and all( - not left_item_type == right_item_type for (_, left_item_type, right_item_type) in self.zip(other) ) and self.fallback == other.fallback This needs fixing :-) def __eq__(self, other: object) -> bool: return ( frozenset(self.items.keys()) == frozenset(other.items.keys()) and all( + left_item_type == right_item_type for (_, left_item_type, right_item_type) in self.zip(other) ) and self.fallback == other.fallback
codereview_new_python_data_8719
def mypycify( "-Wno-unused-command-line-argument", "-Wno-unknown-warning-option", "-Wno-unused-but-set-variable", - "-Wno-cpp", "-Wno-ignored-optimization-argument", ] elif compiler.compiler_type == "msvc": # msvc doesn't have levels, '/O2' is full and '/Od' is disable Can you add a short comment here explaining what this does (and perhaps include a link to the GitHub issue). def mypycify( "-Wno-unused-command-line-argument", "-Wno-unknown-warning-option", "-Wno-unused-but-set-variable", "-Wno-ignored-optimization-argument", + # Disables C Preprocessor (cpp) warnings + # See https://github.com/mypyc/mypyc/issues/956 + "-Wno-cpp", ] elif compiler.compiler_type == "msvc": # msvc doesn't have levels, '/O2' is full and '/Od' is disable
codereview_new_python_data_8720
def mypycify( "-Wno-unused-command-line-argument", "-Wno-unknown-warning-option", "-Wno-unused-but-set-variable", - "-Wno-cpp", - "-Wno-ignored-optimization-argument", ] elif compiler.compiler_type == "msvc": # msvc doesn't have levels, '/O2' is full and '/Od' is disable We should probably avoid adding to this list. Can you explain why these warnings need to be ignored? def mypycify( "-Wno-unused-command-line-argument", "-Wno-unknown-warning-option", "-Wno-unused-but-set-variable", ] elif compiler.compiler_type == "msvc": # msvc doesn't have levels, '/O2' is full and '/Od' is disable
codereview_new_python_data_8721
def visit_class_def(self, defn: ClassDef) -> None: f'Variance of TypeVar "{base_tvar.name}" incompatible ' "with variance in parent type", context=defn, ) if typ.is_protocol and typ.defn.type_vars: Maybe use existing `type-var` error code? def visit_class_def(self, defn: ClassDef) -> None: f'Variance of TypeVar "{base_tvar.name}" incompatible ' "with variance in parent type", context=defn, + code=codes.TYPE_VAR, ) if typ.is_protocol and typ.defn.type_vars:
codereview_new_python_data_8722
def type_check_second_pass(self) -> bool: self.time_spent_us += time_spent_us(t0) return result - def detect_partially_defined_vars(self, type_map: Dict[Expression, Type]) -> None: assert self.tree is not None, "Internal error: method must be called on parsed file only" manager = self.manager if manager.errors.is_error_code_enabled(codes.PARTIALLY_DEFINED): ```suggestion def detect_partially_defined_vars(self, type_map: dict[Expression, Type]) -> None: ``` def type_check_second_pass(self) -> bool: self.time_spent_us += time_spent_us(t0) return result + def detect_partially_defined_vars(self, type_map: dict[Expression, Type]) -> None: assert self.tree is not None, "Internal error: method must be called on parsed file only" manager = self.manager if manager.errors.is_error_code_enabled(codes.PARTIALLY_DEFINED):
codereview_new_python_data_8723
class BranchState: def __init__( self, - must_be_defined: Optional[set[str]] = None, - may_be_defined: Optional[set[str]] = None, skipped: bool = False, ) -> None: if may_be_defined is None: Style nit: ```suggestion must_be_defined: set[str] | None = None, may_be_defined: set[str] | None = None, ``` class BranchState: def __init__( self, + must_be_defined: set[str] | None = None, + may_be_defined: set[str] | None = None, skipped: bool = False, ) -> None: if may_be_defined is None:
codereview_new_python_data_8724
def format_error( def is_typeshed_file(typeshed_dir: str | None, file: str) -> bool: - if typeshed_dir is None: - typeshed = TYPESHED_DIR - else: - typeshed = os.path.abspath(typeshed_dir) try: - return os.path.commonpath((typeshed, os.path.abspath(file))) == typeshed except ValueError: # Different drives on Windows return False This might be pretty slow if using a custom typeshed dir, since we are calculating the abspath over and over again. Maybe we should precalculate this in options and pass the absolute path instead? def format_error( def is_typeshed_file(typeshed_dir: str | None, file: str) -> bool: + typeshed_dir = typeshed_dir if typeshed_dir is not None else TYPESHED_DIR try: + return os.path.commonpath((typeshed_dir, os.path.abspath(file))) == typeshed_dir except ValueError: # Different drives on Windows return False
codereview_new_python_data_8725
def visit_assert_type_expr(self, expr: AssertTypeExpr) -> Type:
        if not is_same_type(source_type, target_type):
            if not self.chk.in_checked_function():
                self.msg.note(
-                    "'assert_type' always outputs 'Any' in unchecked functions", expr.expr
                )
            self.msg.assert_type_fail(source_type, target_type, expr)
        return source_type
I don't think this is quite right; assert_type() doesn't output anything.
def visit_assert_type_expr(self, expr: AssertTypeExpr) -> Type:
        if not is_same_type(source_type, target_type):
            if not self.chk.in_checked_function():
                self.msg.note(
+                    '"assert_type" expects everything to be "Any" in unchecked functions',
+                    expr.expr,
                )
            self.msg.assert_type_fail(source_type, target_type, expr)
        return source_type
codereview_new_python_data_8726
def find_cache_meta(id: str, path: str, manager: BuildManager) -> CacheMeta | No
    )
    # Don't check for path match, that is dealt with in validate_meta().
-    if m.id != id or m.data_mtime is None:
        manager.log(f"Metadata abandoned for {id}: attributes are missing")
        return None
Not sure about this. Sure, these attributes may never be None according to their types, but it's good to be defensive against a corrupted cache.
def find_cache_meta(id: str, path: str, manager: BuildManager) -> CacheMeta | No
    )
    # Don't check for path match, that is dealt with in validate_meta().
+    #
+    # TODO: these `type: ignore`s wouldn't be necessary
+    # if the type annotations for CacheMeta were more accurate
+    # (all of these attributes can be `None`)
+    if (
+        m.id != id
+        or m.mtime is None  # type: ignore[redundant-expr]
+        or m.size is None  # type: ignore[redundant-expr]
+        or m.dependencies is None  # type: ignore[redundant-expr]
+        or m.data_mtime is None
+    ):
        manager.log(f"Metadata abandoned for {id}: attributes are missing")
        return None
codereview_new_python_data_8727
def transform_args(
        for a, kd in zip(args.kwonlyargs, args.kw_defaults):
            new_args.append(
                self.make_argument(
-                    a, kd, ARG_NAMED if kd is None else ARG_NAMED_OPT, no_type_check  # type: ignore[redundant-expr]
                )
            )
            names.append(a)
This one does not look right: `kd` can be `None`, can't it?
def transform_args(
        for a, kd in zip(args.kwonlyargs, args.kw_defaults):
            new_args.append(
                self.make_argument(
+                    a, kd, ARG_NAMED if kd is None else ARG_NAMED_OPT, no_type_check
                )
            )
            names.append(a)
codereview_new_python_data_8728
def is_literal_type(typ: ProperType, fallback_fullname: str, value: LiteralValue
    return typ.value == value
-def is_self_type_like(typ: Type, is_class: bool) -> bool:
    """Does this look like a self-type annotation?"""
    typ = get_proper_type(typ)
-    if not is_class:
        return isinstance(typ, TypeVarType)
    if not isinstance(typ, TypeType):
        return False
```suggestion
def is_self_type_like(typ: Type, *, is_classmethod: bool) -> bool:
    """Does this look like a self-type annotation?"""
    typ = get_proper_type(typ)
    if not is_classmethod:
```
def is_literal_type(typ: ProperType, fallback_fullname: str, value: LiteralValue
    return typ.value == value
+def is_self_type_like(typ: Type, *, is_classmethod: bool) -> bool:
    """Does this look like a self-type annotation?"""
    typ = get_proper_type(typ)
+    if not is_classmethod:
        return isinstance(typ, TypeVarType)
    if not isinstance(typ, TypeType):
        return False
codereview_new_python_data_8729
def visit_super_expr(self, e: SuperExpr) -> Type:
                # checking super() we will still get an error. So to be consistent, we also
                # allow such imprecise annotations for use with super(), where we fall back
                # to the current class MRO instead.
-                if is_self_type_like(instance_type, method.is_class):
                    if e.info and type_info in e.info.mro:
                        mro = e.info.mro
                        index = mro.index(type_info)
```suggestion
                if is_self_type_like(instance_type, is_classmethod=method.is_class):
```
def visit_super_expr(self, e: SuperExpr) -> Type:
                # checking super() we will still get an error. So to be consistent, we also
                # allow such imprecise annotations for use with super(), where we fall back
                # to the current class MRO instead.
+                if is_self_type_like(instance_type, is_classmethod=method.is_class):
                    if e.info and type_info in e.info.mro:
                        mro = e.info.mro
                        index = mro.index(type_info)
codereview_new_python_data_8730
def normalize(lines: Iterator[str]) -> Iterator[str]:
def produce_chunks(lines: Iterator[str]) -> Iterator[Chunk]:
-    current_chunk: Optional[Chunk] = None
    for line in normalize(lines):
        if is_header(line):
            if current_chunk is not None:
```suggestion
    current_chunk: Chunk | None = None
```
def normalize(lines: Iterator[str]) -> Iterator[str]:
def produce_chunks(lines: Iterator[str]) -> Iterator[Chunk]:
+    current_chunk: Chunk | None = None
    for line in normalize(lines):
        if is_header(line):
            if current_chunk is not None:
codereview_new_python_data_8731
SIZEOF_SIZE_T: Final = (
    int(SIZEOF_SIZE_T_SYSCONFIG)
    if SIZEOF_SIZE_T_SYSCONFIG is not None
-    else round((math.log2(sys.maxsize + 1)) / 8)
)
IS_32_BIT_PLATFORM: Final = int(SIZEOF_SIZE_T) == 4
the rounding sketched me out a little, here's a possible equivalent
```suggestion
    else (sys.maxsize + 1).bit_length() // 8
```
SIZEOF_SIZE_T: Final = (
    int(SIZEOF_SIZE_T_SYSCONFIG)
    if SIZEOF_SIZE_T_SYSCONFIG is not None
+    else (sys.maxsize + 1).bit_length() // 8
)
IS_32_BIT_PLATFORM: Final = int(SIZEOF_SIZE_T) == 4
codereview_new_python_data_8732
def test_module(module_name: str) -> Iterator[Error]:
    """
    stub = get_stub(module_name)
    if stub is None:
-        if "." in module_name:
-            last_part_of_module_name = module_name.split(".")[-1]
-        else:
-            last_part_of_module_name = module_name
-        if not is_probably_private(last_part_of_module_name):
            runtime_desc = repr(sys.modules[module_name]) if module_name in sys.modules else "N/A"
            yield Error(
                [module_name], "failed to find stubs", MISSING, None, runtime_desc=runtime_desc
I might be missing something obvious, but can this just be:
```suggestion
        if not is_probably_private(module_name.split(".")[-1]):
```
def test_module(module_name: str) -> Iterator[Error]:
    """
    stub = get_stub(module_name)
    if stub is None:
+        if not is_probably_private(module_name.split(".")[-1]):
            runtime_desc = repr(sys.modules[module_name]) if module_name in sys.modules else "N/A"
            yield Error(
                [module_name], "failed to find stubs", MISSING, None, runtime_desc=runtime_desc
codereview_new_python_data_8733
def __init__(self) -> None:
        # A comma-separated list packages for mypy to type check
        self.packages: list[str] | None = None
-        # A comma-separated list modules for mypy to type check
        self.modules: list[str] | None = None
        # Write junit.xml to given file
Almost there, you missed updating this comment :-)
def __init__(self) -> None:
        # A comma-separated list packages for mypy to type check
        self.packages: list[str] | None = None
+        # A comma-separated list of modules for mypy to type check
        self.modules: list[str] | None = None
        # Write junit.xml to given file
codereview_new_python_data_8734
def __init__(self) -> None:
        # supports globbing
        self.files: list[str] | None = None
-        # A comma-separated list of packages for mypy to type check
        self.packages: list[str] | None = None
-        # A comma-separated list of modules for mypy to type check
        self.modules: list[str] | None = None
        # Write junit.xml to given file
```suggestion
        # A list of packages for mypy to type check
        self.packages: list[str] | None = None
        # A list of modules for mypy to type check
        self.modules: list[str] | None = None
```
def __init__(self) -> None:
        # supports globbing
        self.files: list[str] | None = None
+        # A list of packages for mypy to type check
        self.packages: list[str] | None = None
+        # A list of modules for mypy to type check
        self.modules: list[str] | None = None
        # Write junit.xml to given file
codereview_new_python_data_8735
-from mypy.plugin import Plugin
from mypy.nodes import ARG_POS, Argument, Var
from mypy.plugins.common import add_method
from mypy.types import NoneType
class ClassMethodPlugin(Plugin):
-    def get_base_class_hook(self, fullname: str):
        if "BaseAddMethod" in fullname:
            return add_extra_methods_hook
        return None
-def add_extra_methods_hook(ctx):
    add_method(ctx, "foo_classmethod", [], NoneType(), is_classmethod=True)
    add_method(
        ctx,
Can you please type all arguments / returns? This is just a style we follow in other test plugins.
+from typing import Callable, Optional
+
from mypy.nodes import ARG_POS, Argument, Var
+from mypy.plugin import ClassDefContext, Plugin
from mypy.plugins.common import add_method
from mypy.types import NoneType
class ClassMethodPlugin(Plugin):
+    def get_base_class_hook(self, fullname: str) -> Optional[Callable[[ClassDefContext], None]]:
        if "BaseAddMethod" in fullname:
            return add_extra_methods_hook
        return None
+def add_extra_methods_hook(ctx: ClassDefContext) -> None:
    add_method(ctx, "foo_classmethod", [], NoneType(), is_classmethod=True)
    add_method(
        ctx,
codereview_new_python_data_8736
def type_check_second_pass(self) -> bool:
        t0 = time_ref()
        with self.wrap_context():
            result = self.type_checker().check_second_pass()
-            self.time_spent_us += time_spent_us(t0)
-            return result
    def finish_passes(self) -> None:
        assert self.tree is not None, "Internal error: method must be called on parsed file only"
I'm not sure if I agree with this refactor. If you want to measure the whole context manager, you should have the addition of `time_spent_us` after it exits, if you want to measure just the time of the second pass, you should put the `t0 =...` line below `self.wrap_context` so that isn't measured.
def type_check_second_pass(self) -> bool:
        t0 = time_ref()
        with self.wrap_context():
            result = self.type_checker().check_second_pass()
+        self.time_spent_us += time_spent_us(t0)
+        return result
    def finish_passes(self) -> None:
        assert self.tree is not None, "Internal error: method must be called on parsed file only"
codereview_new_python_data_8737
def verify_funcitem(
    runtime_abstract = getattr(runtime, "__isabstractmethod__", False)
    # The opposite can exist: some implementations omit `@abstractmethod` decorators
    if runtime_abstract and not stub_abstract:
-        yield Error(object_path, "runtime method is abstract, but stub is not", stub, runtime)
    for message in _verify_static_class_methods(stub, runtime, object_path):
        yield Error(object_path, "is inconsistent, " + message, stub, runtime)
The signatures of the runtime and the stub are irrelevant for this check, so the default error message here won't be very good. Maybe something like this would be better?
```suggestion
        yield Error(object_path, "runtime method is abstract, but stub is not", stub, runtime, stub_desc="A concrete method", runtime_desc="An abstract method")
```
def verify_funcitem(
    runtime_abstract = getattr(runtime, "__isabstractmethod__", False)
    # The opposite can exist: some implementations omit `@abstractmethod` decorators
    if runtime_abstract and not stub_abstract:
+        yield Error(object_path, "runtime method is abstract, but stub is not", stub, runtime, stub_desc="A concrete method", runtime_desc="An abstract method")
    for message in _verify_static_class_methods(stub, runtime, object_path):
        yield Error(object_path, "is inconsistent, " + message, stub, runtime)
codereview_new_python_data_8738
def verify_funcitem(
    if runtime_abstract and not stub_abstract:
        yield Error(
            object_path,
-            "runtime method is abstract, but stub is not",
            stub,
            runtime,
-            stub_desc="A concrete method",
-            runtime_desc="An abstract method",
        )
    for message in _verify_static_class_methods(stub, runtime, object_path):
```suggestion
            object_path,
            "is inconsistent, runtime method is abstract but stub is not",
            stub,
            runtime,
```
def verify_funcitem(
    if runtime_abstract and not stub_abstract:
        yield Error(
            object_path,
+            "is inconsistent, runtime method is abstract but stub is not",
            stub,
            runtime,
        )
    for message in _verify_static_class_methods(stub, runtime, object_path):
codereview_new_python_data_8739
def _verify_exported_names(
        object_path,
        (
            "names exported from the stub do not correspond to the names exported at runtime. "
-            "This is due to an inaccurate `__all__` in the stub or things missing from the stub."
        ),
        # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very
        # relevant here, and it makes for a prettier error message
```suggestion
            "This is probably due to an inaccurate `__all__` in the stub or things being missing from the stub."
```
def _verify_exported_names(
        object_path,
        (
            "names exported from the stub do not correspond to the names exported at runtime. "
+            "This is probably due to an inaccurate `__all__` in the stub or things being missing from the stub."
        ),
        # Pass in MISSING instead of the stub and runtime objects, as the line numbers aren't very
        # relevant here, and it makes for a prettier error message
codereview_new_python_data_8740
def infer_condition_value(expr: Expression, options: Options) -> int:
        if name == "PY2":
            result = ALWAYS_FALSE
        elif name == "PY3":
-            result = ALWAYS_TRUE if pyversion[0] == 3 else ALWAYS_FALSE
        elif name == "MYPY" or name == "TYPE_CHECKING":
            result = MYPY_TRUE
        elif name in options.always_true:
```suggestion
            result = ALWAYS_TRUE
```
def infer_condition_value(expr: Expression, options: Options) -> int:
        if name == "PY2":
            result = ALWAYS_FALSE
        elif name == "PY3":
+            result = ALWAYS_TRUE
        elif name == "MYPY" or name == "TYPE_CHECKING":
            result = MYPY_TRUE
        elif name in options.always_true:
codereview_new_python_data_8741
def visit_Num(self, n: Num) -> Type:
    # Str(string s)
    def visit_Str(self, n: Str) -> Type:
-        # Do a getattr because the field doesn't exist in 3.8 (where
-        # this method doesn't actually ever run.) We can't just do
-        # an attribute access with a `# type: ignore` because it would be
-        # unused on < 3.8.
-        kind: str = getattr(n, "kind")  # noqa
-
        return parse_type_string(n.s, "builtins.str", self.line, n.col_offset)
    # Bytes(bytes s)
This now became unused
def visit_Num(self, n: Num) -> Type:
    # Str(string s)
    def visit_Str(self, n: Str) -> Type:
        return parse_type_string(n.s, "builtins.str", self.line, n.col_offset)
    # Bytes(bytes s)
codereview_new_python_data_8742
def accept(self, visitor: ExpressionVisitor[T]) -> T:
        return visitor.visit_int_expr(self)
-# How mypy uses StrExpr, BytesExpr, and UnicodeExpr:
#
# b'x' -> BytesExpr
# 'x', u'x' -> StrExpr
```suggestion
# How mypy uses StrExpr and BytesExpr:
```
def accept(self, visitor: ExpressionVisitor[T]) -> T:
        return visitor.visit_int_expr(self)
+# How mypy uses StrExpr and BytesExpr:
#
# b'x' -> BytesExpr
# 'x', u'x' -> StrExpr
codereview_new_python_data_8743
from mypy.test.typefixture import TypeFixture
from mypy.constraints import infer_constraints, SUBTYPE_OF, SUPERTYPE_OF, Constraint
class ConstraintsSuite(Suite):
    def setUp(self) -> None:
        self.fx = TypeFixture()
To fix `flake8`:
```suggestion
class ConstraintsSuite(Suite):
```
from mypy.test.typefixture import TypeFixture
from mypy.constraints import infer_constraints, SUBTYPE_OF, SUPERTYPE_OF, Constraint
+
class ConstraintsSuite(Suite):
    def setUp(self) -> None:
        self.fx = TypeFixture()
codereview_new_python_data_8744
def visit_none_type(self, left: NoneType) -> bool:
                # None is compatible with Hashable (and other similar protocols). This is
                # slightly sloppy since we don't check the signature of "__hash__".
                # None is also compatible with `SupportsStr` protocol.
-                supported_members = frozenset(("__hash__", "__str__"))
                return not members or all(member in supported_members for member in members)
            return False
        else:
Not sure how important it is to micro-optimize this, but maybe move the frozenset into a global constant?
def visit_none_type(self, left: NoneType) -> bool:
                # None is compatible with Hashable (and other similar protocols). This is
                # slightly sloppy since we don't check the signature of "__hash__".
                # None is also compatible with `SupportsStr` protocol.
                return not members or all(member in supported_members for member in members)
            return False
        else:
codereview_new_python_data_8745
def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
    def check_unbound_return_typevar(self, typ: CallableType) -> None:
        """Fails when the return typevar is not defined in arguments."""
-        arg_type_visitor = CollectArgTypes()
-        for argtype in typ.arg_types:
-            argtype.accept(arg_type_visitor)
-        if typ.ret_type not in arg_type_visitor.arg_types and typ.ret_type in typ.variables:
-            self.fail(message_registry.UNBOUND_TYPEVAR, typ.ret_type, code=TYPE_VAR)
    def check_default_args(self, item: FuncItem, body_is_trivial: bool) -> None:
        for arg in item.arguments:
Could we micro optimise this by first checking `if typ.ret_type in typ.variables` and only then doing the collection of arg types?
def check_func_def(self, defn: FuncItem, typ: CallableType, name: Optional[str])
    def check_unbound_return_typevar(self, typ: CallableType) -> None:
        """Fails when the return typevar is not defined in arguments."""
+        if (typ.ret_type in typ.variables):
+            arg_type_visitor = CollectArgTypes()
+            for argtype in typ.arg_types:
+                argtype.accept(arg_type_visitor)
+            if typ.ret_type not in arg_type_visitor.arg_types:
+                self.fail(message_registry.UNBOUND_TYPEVAR, typ.ret_type, code=TYPE_VAR)
    def check_default_args(self, item: FuncItem, body_is_trivial: bool) -> None:
        for arg in item.arguments:
codereview_new_python_data_8746
def generate_stub_for_c_module(module_name: str,
        if name not in done and not inspect.ismodule(obj):
            type_str = strip_or_import(get_type_fullname(type(obj)), module, imports)
            variables.append(f'{name}: {type_str}')
-    output = list(sorted(set(imports)))
    for line in variables:
        output.append(line)
    for line in types:
`sorted()` already returns a list
```suggestion
    output = sorted(set(imports))
```
def generate_stub_for_c_module(module_name: str,
        if name not in done and not inspect.ismodule(obj):
            type_str = strip_or_import(get_type_fullname(type(obj)), module, imports)
            variables.append(f'{name}: {type_str}')
+    output = sorted(set(imports))
    for line in variables:
        output.append(line)
    for line in types:
codereview_new_python_data_8747
def normalize_file_output(content: List[str], current_abs_path: str) -> List[str
def find_test_files(pattern: str, exclude: Optional[List[str]] = None) -> List[str]:
    return [
-        str(x.name)
-        for x in (pathlib.Path("./test-data/unit").rglob(pattern))
-        if str(x.name) not in (exclude or [])
    ]
`x.name` is already a string, you can remove the conversion.
def normalize_file_output(content: List[str], current_abs_path: str) -> List[str
def find_test_files(pattern: str, exclude: Optional[List[str]] = None) -> List[str]:
    return [
+        path.name
+        for path in (pathlib.Path("./test-data/unit").rglob(pattern))
+        if path.name not in (exclude or [])
    ]
codereview_new_python_data_8748
def num_skipped_suffix_lines(a1: List[str], a2: List[str]) -> int:
def testfile_pyversion(path: str) -> Tuple[int, int]:
-    if path.endswith('python2.test'):
-        raise ValueError(path)
-        return defaults.PYTHON2_VERSION
-    elif path.endswith('python310.test'):
        return 3, 10
    else:
        return defaults.PYTHON3_VERSION
Maybe just remove this case?
def num_skipped_suffix_lines(a1: List[str], a2: List[str]) -> int:
def testfile_pyversion(path: str) -> Tuple[int, int]:
+    if path.endswith('python310.test'):
        return 3, 10
    else:
        return defaults.PYTHON3_VERSION
codereview_new_python_data_8749
def set_strict_flags() -> None:
    if options.python_version < (3,):
        parser.error(
            "Mypy no longer supports checking Python 2 code. "
            "Consider pinning to mypy<=0.971 if you need to check Python 2 code."
        )
    try:
        infer_python_executable(options, special_opts)
```suggestion
            "Consider pinning to mypy<0.980 if you need to check Python 2 code."
```
def set_strict_flags() -> None:
    if options.python_version < (3,):
        parser.error(
            "Mypy no longer supports checking Python 2 code. "
+            "Consider pinning to mypy<0.980 if you need to check Python 2 code."
        )
    try:
        infer_python_executable(options, special_opts)