diff --git a/.editorconfig b/.editorconfig index 560067027c52..1e8be1006aa9 100644 --- a/.editorconfig +++ b/.editorconfig @@ -1,6 +1,6 @@ root = true -[*.{py,pyi,c,cpp,h,rst,md,yml,json,test}] +[*.{py,pyi,c,cpp,h,rst,md,yml,yaml,json,test}] trim_trailing_whitespace = true insert_final_newline = true indent_style = space @@ -8,5 +8,5 @@ indent_style = space [*.{py,pyi,c,h,json,test}] indent_size = 4 -[*.yml] +[*.{yml,yaml}] indent_size = 2 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0de686b7eb01..d794c780ad3b 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ repos: hooks: - id: black - repo: https://github.com/pycqa/isort - rev: 5.11.4 # must match test-requirements.txt + rev: 5.11.5 # must match test-requirements.txt hooks: - id: isort - repo: https://github.com/pycqa/flake8 diff --git a/build-requirements.txt b/build-requirements.txt index 52c518d53bc2..0b1e6d43103a 100644 --- a/build-requirements.txt +++ b/build-requirements.txt @@ -2,4 +2,4 @@ -r mypy-requirements.txt types-psutil types-setuptools -types-typed-ast>=1.5.8,<1.6.0 +types-typed-ast>=1.5.8.5,<1.6.0 diff --git a/docs/source/common_issues.rst b/docs/source/common_issues.rst index afb8e7d3ffe1..e6570b7eef5b 100644 --- a/docs/source/common_issues.rst +++ b/docs/source/common_issues.rst @@ -186,7 +186,7 @@ Ignoring a whole file * To only ignore errors, use a top-level ``# mypy: ignore-errors`` comment instead. * To only ignore errors with a specific error code, use a top-level - ``# mypy: disable-error-code=...`` comment. + ``# mypy: disable-error-code="..."`` comment. Example: ``# mypy: disable-error-code="truthy-bool, ignore-without-code"`` * To replace the contents of a module with ``Any``, use a per-module ``follow_imports = skip``. See :ref:`Following imports <follow-imports>` for details. diff --git a/docs/source/error_code_list.rst b/docs/source/error_code_list.rst index 0388cd2165dd..1f08a36414e9 100644 --- a/docs/source/error_code_list.rst +++ b/docs/source/error_code_list.rst @@ -344,7 +344,7 @@ Check that assignment target is not a method [method-assign] In general, assigning to a method on class object or instance (a.k.a. monkey-patching) is ambiguous in terms of types, since Python's static type -system cannot express difference between bound and unbound callable types. +system cannot express the difference between bound and unbound callable types. Consider this example: .. code-block:: python @@ -355,18 +355,18 @@ Consider this example: def h(self: A) -> None: pass - A.f = h # type of h is Callable[[A], None] - A().f() # this works - A.f = A().g # type of A().g is Callable[[], None] - A().f() # but this also works at runtime + A.f = h # Type of h is Callable[[A], None] + A().f() # This works + A.f = A().g # Type of A().g is Callable[[], None] + A().f() # ...but this also works at runtime To prevent the ambiguity, mypy will flag both assignments by default. If this -error code is disabled, mypy will treat all method assignments r.h.s. as unbound, -so the second assignment will still generate an error. +error code is disabled, mypy will treat the assigned value in all method assignments as unbound, +so only the second assignment will still generate an error. .. note:: - This error code is a sub-error code of a wider ``[assignment]`` code. + This error code is a subcode of the more general ``[assignment]`` code. 
Check type variable values [type-var] ------------------------------------- @@ -456,11 +456,11 @@ Example: Check TypedDict items [typeddict-item] -------------------------------------- -When constructing a ``TypedDict`` object, mypy checks that each key and value is compatible -with the ``TypedDict`` type that is inferred from the surrounding context. +When constructing a TypedDict object, mypy checks that each key and value is compatible +with the TypedDict type that is inferred from the surrounding context. -When getting a ``TypedDict`` item, mypy checks that the key -exists. When assigning to a ``TypedDict``, mypy checks that both the +When getting a TypedDict item, mypy checks that the key +exists. When assigning to a TypedDict, mypy checks that both the key and the value are valid. Example: @@ -480,10 +480,13 @@ Example: Check TypedDict Keys [typeddict-unknown-key] -------------------------------------------- -When constructing a ``TypedDict`` object, mypy checks whether the definition -contains unknown keys. For convenience's sake, mypy will not generate an error -when a ``TypedDict`` has extra keys if it's passed to a function as an argument. -However, it will generate an error when these are created. Example: +When constructing a TypedDict object, mypy checks whether the +definition contains unknown keys, to catch invalid keys and +misspellings. On the other hand, mypy will not generate an error when +a previously constructed TypedDict value with extra keys is passed +to a function as an argument, since TypedDict values support +structural subtyping ("static duck typing") and the keys are assumed +to have been validated at the point of construction. Example: .. code-block:: python @@ -502,13 +505,13 @@ However, it will generate an error when these are created. Example: a: Point = {"x": 1, "y": 4} b: Point3D = {"x": 2, "y": 5, "z": 6} - # OK - add_x_coordinates(a, b) + add_x_coordinates(a, b) # OK + # Error: Extra key "z" for TypedDict "Point" [typeddict-unknown-key] add_x_coordinates(a, {"x": 1, "y": 4, "z": 5}) - -Setting an unknown value on a ``TypedDict`` will also generate this error: +Setting a TypedDict item using an unknown key will also generate this +error, since it could be a misspelling: .. code-block:: python @@ -516,9 +519,9 @@ Setting an unknown value on a ``TypedDict`` will also generate this error: # Error: Extra key "z" for TypedDict "Point" [typeddict-unknown-key] a["z"] = 3 - -Whereas reading an unknown value will generate the more generic/serious -``typeddict-item``: +Reading an unknown key will generate the more general (and serious) +``typeddict-item`` error, which is likely to result in an exception at +runtime: .. code-block:: python @@ -528,7 +531,7 @@ Whereas reading an unknown value will generate the more generic/serious .. note:: - This error code is a sub-error code of a wider ``[typeddict-item]`` code. + This error code is a subcode of the wider ``[typeddict-item]`` code. Check that type of target is known [has-type] --------------------------------------------- @@ -810,8 +813,8 @@ Check that literal is used where expected [literal-required] There are some places where only a (string) literal value is expected for the purposes of static type checking, for example a ``TypedDict`` key, or a ``__match_args__`` item. Providing a ``str``-valued variable in such contexts -will result in an error. Note however, in many cases you can use ``Final``, -or ``Literal`` variables, for example: +will result in an error. 
Note that in many cases you can also use ``Final`` +or ``Literal`` variables. Example: .. code-block:: python diff --git a/docs/source/error_codes.rst b/docs/source/error_codes.rst index 34bb8ab6b5e1..c8a2728b5697 100644 --- a/docs/source/error_codes.rst +++ b/docs/source/error_codes.rst @@ -114,13 +114,13 @@ So one can e.g. enable some code globally, disable it for all tests in the corresponding config section, and then re-enable it with an inline comment in some specific test. -Sub-error codes of other error codes ------------------------------------- +Subcodes of error codes +----------------------- -In rare cases (mostly for backwards compatibility reasons), some error -code may be covered by another, wider error code. For example, an error with +In some cases, mostly for backwards compatibility reasons, an error +code may be covered also by another, wider error code. For example, an error with code ``[method-assign]`` can be ignored by ``# type: ignore[assignment]``. Similar logic works for disabling error codes globally. If a given error code -is a sub code of another one, it must mentioned in the docs for the narrower -code. This hierarchy is not nested, there cannot be sub-error codes of other -sub-error codes. +is a subcode of another one, it will be mentioned in the documentation for the narrower +code. This hierarchy is not nested: there cannot be subcodes of other +subcodes. diff --git a/docs/source/existing_code.rst b/docs/source/existing_code.rst index 410d7af0c350..c66008f4b782 100644 --- a/docs/source/existing_code.rst +++ b/docs/source/existing_code.rst @@ -183,7 +183,7 @@ An excellent goal to aim for is to have your codebase pass when run against ``my This basically ensures that you will never have a type related error without an explicit circumvention somewhere (such as a ``# type: ignore`` comment). -The following config is equivalent to ``--strict`` (as of mypy 0.990): +The following config is equivalent to ``--strict`` (as of mypy 1.0): .. code-block:: text @@ -191,7 +191,6 @@ The following config is equivalent to ``--strict`` (as of mypy 0.990): warn_unused_configs = True warn_redundant_casts = True warn_unused_ignores = True - no_implicit_optional = True # Getting these passing should be easy strict_equality = True diff --git a/misc/convert-cache.py b/misc/convert-cache.py index e5da9c2650d5..2a8a9579c11b 100755 --- a/misc/convert-cache.py +++ b/misc/convert-cache.py @@ -8,6 +8,7 @@ from __future__ import annotations import os +import re import sys sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) @@ -36,15 +37,23 @@ def main() -> None: input_dir = args.input_dir output_dir = args.output_dir or input_dir + assert os.path.isdir(output_dir), f"{output_dir} is not a directory" if args.to_sqlite: input: MetadataStore = FilesystemMetadataStore(input_dir) output: MetadataStore = SqliteMetadataStore(output_dir) else: + fnam = os.path.join(input_dir, "cache.db") + msg = f"{fnam} does not exist" + if not re.match(r"[0-9]+\.[0-9]+$", os.path.basename(input_dir)): + msg += f" (are you missing Python version at the end, e.g. {input_dir}/3.11)" + assert os.path.isfile(fnam), msg input, output = SqliteMetadataStore(input_dir), FilesystemMetadataStore(output_dir) for s in input.list_all(): if s.endswith(".json"): - assert output.write(s, input.read(s), input.getmtime(s)), "Failed to write cache file!" + assert output.write( + s, input.read(s), input.getmtime(s) + ), f"Failed to write cache file {s}!" 
output.commit() diff --git a/misc/sync-typeshed.py b/misc/sync-typeshed.py index 86b0fd774e0c..813ced68a1b3 100644 --- a/misc/sync-typeshed.py +++ b/misc/sync-typeshed.py @@ -179,9 +179,10 @@ def main() -> None: print("Created typeshed sync commit.") commits_to_cherry_pick = [ - "874afd970", # LiteralString reverts - "3a240111e", # sum reverts - "f968d6ce0", # ctypes reverts + "250e6fda7", # LiteralString reverts + "a633a7283", # sum reverts + "1ceeb1fdb", # ctypes reverts + "e1a82a2b8", # ParamSpec for functools.wraps ] for commit in commits_to_cherry_pick: subprocess.run(["git", "cherry-pick", commit], check=True) diff --git a/mypy/api.py b/mypy/api.py index 589bfbbfa1a7..612fd0442276 100644 --- a/mypy/api.py +++ b/mypy/api.py @@ -47,7 +47,7 @@ import sys from io import StringIO -from typing import Callable, TextIO, cast +from typing import Callable, TextIO def _run(main_wrapper: Callable[[TextIO, TextIO], None]) -> tuple[str, str, int]: @@ -59,7 +59,8 @@ def _run(main_wrapper: Callable[[TextIO, TextIO], None]) -> tuple[str, str, int] main_wrapper(stdout, stderr) exit_status = 0 except SystemExit as system_exit: - exit_status = cast(int, system_exit.code) + assert isinstance(system_exit.code, int) + exit_status = system_exit.code return stdout.getvalue(), stderr.getvalue(), exit_status diff --git a/mypy/build.py b/mypy/build.py index a4817d1866c7..e36535a1aa80 100644 --- a/mypy/build.py +++ b/mypy/build.py @@ -705,8 +705,8 @@ def __init__( self.quickstart_state = read_quickstart_file(options, self.stdout) # Fine grained targets (module top levels and top level functions) processed by # the semantic analyzer, used only for testing. Currently used only by the new - # semantic analyzer. - self.processed_targets: list[str] = [] + # semantic analyzer. Tuple of module and target name. + self.processed_targets: list[tuple[str, str]] = [] # Missing stub packages encountered. 
self.missing_stub_packages: set[str] = set() # Cache for mypy ASTs that have completed semantic analysis @@ -919,7 +919,7 @@ def stats_summary(self) -> Mapping[str, object]: def deps_to_json(x: dict[str, set[str]]) -> str: - return json.dumps({k: list(v) for k, v in x.items()}) + return json.dumps({k: list(v) for k, v in x.items()}, separators=(",", ":")) # File for storing metadata about all the fine-grained dependency caches @@ -987,7 +987,7 @@ def write_deps_cache( meta = {"snapshot": meta_snapshot, "deps_meta": fg_deps_meta} - if not metastore.write(DEPS_META_FILE, json.dumps(meta)): + if not metastore.write(DEPS_META_FILE, json.dumps(meta, separators=(",", ":"))): manager.log(f"Error writing fine-grained deps meta JSON file {DEPS_META_FILE}") error = True @@ -1055,7 +1055,8 @@ def generate_deps_for_cache(manager: BuildManager, graph: Graph) -> dict[str, di def write_plugins_snapshot(manager: BuildManager) -> None: """Write snapshot of versions and hashes of currently active plugins.""" - if not manager.metastore.write(PLUGIN_SNAPSHOT_FILE, json.dumps(manager.plugins_snapshot)): + snapshot = json.dumps(manager.plugins_snapshot, separators=(",", ":")) + if not manager.metastore.write(PLUGIN_SNAPSHOT_FILE, snapshot): manager.errors.set_file(_cache_dir_prefix(manager.options), None, manager.options) manager.errors.report(0, 0, "Error writing plugins snapshot", blocker=True) @@ -1487,7 +1488,7 @@ def validate_meta( if manager.options.debug_cache: meta_str = json.dumps(meta_dict, indent=2, sort_keys=True) else: - meta_str = json.dumps(meta_dict) + meta_str = json.dumps(meta_dict, separators=(",", ":")) meta_json, _, _ = get_cache_names(id, path, manager.options) manager.log( "Updating mtime for {}: file {}, meta {}, mtime {}".format( @@ -1517,7 +1518,7 @@ def json_dumps(obj: Any, debug_cache: bool) -> str: if debug_cache: return json.dumps(obj, indent=2, sort_keys=True) else: - return json.dumps(obj, sort_keys=True) + return json.dumps(obj, sort_keys=True, separators=(",", ":")) def write_cache( @@ -2410,6 +2411,12 @@ def finish_passes(self) -> None: manager.report_file(self.tree, self.type_map(), self.options) self.update_fine_grained_deps(self.manager.fg_deps) + + if manager.options.export_ref_info: + write_undocumented_ref_info( + self, manager.metastore, manager.options, self.type_map() + ) + self.free_state() if not manager.options.fine_grained_incremental and not manager.options.preserve_asts: free_tree(self.tree) @@ -2941,6 +2948,7 @@ def dispatch(sources: list[BuildSource], manager: BuildManager, stdout: TextIO) dump_all_dependencies( manager.modules, manager.all_types, manager.options.python_version, manager.options ) + return graph @@ -3616,3 +3624,24 @@ def is_silent_import_module(manager: BuildManager, path: str) -> bool: is_sub_path(path, dir) for dir in manager.search_paths.package_path + manager.search_paths.typeshed_path ) + + +def write_undocumented_ref_info( + state: State, metastore: MetadataStore, options: Options, type_map: dict[Expression, Type] +) -> None: + # This exports some dependency information in a rather ad-hoc fashion, which + # can be helpful for some tools. This is all highly experimental and could be + # removed at any time. + + from mypy.refinfo import get_undocumented_ref_info_json + + if not state.tree: + # We need a full AST for this. 
+ return + + _, data_file, _ = get_cache_names(state.id, state.xpath, options) + ref_info_file = ".".join(data_file.split(".")[:-2]) + ".refs.json" + assert not ref_info_file.startswith(".") + + deps_json = get_undocumented_ref_info_json(state.tree, type_map) + metastore.write(ref_info_file, json.dumps(deps_json, separators=(",", ":"))) diff --git a/mypy/checker.py b/mypy/checker.py index 4bf009f74092..dbd6db796ffe 100644 --- a/mypy/checker.py +++ b/mypy/checker.py @@ -629,7 +629,8 @@ def _visit_overloaded_func_def(self, defn: OverloadedFuncDef) -> None: if defn.is_property: # HACK: Infer the type of the property. - self.visit_decorator(cast(Decorator, defn.items[0])) + assert isinstance(defn.items[0], Decorator) + self.visit_decorator(defn.items[0]) for fdef in defn.items: assert isinstance(fdef, Decorator) self.check_func_item(fdef.func, name=fdef.func.name, allow_empty=True) @@ -1753,8 +1754,7 @@ def expand_typevars( result: list[tuple[FuncItem, CallableType]] = [] for substitutions in itertools.product(*subst): mapping = dict(substitutions) - expanded = cast(CallableType, expand_type(typ, mapping)) - result.append((expand_func(defn, mapping), expanded)) + result.append((expand_func(defn, mapping), expand_type(typ, mapping))) return result else: return [(defn, typ)] @@ -1876,23 +1876,6 @@ def check_method_override_for_base_with_name( original_class_or_static = False # a variable can't be class or static if isinstance(original_type, FunctionLike): - active_self_type = self.scope.active_self_type() - if isinstance(original_type, Overloaded) and active_self_type: - # If we have an overload, filter to overloads that match the self type. - # This avoids false positives for concrete subclasses of generic classes, - # see testSelfTypeOverrideCompatibility for an example. - # It's possible we might want to do this as part of bind_and_map_method - filtered_items = [ - item - for item in original_type.items - if not item.arg_types or is_subtype(active_self_type, item.arg_types[0]) - ] - # If we don't have any filtered_items, maybe it's always a valid override - # of the superclass? However if you get to that point you're in murky type - # territory anyway, so we just preserve the type and have the behaviour match - # that of older versions of mypy. - if filtered_items: - original_type = Overloaded(filtered_items) original_type = self.bind_and_map_method(base_attr, original_type, defn.info, base) if original_node and is_property(original_node): original_type = get_property_type(original_type) @@ -1964,10 +1947,28 @@ def bind_and_map_method( is_class_method = sym.node.func.is_class else: is_class_method = sym.node.is_class - bound = bind_self(typ, self.scope.active_self_type(), is_class_method) + + mapped_typ = cast(FunctionLike, map_type_from_supertype(typ, sub_info, super_info)) + active_self_type = self.scope.active_self_type() + if isinstance(mapped_typ, Overloaded) and active_self_type: + # If we have an overload, filter to overloads that match the self type. + # This avoids false positives for concrete subclasses of generic classes, + # see testSelfTypeOverrideCompatibility for an example. + filtered_items = [ + item + for item in mapped_typ.items + if not item.arg_types or is_subtype(active_self_type, item.arg_types[0]) + ] + # If we don't have any filtered_items, maybe it's always a valid override + # of the superclass? However if you get to that point you're in murky type + # territory anyway, so we just preserve the type and have the behaviour match + # that of older versions of mypy. 
+ if filtered_items: + mapped_typ = Overloaded(filtered_items) + + return bind_self(mapped_typ, active_self_type, is_class_method) else: - bound = typ - return cast(FunctionLike, map_type_from_supertype(bound, sub_info, super_info)) + return cast(FunctionLike, map_type_from_supertype(typ, sub_info, super_info)) def get_op_other_domain(self, tp: FunctionLike) -> Type | None: if isinstance(tp, CallableType): @@ -2484,7 +2485,14 @@ class C(B, A[int]): ... # this is unsafe because... first_type = get_proper_type(self.determine_type_of_member(first)) second_type = get_proper_type(self.determine_type_of_member(second)) - if isinstance(first_type, FunctionLike) and isinstance(second_type, FunctionLike): + # start with the special case that Instance can be a subtype of FunctionLike + call = None + if isinstance(first_type, Instance): + call = find_member("__call__", first_type, first_type, is_operator=True) + if call and isinstance(second_type, FunctionLike): + second_sig = self.bind_and_map_method(second, second_type, ctx, base2) + ok = is_subtype(call, second_sig, ignore_pos_arg_names=True) + elif isinstance(first_type, FunctionLike) and isinstance(second_type, FunctionLike): if first_type.is_type_obj() and second_type.is_type_obj(): # For class objects only check the subtype relationship of the classes, # since we allow incompatible overrides of '__init__'/'__new__' @@ -2576,9 +2584,8 @@ def check_import(self, node: ImportBase) -> None: if lvalue_type is None: # TODO: This is broken. lvalue_type = AnyType(TypeOfAny.special_form) - message = message_registry.INCOMPATIBLE_IMPORT_OF.format( - cast(NameExpr, assign.rvalue).name - ) + assert isinstance(assign.rvalue, NameExpr) + message = message_registry.INCOMPATIBLE_IMPORT_OF.format(assign.rvalue.name) self.check_simple_assignment( lvalue_type, assign.rvalue, @@ -3299,14 +3306,14 @@ def check_assignment_to_multiple_lvalues( rvalues.extend([TempNode(typ) for typ in typs.items]) elif self.type_is_iterable(typs) and isinstance(typs, Instance): if iterable_type is not None and iterable_type != self.iterable_item_type( - typs + typs, rvalue ): self.fail(message_registry.CONTIGUOUS_ITERABLE_EXPECTED, context) else: if last_idx is None or last_idx + 1 == idx_rval: rvalues.append(rval) last_idx = idx_rval - iterable_type = self.iterable_item_type(typs) + iterable_type = self.iterable_item_type(typs, rvalue) else: self.fail(message_registry.CONTIGUOUS_ITERABLE_EXPECTED, context) else: @@ -3632,8 +3639,10 @@ def check_multi_assignment_from_iterable( infer_lvalue_type: bool = True, ) -> None: rvalue_type = get_proper_type(rvalue_type) - if self.type_is_iterable(rvalue_type) and isinstance(rvalue_type, Instance): - item_type = self.iterable_item_type(rvalue_type) + if self.type_is_iterable(rvalue_type) and isinstance( + rvalue_type, (Instance, CallableType, TypeType, Overloaded) + ): + item_type = self.iterable_item_type(rvalue_type, context) for lv in lvalues: if isinstance(lv, StarExpr): items_type = self.named_generic_type("builtins.list", [item_type]) @@ -3656,8 +3665,8 @@ def check_lvalue(self, lvalue: Lvalue) -> tuple[Type | None, IndexExpr | None, V not isinstance(lvalue, NameExpr) or isinstance(lvalue.node, Var) ): if isinstance(lvalue, NameExpr): - inferred = cast(Var, lvalue.node) - assert isinstance(inferred, Var) + assert isinstance(lvalue.node, Var) + inferred = lvalue.node else: assert isinstance(lvalue, MemberExpr) self.expr_checker.accept(lvalue.expr) @@ -4047,7 +4056,7 @@ def check_indexed_assignment( ) lvalue.method_type = method_type - 
self.expr_checker.check_method_call( + res_type, _ = self.expr_checker.check_method_call( "__setitem__", basetype, method_type, @@ -4055,6 +4064,9 @@ def check_indexed_assignment( [nodes.ARG_POS, nodes.ARG_POS], context, ) + res_type = get_proper_type(res_type) + if isinstance(res_type, UninhabitedType) and not res_type.ambiguous: + self.binder.unreachable() def try_infer_partial_type_from_indexed_assignment( self, lvalue: IndexExpr, rvalue: Expression @@ -4980,7 +4992,8 @@ def intersect_instance_callable(self, typ: Instance, callable_type: CallableType # In order for this to work in incremental mode, the type we generate needs to # have a valid fullname and a corresponding entry in a symbol table. We generate # a unique name inside the symbol table of the current module. - cur_module = cast(MypyFile, self.scope.stack[0]) + cur_module = self.scope.stack[0] + assert isinstance(cur_module, MypyFile) gen_name = gen_unique_name(f"", cur_module.names) # Synthesize a fake TypeInfo @@ -6192,7 +6205,8 @@ def lookup(self, name: str) -> SymbolTableNode: else: b = self.globals.get("__builtins__", None) if b: - table = cast(MypyFile, b.node).names + assert isinstance(b.node, MypyFile) + table = b.node.names if name in table: return table[name] raise KeyError(f"Failed lookup: {name}") @@ -6206,7 +6220,8 @@ def lookup_qualified(self, name: str) -> SymbolTableNode: for i in range(1, len(parts) - 1): sym = n.names.get(parts[i]) assert sym is not None, "Internal error: attempted lookup of unknown name" - n = cast(MypyFile, sym.node) + assert isinstance(sym.node, MypyFile) + n = sym.node last = parts[-1] if last in n.names: return n.names[last] @@ -6384,23 +6399,18 @@ def note( return self.msg.note(msg, context, offset=offset, code=code) - def iterable_item_type(self, instance: Instance) -> Type: - iterable = map_instance_to_supertype(instance, self.lookup_typeinfo("typing.Iterable")) - item_type = iterable.args[0] - if not isinstance(get_proper_type(item_type), AnyType): - # This relies on 'map_instance_to_supertype' returning 'Iterable[Any]' - # in case there is no explicit base class. - return item_type + def iterable_item_type( + self, it: Instance | CallableType | TypeType | Overloaded, context: Context + ) -> Type: + if isinstance(it, Instance): + iterable = map_instance_to_supertype(it, self.lookup_typeinfo("typing.Iterable")) + item_type = iterable.args[0] + if not isinstance(get_proper_type(item_type), AnyType): + # This relies on 'map_instance_to_supertype' returning 'Iterable[Any]' + # in case there is no explicit base class. + return item_type # Try also structural typing. 
- iter_type = get_proper_type(find_member("__iter__", instance, instance, is_operator=True)) - if iter_type and isinstance(iter_type, CallableType): - ret_type = get_proper_type(iter_type.ret_type) - if isinstance(ret_type, Instance): - iterator = map_instance_to_supertype( - ret_type, self.lookup_typeinfo("typing.Iterator") - ) - item_type = iterator.args[0] - return item_type + return self.analyze_iterable_item_type_without_expression(it, context)[1] def function_type(self, func: FuncBase) -> FunctionLike: return function_type(func, self.named_type("builtins.function")) @@ -6517,7 +6527,8 @@ def is_writable_attribute(self, node: Node) -> bool: return False return True elif isinstance(node, OverloadedFuncDef) and node.is_property: - first_item = cast(Decorator, node.items[0]) + first_item = node.items[0] + assert isinstance(first_item, Decorator) return first_item.var.is_settable_property return False @@ -6768,6 +6779,7 @@ def conditional_types( def conditional_types_to_typemaps( expr: Expression, yes_type: Type | None, no_type: Type | None ) -> tuple[TypeMap, TypeMap]: + expr = collapse_walrus(expr) maps: list[TypeMap] = [] for typ in (yes_type, no_type): proper_type = get_proper_type(typ) @@ -7112,7 +7124,6 @@ def overload_can_never_match(signature: CallableType, other: CallableType) -> bo exp_signature = expand_type( signature, {tvar.id: erase_def_to_union_or_bound(tvar) for tvar in signature.variables} ) - assert isinstance(exp_signature, CallableType) return is_callable_compatible( exp_signature, other, is_compat=is_more_precise, ignore_return=True ) diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py index 38b5c2419d95..a053111bb223 100644 --- a/mypy/checkexpr.py +++ b/mypy/checkexpr.py @@ -4638,7 +4638,11 @@ def _super_arg_types(self, e: SuperExpr) -> Type | tuple[Type, Type]: return type_type, instance_type def visit_slice_expr(self, e: SliceExpr) -> Type: - expected = make_optional_type(self.named_type("builtins.int")) + try: + supports_index = self.chk.named_type("typing_extensions.SupportsIndex") + except KeyError: + supports_index = self.chk.named_type("builtins.int") # thanks, fixture life + expected = make_optional_type(supports_index) for index in [e.begin_index, e.end_index, e.stride]: if index: t = self.accept(index) @@ -5518,7 +5522,7 @@ def merge_typevars_in_callables_by_name( variables.append(tv) rename[tv.id] = unique_typevars[name] - target = cast(CallableType, expand_type(target, rename)) + target = expand_type(target, rename) output.append(target) return output, variables diff --git a/mypy/checkmember.py b/mypy/checkmember.py index a2c580e13446..2ba917715be0 100644 --- a/mypy/checkmember.py +++ b/mypy/checkmember.py @@ -312,7 +312,8 @@ def analyze_instance_member_access( if method.is_property: assert isinstance(method, OverloadedFuncDef) - first_item = cast(Decorator, method.items[0]) + first_item = method.items[0] + assert isinstance(first_item, Decorator) return analyze_var(name, first_item.var, typ, info, mx) if mx.is_lvalue: mx.msg.cant_assign_to_method(mx.context) @@ -412,6 +413,13 @@ def analyze_type_type_member_access( upper_bound = get_proper_type(typ.item.upper_bound) if isinstance(upper_bound, Instance): item = upper_bound + elif isinstance(upper_bound, UnionType): + return _analyze_member_access( + name, + TypeType.make_normalized(upper_bound, line=typ.line, column=typ.column), + mx, + override_info, + ) elif isinstance(upper_bound, TupleType): item = tuple_fallback(upper_bound) elif isinstance(upper_bound, AnyType): @@ -1150,7 +1158,7 @@ class B(A[str]): 
pass t = freshen_all_functions_type_vars(t) t = bind_self(t, original_type, is_classmethod=True) assert isuper is not None - t = cast(CallableType, expand_type_by_instance(t, isuper)) + t = expand_type_by_instance(t, isuper) freeze_all_type_vars(t) return t.copy_modified(variables=list(tvars) + list(t.variables)) elif isinstance(t, Overloaded): diff --git a/mypy/checkpattern.py b/mypy/checkpattern.py index 603b392eee29..e60ed8a11711 100644 --- a/mypy/checkpattern.py +++ b/mypy/checkpattern.py @@ -15,7 +15,7 @@ from mypy.maptype import map_instance_to_supertype from mypy.meet import narrow_declared_type from mypy.messages import MessageBuilder -from mypy.nodes import ARG_POS, Expression, NameExpr, TypeAlias, TypeInfo, Var +from mypy.nodes import ARG_POS, Context, Expression, NameExpr, TypeAlias, TypeInfo, Var from mypy.patterns import ( AsPattern, ClassPattern, @@ -242,7 +242,7 @@ def visit_sequence_pattern(self, o: SequencePattern) -> PatternType: elif size_diff > 0 and star_position is None: return self.early_non_match() else: - inner_type = self.get_sequence_type(current_type) + inner_type = self.get_sequence_type(current_type, o) if inner_type is None: inner_type = self.chk.named_type("builtins.object") inner_types = [inner_type] * len(o.patterns) @@ -309,12 +309,12 @@ def visit_sequence_pattern(self, o: SequencePattern) -> PatternType: new_type = current_type return PatternType(new_type, rest_type, captures) - def get_sequence_type(self, t: Type) -> Type | None: + def get_sequence_type(self, t: Type, context: Context) -> Type | None: t = get_proper_type(t) if isinstance(t, AnyType): return AnyType(TypeOfAny.from_another_any, t) if isinstance(t, UnionType): - items = [self.get_sequence_type(item) for item in t.items] + items = [self.get_sequence_type(item, context) for item in t.items] not_none_items = [item for item in items if item is not None] if len(not_none_items) > 0: return make_simplified_union(not_none_items) @@ -324,7 +324,7 @@ def get_sequence_type(self, t: Type) -> Type | None: if self.chk.type_is_iterable(t) and isinstance(t, (Instance, TupleType)): if isinstance(t, TupleType): t = tuple_fallback(t) - return self.chk.iterable_item_type(t) + return self.chk.iterable_item_type(t, context) else: return None diff --git a/mypy/constant_fold.py b/mypy/constant_fold.py index a22c1b9ba9e5..a1011397eba8 100644 --- a/mypy/constant_fold.py +++ b/mypy/constant_fold.py @@ -64,6 +64,8 @@ def constant_fold_expr(expr: Expression, cur_mod_id: str) -> ConstantValue | Non value = constant_fold_expr(expr.expr, cur_mod_id) if isinstance(value, int): return constant_fold_unary_int_op(expr.op, value) + if isinstance(value, float): + return constant_fold_unary_float_op(expr.op, value) return None @@ -110,6 +112,14 @@ def constant_fold_unary_int_op(op: str, value: int) -> int | None: return None +def constant_fold_unary_float_op(op: str, value: float) -> float | None: + if op == "-": + return -value + elif op == "+": + return value + return None + + def constant_fold_binary_str_op(op: str, left: str, right: str) -> str | None: if op == "+": return left + right diff --git a/mypy/constraints.py b/mypy/constraints.py index a8f04094ca63..c8c3c7933b6e 100644 --- a/mypy/constraints.py +++ b/mypy/constraints.py @@ -949,7 +949,7 @@ def visit_callable_type(self, template: CallableType) -> list[Constraint]: ) # TODO: see above "FIX" comments for param_spec is None case - # TODO: this assume positional arguments + # TODO: this assumes positional arguments for t, a in zip(prefix.arg_types, 
cactual_prefix.arg_types): res.extend(infer_constraints(t, a, neg_op(self.direction))) diff --git a/mypy/dmypy/client.py index efa1b5f01288..ee786fdd7436 100644 --- a/mypy/dmypy/client.py +++ b/mypy/dmypy/client.py @@ -665,10 +665,15 @@ def request( return {"error": str(err)} # TODO: Other errors, e.g. ValueError, UnicodeError else: - # Display debugging output written to stdout in the server process for convenience. + # Display debugging output written to stdout/stderr in the server process for convenience. stdout = response.get("stdout") if stdout: sys.stdout.write(stdout) + stderr = response.get("stderr") + if stderr: + print("-" * 79) + print("stderr:") + sys.stdout.write(stderr) return response diff --git a/mypy/dmypy_server.py b/mypy/dmypy_server.py index 7227cd559946..3cc50f4ece36 100644 --- a/mypy/dmypy_server.py +++ b/mypy/dmypy_server.py @@ -215,7 +215,9 @@ def serve(self) -> None: with server: data = receive(server) debug_stdout = io.StringIO() + debug_stderr = io.StringIO() sys.stdout = debug_stdout + sys.stderr = debug_stderr resp: dict[str, Any] = {} if "command" not in data: resp = {"error": "No command found in request"} @@ -233,9 +235,11 @@ def serve(self) -> None: resp = {"error": "Daemon crashed!\n" + "".join(tb)} resp.update(self._response_metadata()) resp["stdout"] = debug_stdout.getvalue() + resp["stderr"] = debug_stderr.getvalue() server.write(json.dumps(resp).encode("utf8")) raise resp["stdout"] = debug_stdout.getvalue() + resp["stderr"] = debug_stderr.getvalue() try: resp.update(self._response_metadata()) server.write(json.dumps(resp).encode("utf8")) diff --git a/mypy/errorcodes.py b/mypy/errorcodes.py index 3d8b1096ed4f..2eb2d5c624b6 100644 --- a/mypy/errorcodes.py +++ b/mypy/errorcodes.py @@ -132,6 +132,9 @@ def __str__(self) -> str: SAFE_SUPER: Final = ErrorCode( "safe-super", "Warn about calls to abstract methods with empty/trivial bodies", "General" ) +TOP_LEVEL_AWAIT: Final = ErrorCode( + "top-level-await", "Warn about top level await expressions", "General" +) # These error codes aren't enabled by default. NO_UNTYPED_DEF: Final[ErrorCode] = ErrorCode( diff --git a/mypy/expandtype.py b/mypy/expandtype.py index 7933283b24d6..21c3a592669e 100644 --- a/mypy/expandtype.py +++ b/mypy/expandtype.py @@ -47,6 +47,13 @@ ) +@overload +def expand_type( + typ: CallableType, env: Mapping[TypeVarId, Type], allow_erased_callables: bool = ... +) -> CallableType: + ... + + @overload def expand_type( typ: ProperType, env: Mapping[TypeVarId, Type], allow_erased_callables: bool = ... ) -> ProperType: ... @@ -70,6 +77,11 @@ def expand_type( return typ.accept(ExpandTypeVisitor(env, allow_erased_callables)) +@overload +def expand_type_by_instance(typ: CallableType, instance: Instance) -> CallableType: + ... + + @overload def expand_type_by_instance(typ: ProperType, instance: Instance) -> ProperType: ... 
@@ -133,7 +145,7 @@ def freshen_function_type_vars(callee: F) -> F: tv = ParamSpecType.new_unification_variable(v) tvs.append(tv) tvmap[v.id] = tv - fresh = cast(CallableType, expand_type(callee, tvmap)).copy_modified(variables=tvs) + fresh = expand_type(callee, tvmap).copy_modified(variables=tvs) return cast(F, fresh) else: assert isinstance(callee, Overloaded) @@ -234,7 +246,10 @@ def visit_type_var(self, t: TypeVarType) -> Type: return repl def visit_param_spec(self, t: ParamSpecType) -> Type: - repl = get_proper_type(self.variables.get(t.id, t)) + # set prefix to something empty so we don't duplicate it + repl = get_proper_type( + self.variables.get(t.id, t.copy_modified(prefix=Parameters([], [], []))) + ) if isinstance(repl, Instance): # TODO: what does prefix mean in this case? # TODO: why does this case even happen? Instances aren't plural. @@ -346,7 +361,7 @@ def interpolate_args_for_unpack( ) return (arg_names, arg_kinds, arg_types) - def visit_callable_type(self, t: CallableType) -> Type: + def visit_callable_type(self, t: CallableType) -> CallableType: param_spec = t.param_spec() if param_spec is not None: repl = get_proper_type(self.variables.get(param_spec.id)) @@ -357,7 +372,7 @@ def visit_callable_type(self, t: CallableType) -> Type: # must expand both of them with all the argument types, # kinds and names in the replacement. The return type in # the replacement is ignored. - if isinstance(repl, CallableType) or isinstance(repl, Parameters): + if isinstance(repl, (CallableType, Parameters)): # Substitute *args: P.args, **kwargs: P.kwargs prefix = param_spec.prefix # we need to expand the types in the prefix, so might as well @@ -370,6 +385,23 @@ def visit_callable_type(self, t: CallableType) -> Type: ret_type=t.ret_type.accept(self), type_guard=(t.type_guard.accept(self) if t.type_guard is not None else None), ) + # TODO: Conceptually, the "len(t.arg_types) == 2" should not be here. However, this + # errors without it. Either figure out how to eliminate this or place an + # explanation for why this is necessary. + elif isinstance(repl, ParamSpecType) and len(t.arg_types) == 2: + # We're substituting one paramspec for another; this can mean that the prefix + # changes. (e.g. sub Concatenate[int, P] for Q) + prefix = repl.prefix + old_prefix = param_spec.prefix + + # Check assumptions. I'm not sure what order to place new prefix vs old prefix: + assert not old_prefix.arg_types or not prefix.arg_types + + t = t.copy_modified( + arg_types=prefix.arg_types + old_prefix.arg_types + t.arg_types, + arg_kinds=prefix.arg_kinds + old_prefix.arg_kinds + t.arg_kinds, + arg_names=prefix.arg_names + old_prefix.arg_names + t.arg_names, + ) var_arg = t.var_arg() if var_arg is not None and isinstance(var_arg.typ, UnpackType): @@ -453,9 +485,15 @@ def visit_union_type(self, t: UnionType) -> Type: # After substituting for type variables in t.items, some resulting types # might be subtypes of others, however calling make_simplified_union() # can cause recursion, so we just remove strict duplicates. - return UnionType.make_union( + simplified = UnionType.make_union( remove_trivial(flatten_nested_unions(expanded)), t.line, t.column ) + # This call to get_proper_type() is unfortunate but is required to preserve + # the invariant that ProperType will stay ProperType after applying expand_type(), + # otherwise a single item union of a type alias will break it. 
Note this should not + # cause infinite recursion since pathological aliases like A = Union[A, B] are + # banned at the semantic analysis level. + return get_proper_type(simplified) def visit_partial_type(self, t: PartialType) -> Type: return t diff --git a/mypy/fastparse.py b/mypy/fastparse.py index ef1fdf61af2e..6ff51e25ad5f 100644 --- a/mypy/fastparse.py +++ b/mypy/fastparse.py @@ -351,8 +351,7 @@ def parse_type_comment( else: extra_ignore = TYPE_IGNORE_PATTERN.match(type_comment) if extra_ignore: - # Typeshed has a non-optional return type for group! - tag: str | None = cast(Any, extra_ignore).group(1) + tag: str | None = extra_ignore.group(1) ignored: list[str] | None = parse_type_ignore_tag(tag) if ignored is None: if errors is not None: @@ -487,11 +486,13 @@ def translate_stmt_list( and self.type_ignores and min(self.type_ignores) < self.get_lineno(stmts[0]) ): - if self.type_ignores[min(self.type_ignores)]: + ignores = self.type_ignores[min(self.type_ignores)] + if ignores: + joined_ignores = ", ".join(ignores) self.fail( ( "type ignore with error code is not supported for modules; " - "use `# mypy: disable-error-code=...`" + f'use `# mypy: disable-error-code="{joined_ignores}"`' ), line=min(self.type_ignores), column=0, @@ -664,7 +665,9 @@ def fix_function_overloads(self, stmts: list[Statement]) -> list[Statement]: if current_overload and current_overload_name == last_if_stmt_overload_name: # Remove last stmt (IfStmt) from ret if the overload names matched # Only happens if no executable block had been found in IfStmt - skipped_if_stmts.append(cast(IfStmt, ret.pop())) + popped = ret.pop() + assert isinstance(popped, IfStmt) + skipped_if_stmts.append(popped) if current_overload and skipped_if_stmts: # Add bare IfStmt (without overloads) to ret # Required for mypy to be able to still check conditions @@ -841,7 +844,7 @@ def translate_module_id(self, id: str) -> str: def visit_Module(self, mod: ast3.Module) -> MypyFile: self.type_ignores = {} for ti in mod.type_ignores: - parsed = parse_type_ignore_tag(ti.tag) # type: ignore[attr-defined] + parsed = parse_type_ignore_tag(ti.tag) if parsed is not None: self.type_ignores[ti.lineno] = parsed else: diff --git a/mypy/fixup.py b/mypy/fixup.py index 7b0f5f433d72..01e4c0a716fc 100644 --- a/mypy/fixup.py +++ b/mypy/fixup.py @@ -170,7 +170,7 @@ def visit_class_def(self, c: ClassDef) -> None: if isinstance(v, TypeVarType): for value in v.values: value.accept(self.type_fixer) - v.upper_bound.accept(self.type_fixer) + v.upper_bound.accept(self.type_fixer) def visit_type_var_expr(self, tv: TypeVarExpr) -> None: for value in tv.values: diff --git a/mypy/main.py b/mypy/main.py index 47dea2ae9797..3f5e02ec3f79 100644 --- a/mypy/main.py +++ b/mypy/main.py @@ -1017,6 +1017,8 @@ def add_invertible_flag( add_invertible_flag( "--allow-empty-bodies", default=False, help=argparse.SUPPRESS, group=internals_group ) + # This undocumented feature exports limited line-level dependency information. + internals_group.add_argument("--export-ref-info", action="store_true", help=argparse.SUPPRESS) report_group = parser.add_argument_group( title="Report generation", description="Generate a report in the specified format." 
diff --git a/mypy/meet.py b/mypy/meet.py index d99e1a92d2eb..3214b4b43975 100644 --- a/mypy/meet.py +++ b/mypy/meet.py @@ -342,7 +342,22 @@ def _is_overlapping_types(left: Type, right: Type) -> bool: left_possible = get_possible_variants(left) right_possible = get_possible_variants(right) - # We start by checking multi-variant types like Unions first. We also perform + # First handle special cases relating to PEP 612: + # - comparing a `Parameters` to a `Parameters` + # - comparing a `Parameters` to a `ParamSpecType` + # - comparing a `ParamSpecType` to a `ParamSpecType` + # + # These should all always be considered overlapping equality checks. + # These need to be done before we move on to other TypeVarLike comparisons. + if isinstance(left, (Parameters, ParamSpecType)) and isinstance( + right, (Parameters, ParamSpecType) + ): + return True + # A `Parameters` does not overlap with anything else, however + if isinstance(left, Parameters) or isinstance(right, Parameters): + return False + + # Now move on to checking multi-variant types like Unions. We also perform # the same logic if either type happens to be a TypeVar/ParamSpec/TypeVarTuple. # # Handling the TypeVarLikes now lets us simulate having them bind to the corresponding diff --git a/mypy/message_registry.py b/mypy/message_registry.py index e00aca2869bd..130b94c7bf9a 100644 --- a/mypy/message_registry.py +++ b/mypy/message_registry.py @@ -82,7 +82,7 @@ def with_additional_msg(self, info: str) -> ErrorMessage: INCOMPATIBLE_TYPES_IN_CAPTURE: Final = ErrorMessage("Incompatible types in capture pattern") MUST_HAVE_NONE_RETURN_TYPE: Final = ErrorMessage('The return type of "{}" must be None') TUPLE_INDEX_OUT_OF_RANGE: Final = ErrorMessage("Tuple index out of range") -INVALID_SLICE_INDEX: Final = ErrorMessage("Slice index must be an integer or None") +INVALID_SLICE_INDEX: Final = ErrorMessage("Slice index must be an integer, SupportsIndex or None") CANNOT_INFER_LAMBDA_TYPE: Final = ErrorMessage("Cannot infer type of lambda") CANNOT_ACCESS_INIT: Final = ( 'Accessing "__init__" on an instance is unsound, since instance.__init__ could be from' diff --git a/mypy/messages.py b/mypy/messages.py index ba2508033790..2ba34ea4fe12 100644 --- a/mypy/messages.py +++ b/mypy/messages.py @@ -516,6 +516,12 @@ def has_no_attr( context, code=codes.UNION_ATTR, ) + else: + self.fail( + '{} has no attribute "{}"{}'.format(format_type(original_type), member, extra), + context, + code=codes.ATTR_DEFINED, + ) return AnyType(TypeOfAny.from_error) def unsupported_operand_types( diff --git a/mypy/nodes.py b/mypy/nodes.py index 4787930214f3..d5ce8d675127 100644 --- a/mypy/nodes.py +++ b/mypy/nodes.py @@ -20,7 +20,7 @@ Union, cast, ) -from typing_extensions import Final, TypeAlias as _TypeAlias +from typing_extensions import Final, TypeAlias as _TypeAlias, TypeGuard from mypy_extensions import trait @@ -901,6 +901,7 @@ def deserialize(cls, data: JsonDict) -> Decorator: VAR_FLAGS: Final = [ "is_self", + "is_cls", "is_initialized_in_class", "is_staticmethod", "is_classmethod", @@ -935,6 +936,7 @@ class Var(SymbolNode): "type", "final_value", "is_self", + "is_cls", "is_ready", "is_inferred", "is_initialized_in_class", @@ -967,6 +969,8 @@ def __init__(self, name: str, type: mypy.types.Type | None = None) -> None: self.type: mypy.types.Type | None = type # Declared or inferred type, or None # Is this the first argument to an ordinary method (usually "self")? self.is_self = False + # Is this the first argument to a classmethod (typically "cls")? 
+ self.is_cls = False self.is_ready = True # If inferred, is the inferred type available? self.is_inferred = self.type is None # Is this initialized explicitly to a non-None value in class body? @@ -1631,6 +1635,10 @@ def accept(self, visitor: ExpressionVisitor[T]) -> T: return visitor.visit_str_expr(self) +def is_StrExpr_list(seq: list[Expression]) -> TypeGuard[list[StrExpr]]: + return all(isinstance(item, StrExpr) for item in seq) + + class BytesExpr(Expression): """Bytes literal""" @@ -2175,7 +2183,8 @@ def name(self) -> str: def expr(self) -> Expression: """Return the expression (the body) of the lambda.""" - ret = cast(ReturnStmt, self.body.body[-1]) + ret = self.body.body[-1] + assert isinstance(ret, ReturnStmt) expr = ret.expr assert expr is not None # lambda can't have empty body return expr @@ -3074,7 +3083,7 @@ def protocol_members(self) -> list[str]: for base in self.mro[:-1]: # we skip "object" since everyone implements it if base.is_protocol: for name, node in base.names.items(): - if isinstance(node.node, (TypeAlias, TypeVarExpr)): + if isinstance(node.node, (TypeAlias, TypeVarExpr, MypyFile)): # These are auxiliary definitions (and type aliases are prohibited). continue members.add(name) @@ -3906,8 +3915,8 @@ def serialize(self) -> JsonDict: "eq_default": self.eq_default, "order_default": self.order_default, "kw_only_default": self.kw_only_default, - "frozen_only_default": self.frozen_default, - "field_specifiers": self.field_specifiers, + "frozen_default": self.frozen_default, + "field_specifiers": list(self.field_specifiers), } @classmethod @@ -3917,7 +3926,7 @@ def deserialize(cls, data: JsonDict) -> DataclassTransformSpec: order_default=data.get("order_default"), kw_only_default=data.get("kw_only_default"), frozen_default=data.get("frozen_default"), - field_specifiers=data.get("field_specifiers"), + field_specifiers=tuple(data.get("field_specifiers", [])), ) diff --git a/mypy/options.py b/mypy/options.py index 92c96a92c531..077e0d4ed90a 100644 --- a/mypy/options.py +++ b/mypy/options.py @@ -339,6 +339,9 @@ def __init__(self) -> None: self.disable_recursive_aliases = False # Deprecated reverse version of the above, do not use. self.enable_recursive_aliases = False + # Export line-level, limited, fine-grained dependency information in cache data + # (undocumented feature). + self.export_ref_info = False self.disable_bytearray_promotion = False self.disable_memoryview_promotion = False diff --git a/mypy/partially_defined.py b/mypy/partially_defined.py index 9b8238eff83f..085384989705 100644 --- a/mypy/partially_defined.py +++ b/mypy/partially_defined.py @@ -79,7 +79,9 @@ def copy(self) -> BranchState: class BranchStatement: - def __init__(self, initial_state: BranchState) -> None: + def __init__(self, initial_state: BranchState | None = None) -> None: + if initial_state is None: + initial_state = BranchState() self.initial_state = initial_state self.branches: list[BranchState] = [ BranchState( @@ -171,7 +173,7 @@ class ScopeType(Enum): Global = 1 Class = 2 Func = 3 - Generator = 3 + Generator = 4 class Scope: @@ -199,7 +201,7 @@ class DefinedVariableTracker: def __init__(self) -> None: # There's always at least one scope. Within each scope, there's at least one "global" BranchingStatement. - self.scopes: list[Scope] = [Scope([BranchStatement(BranchState())], ScopeType.Global)] + self.scopes: list[Scope] = [Scope([BranchStatement()], ScopeType.Global)] # disable_branch_skip is used to disable skipping a branch due to a return/raise/etc. 
This is useful # in things like try/except/finally statements. self.disable_branch_skip = False @@ -216,9 +218,11 @@ def _scope(self) -> Scope: def enter_scope(self, scope_type: ScopeType) -> None: assert len(self._scope().branch_stmts) > 0 - self.scopes.append( - Scope([BranchStatement(self._scope().branch_stmts[-1].branches[-1])], scope_type) - ) + initial_state = None + if scope_type == ScopeType.Generator: + # Generators are special because they inherit the outer scope. + initial_state = self._scope().branch_stmts[-1].branches[-1] + self.scopes.append(Scope([BranchStatement(initial_state)], scope_type)) def exit_scope(self) -> None: self.scopes.pop() @@ -342,13 +346,15 @@ def variable_may_be_undefined(self, name: str, context: Context) -> None: def process_definition(self, name: str) -> None: # Was this name previously used? If yes, it's a used-before-definition error. if not self.tracker.in_scope(ScopeType.Class): - # Errors in class scopes are caught by the semantic analyzer. refs = self.tracker.pop_undefined_ref(name) for ref in refs: if self.loops: self.variable_may_be_undefined(name, ref) else: self.var_used_before_def(name, ref) + else: + # Errors in class scopes are caught by the semantic analyzer. + pass self.tracker.record_definition(name) def visit_global_decl(self, o: GlobalDecl) -> None: @@ -415,17 +421,24 @@ def visit_match_stmt(self, o: MatchStmt) -> None: def visit_func_def(self, o: FuncDef) -> None: self.process_definition(o.name) - self.tracker.enter_scope(ScopeType.Func) super().visit_func_def(o) - self.tracker.exit_scope() def visit_func(self, o: FuncItem) -> None: if o.is_dynamic() and not self.options.check_untyped_defs: return - if o.arguments is not None: - for arg in o.arguments: - self.tracker.record_definition(arg.variable.name) - super().visit_func(o) + + args = o.arguments or [] + # Process initializers (defaults) outside the function scope. + for arg in args: + if arg.initializer is not None: + arg.initializer.accept(self) + + self.tracker.enter_scope(ScopeType.Func) + for arg in args: + self.process_definition(arg.variable.name) + super().visit_var(arg.variable) + o.body.accept(self) + self.tracker.exit_scope() def visit_generator_expr(self, o: GeneratorExpr) -> None: self.tracker.enter_scope(ScopeType.Generator) @@ -603,7 +616,7 @@ def visit_starred_pattern(self, o: StarredPattern) -> None: super().visit_starred_pattern(o) def visit_name_expr(self, o: NameExpr) -> None: - if o.name in self.builtins: + if o.name in self.builtins and self.tracker.in_scope(ScopeType.Global): return if self.tracker.is_possibly_undefined(o.name): # A variable is only defined in some branches. diff --git a/mypy/plugins/attrs.py b/mypy/plugins/attrs.py index 6fda965ade8b..c71d898e1c62 100644 --- a/mypy/plugins/attrs.py +++ b/mypy/plugins/attrs.py @@ -6,8 +6,10 @@ from typing_extensions import Final, Literal import mypy.plugin # To avoid circular imports. +from mypy.checker import TypeChecker from mypy.errorcodes import LITERAL_REQ from mypy.exprtotype import TypeTranslationError, expr_to_unanalyzed_type +from mypy.messages import format_type_bare from mypy.nodes import ( ARG_NAMED, ARG_NAMED_OPT, @@ -77,6 +79,7 @@ SELF_TVAR_NAME: Final = "_AT" MAGIC_ATTR_NAME: Final = "__attrs_attrs__" MAGIC_ATTR_CLS_NAME_TEMPLATE: Final = "__{}_AttrsAttributes__" # The tuple subclass pattern. 
+ATTRS_INIT_NAME: Final = "__attrs_init__" class Converter: @@ -330,7 +333,7 @@ def attr_class_maker_callback( adder = MethodAdder(ctx) # If __init__ is not being generated, attrs still generates it as __attrs_init__ instead. - _add_init(ctx, attributes, adder, "__init__" if init else "__attrs_init__") + _add_init(ctx, attributes, adder, "__init__" if init else ATTRS_INIT_NAME) if order: _add_order(ctx, adder) if frozen: @@ -888,3 +891,64 @@ def add_method( """ self_type = self_type if self_type is not None else self.self_type add_method(self.ctx, method_name, args, ret_type, self_type, tvd) + + +def _get_attrs_init_type(typ: Type) -> CallableType | None: + """ + If `typ` refers to an attrs class, gets the type of its initializer method. + """ + typ = get_proper_type(typ) + if not isinstance(typ, Instance): + return None + magic_attr = typ.type.get(MAGIC_ATTR_NAME) + if magic_attr is None or not magic_attr.plugin_generated: + return None + init_method = typ.type.get_method("__init__") or typ.type.get_method(ATTRS_INIT_NAME) + if not isinstance(init_method, FuncDef) or not isinstance(init_method.type, CallableType): + return None + return init_method.type + + +def evolve_function_sig_callback(ctx: mypy.plugin.FunctionSigContext) -> CallableType: + """ + Generates a signature for the 'attr.evolve' function that's specific to the call site + and dependent on the type of the first argument. + """ + if len(ctx.args) != 2: + # Ideally the name and context should be callee's, but we don't have it in FunctionSigContext. + ctx.api.fail(f'"{ctx.default_signature.name}" has unexpected type annotation', ctx.context) + return ctx.default_signature + + if len(ctx.args[0]) != 1: + return ctx.default_signature # leave it to the type checker to complain + + inst_arg = ctx.args[0][0] + + # + assert isinstance(ctx.api, TypeChecker) + inst_type = ctx.api.expr_checker.accept(inst_arg) + # + + inst_type = get_proper_type(inst_type) + if isinstance(inst_type, AnyType): + return ctx.default_signature + inst_type_str = format_type_bare(inst_type) + + attrs_init_type = _get_attrs_init_type(inst_type) + if not attrs_init_type: + ctx.api.fail( + f'Argument 1 to "evolve" has incompatible type "{inst_type_str}"; expected an attrs class', + ctx.context, + ) + return ctx.default_signature + + # AttrClass.__init__ has the following signature (or similar, if having kw-only & defaults): + # def __init__(self, attr1: Type1, attr2: Type2) -> None: + # We want to generate a signature for evolve that looks like this: + # def evolve(inst: AttrClass, *, attr1: Type1 = ..., attr2: Type2 = ...) 
-> AttrClass: + return attrs_init_type.copy_modified( + arg_names=["inst"] + attrs_init_type.arg_names[1:], + arg_kinds=[ARG_POS] + [ARG_NAMED_OPT for _ in attrs_init_type.arg_kinds[1:]], + ret_type=inst_type, + name=f"{ctx.default_signature.name} of {inst_type_str}", + ) diff --git a/mypy/plugins/common.py b/mypy/plugins/common.py index 0acf3e3a6369..67796ef15cf3 100644 --- a/mypy/plugins/common.py +++ b/mypy/plugins/common.py @@ -1,5 +1,6 @@ from __future__ import annotations +from mypy.argmap import map_actuals_to_formals from mypy.fixup import TypeFixer from mypy.nodes import ( ARG_POS, @@ -13,6 +14,7 @@ Expression, FuncDef, JsonDict, + NameExpr, Node, PassStmt, RefExpr, @@ -22,6 +24,7 @@ from mypy.plugin import CheckerPluginInterface, ClassDefContext, SemanticAnalyzerPluginInterface from mypy.semanal_shared import ( ALLOW_INCOMPATIBLE_OVERRIDE, + parse_bool, require_bool_literal_argument, set_callable_name, ) @@ -29,13 +32,19 @@ try_getting_str_literals as try_getting_str_literals, ) from mypy.types import ( + AnyType, CallableType, + Instance, + LiteralType, + NoneType, Overloaded, Type, + TypeOfAny, TypeType, TypeVarType, deserialize_type, get_proper_type, + is_optional, ) from mypy.typevars import fill_typevars from mypy.util import get_unique_redefinition_name @@ -87,6 +96,71 @@ def _get_argument(call: CallExpr, name: str) -> Expression | None: return None +def find_shallow_matching_overload_item(overload: Overloaded, call: CallExpr) -> CallableType: + """Perform limited lookup of a matching overload item. + + Full overload resolution is only supported during type checking, but plugins + sometimes need to resolve overloads. This can be used in some such use cases. + + Resolve overloads based on these things only: + + * Match using argument kinds and names + * If formal argument has type None, only accept the "None" expression in the callee + * If formal argument has type Literal[True] or Literal[False], only accept the + relevant bool literal + + Return the first matching overload item, or the last one if nothing matches. 
+ """ + for item in overload.items[:-1]: + ok = True + mapped = map_actuals_to_formals( + call.arg_kinds, + call.arg_names, + item.arg_kinds, + item.arg_names, + lambda i: AnyType(TypeOfAny.special_form), + ) + + # Look for extra actuals + matched_actuals = set() + for actuals in mapped: + matched_actuals.update(actuals) + if any(i not in matched_actuals for i in range(len(call.args))): + ok = False + + for arg_type, kind, actuals in zip(item.arg_types, item.arg_kinds, mapped): + if kind.is_required() and not actuals: + # Missing required argument + ok = False + break + elif actuals: + args = [call.args[i] for i in actuals] + arg_type = get_proper_type(arg_type) + arg_none = any(isinstance(arg, NameExpr) and arg.name == "None" for arg in args) + if isinstance(arg_type, NoneType): + if not arg_none: + ok = False + break + elif ( + arg_none + and not is_optional(arg_type) + and not ( + isinstance(arg_type, Instance) + and arg_type.type.fullname == "builtins.object" + ) + and not isinstance(arg_type, AnyType) + ): + ok = False + break + elif isinstance(arg_type, LiteralType) and type(arg_type.value) is bool: + if not any(parse_bool(arg) == arg_type.value for arg in args): + ok = False + break + if ok: + return item + return overload.items[-1] + + def _get_callee_type(call: CallExpr) -> CallableType | None: """Return the type of the callee, regardless of its syntatic form.""" @@ -103,8 +177,7 @@ def _get_callee_type(call: CallExpr) -> CallableType | None: if isinstance(callee_node, (Var, SYMBOL_FUNCBASE_TYPES)) and callee_node.type: callee_node_type = get_proper_type(callee_node.type) if isinstance(callee_node_type, Overloaded): - # We take the last overload. - return callee_node_type.items[-1] + return find_shallow_matching_overload_item(callee_node_type, call) elif isinstance(callee_node_type, CallableType): return callee_node_type diff --git a/mypy/plugins/dataclasses.py b/mypy/plugins/dataclasses.py index 6b1062d6457f..9fa32a108b74 100644 --- a/mypy/plugins/dataclasses.py +++ b/mypy/plugins/dataclasses.py @@ -2,11 +2,11 @@ from __future__ import annotations -from typing import Optional +from typing import Iterator, Optional from typing_extensions import Final from mypy import errorcodes, message_registry -from mypy.expandtype import expand_type +from mypy.expandtype import expand_type, expand_type_by_instance from mypy.nodes import ( ARG_NAMED, ARG_NAMED_OPT, @@ -17,11 +17,14 @@ MDEF, Argument, AssignmentStmt, + Block, CallExpr, ClassDef, Context, DataclassTransformSpec, Expression, + FuncDef, + IfStmt, JsonDict, NameExpr, Node, @@ -37,6 +40,7 @@ ) from mypy.plugin import ClassDefContext, SemanticAnalyzerPluginInterface from mypy.plugins.common import ( + _get_callee_type, _get_decorator_bool_argument, add_attribute_to_class, add_method_to_class, @@ -45,7 +49,7 @@ from mypy.semanal_shared import find_dataclass_transform_spec, require_bool_literal_argument from mypy.server.trigger import make_wildcard_trigger from mypy.state import state -from mypy.typeops import map_type_from_supertype +from mypy.typeops import map_type_from_supertype, try_getting_literals_from_type from mypy.types import ( AnyType, CallableType, @@ -87,6 +91,7 @@ def __init__( type: Type | None, info: TypeInfo, kw_only: bool, + is_neither_frozen_nor_nonfrozen: bool, ) -> None: self.name = name self.alias = alias @@ -95,9 +100,10 @@ def __init__( self.has_default = has_default self.line = line self.column = column - self.type = type + self.type = type # Type as __init__ argument self.info = info self.kw_only = kw_only + 
self.is_neither_frozen_nor_nonfrozen = is_neither_frozen_nor_nonfrozen def to_argument(self, current_info: TypeInfo) -> Argument: arg_kind = ARG_POS @@ -138,6 +144,7 @@ def serialize(self) -> JsonDict: "column": self.column, "type": self.type.serialize(), "kw_only": self.kw_only, + "is_neither_frozen_nor_nonfrozen": self.is_neither_frozen_nor_nonfrozen, } @classmethod @@ -290,7 +297,11 @@ def transform(self) -> bool: parent_decorator_arguments = [] for parent in info.mro[1:-1]: parent_args = parent.metadata.get("dataclass") - if parent_args: + + # Ignore parent classes that directly specify a dataclass transform-decorated metaclass + # when searching for usage of the frozen parameter. PEP 681 states that a class that + # directly specifies such a metaclass must be treated as neither frozen nor non-frozen. + if parent_args and not _has_direct_dataclass_transform_metaclass(parent): parent_decorator_arguments.append(parent_args) if decorator_arguments["frozen"]: @@ -380,6 +391,22 @@ def reset_init_only_vars(self, info: TypeInfo, attributes: list[DataclassAttribu # recreate a symbol node for this attribute. lvalue.node = None + def _get_assignment_statements_from_if_statement( + self, stmt: IfStmt + ) -> Iterator[AssignmentStmt]: + for body in stmt.body: + if not body.is_unreachable: + yield from self._get_assignment_statements_from_block(body) + if stmt.else_body is not None and not stmt.else_body.is_unreachable: + yield from self._get_assignment_statements_from_block(stmt.else_body) + + def _get_assignment_statements_from_block(self, block: Block) -> Iterator[AssignmentStmt]: + for stmt in block.body: + if isinstance(stmt, AssignmentStmt): + yield stmt + elif isinstance(stmt, IfStmt): + yield from self._get_assignment_statements_from_if_statement(stmt) + def collect_attributes(self) -> list[DataclassAttribute] | None: """Collect all attributes declared in the dataclass and its parents. @@ -438,10 +465,10 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: # Second, collect attributes belonging to the current class. current_attr_names: set[str] = set() kw_only = self._get_bool_arg("kw_only", self._spec.kw_only_default) - for stmt in cls.defs.body: + for stmt in self._get_assignment_statements_from_block(cls.defs): # Any assignment that doesn't use the new type declaration # syntax can be ignored out of hand. - if not (isinstance(stmt, AssignmentStmt) and stmt.new_syntax): + if not stmt.new_syntax: continue # a: int, b: str = 1, 'foo' is not supported syntax so we @@ -491,7 +518,7 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: is_in_init_param = field_args.get("init") if is_in_init_param is None: - is_in_init = True + is_in_init = self._get_default_init_value_for_field_specifier(stmt.rvalue) else: is_in_init = bool(self._api.parse_bool(is_in_init_param)) @@ -510,9 +537,12 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: elif not isinstance(stmt.rvalue, TempNode): has_default = True - if not has_default: - # Make all non-default attributes implicit because they are de-facto set - # on self in the generated __init__(), not in the class body. + if not has_default and self._spec is _TRANSFORM_SPEC_FOR_DATACLASSES: + # Make all non-default dataclass attributes implicit because they are de-facto + # set on self in the generated __init__(), not in the class body. On the other + # hand, we don't know how custom dataclass transforms initialize attributes, + # so we don't treat them as implicit. 
This is required to support descriptors + # (https://github.com/python/mypy/issues/14868). sym.implicit = True is_kw_only = kw_only @@ -553,6 +583,7 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: ) current_attr_names.add(lhs.name) + init_type = self._infer_dataclass_attr_init_type(sym, lhs.name, stmt) found_attrs[lhs.name] = DataclassAttribute( name=lhs.name, alias=alias, @@ -561,9 +592,12 @@ def collect_attributes(self) -> list[DataclassAttribute] | None: has_default=has_default, line=stmt.line, column=stmt.column, - type=sym.type, + type=init_type, info=cls.info, kw_only=is_kw_only, + is_neither_frozen_nor_nonfrozen=_has_direct_dataclass_transform_metaclass( + cls.info + ), ) all_attrs = list(found_attrs.values()) @@ -606,6 +640,13 @@ def _freeze(self, attributes: list[DataclassAttribute]) -> None: """ info = self._cls.info for attr in attributes: + # Classes that directly specify a dataclass_transform metaclass must be neither frozen + # nor non-frozen per PEP 681. Though it is surprising, this means that attributes from + # such a class must be writable even if the rest of the class hierarchy is frozen. This + # matches the behavior of Pyright (the reference implementation). + if attr.is_neither_frozen_nor_nonfrozen: + continue + sym_node = info.names.get(attr.name) if sym_node is not None: var = sym_node.node @@ -648,17 +689,18 @@ def _is_kw_only_type(self, node: Type | None) -> bool: return node_type.type.fullname == "dataclasses.KW_ONLY" def _add_dataclass_fields_magic_attribute(self) -> None: - # Only add if the class is a dataclasses dataclass, and omit it for dataclass_transform - # classes. - # It would be nice if this condition were reified rather than using an `is` check. - # Only add if the class is a dataclasses dataclass, and omit it for dataclass_transform - # classes. - if self._spec is not _TRANSFORM_SPEC_FOR_DATACLASSES: - return - attr_name = "__dataclass_fields__" any_type = AnyType(TypeOfAny.explicit) - field_type = self._api.named_type_or_none("dataclasses.Field", [any_type]) or any_type + # For `dataclasses`, use the type `dict[str, Field[Any]]` for accuracy. For dataclass + # transforms, it's inaccurate to use `Field` since a given transform may use a completely + # different type (or none); fall back to `Any` there. + # + # In either case, we're aiming to match the Typeshed stub for `is_dataclass`, which expects + # the instance to have a `__dataclass_fields__` attribute of type `dict[str, Field[Any]]`. + if self._spec is _TRANSFORM_SPEC_FOR_DATACLASSES: + field_type = self._api.named_type_or_none("dataclasses.Field", [any_type]) or any_type + else: + field_type = any_type attr_type = self._api.named_type( "builtins.dict", [self._api.named_type("builtins.str"), field_type] ) @@ -719,6 +761,74 @@ def _get_bool_arg(self, name: str, default: bool) -> bool: return require_bool_literal_argument(self._api, expression, name, default) return default + def _get_default_init_value_for_field_specifier(self, call: Expression) -> bool: + """ + Find a default value for the `init` parameter of the specifier being called. If the + specifier's type signature includes an `init` parameter with a type of `Literal[True]` or + `Literal[False]`, return the appropriate boolean value from the literal. Otherwise, + fall back to the standard default of `True`.
+ """ + if not isinstance(call, CallExpr): + return True + + specifier_type = _get_callee_type(call) + if specifier_type is None: + return True + + parameter = specifier_type.argument_by_name("init") + if parameter is None: + return True + + literals = try_getting_literals_from_type(parameter.typ, bool, "builtins.bool") + if literals is None or len(literals) != 1: + return True + + return literals[0] + + def _infer_dataclass_attr_init_type( + self, sym: SymbolTableNode, name: str, context: Context + ) -> Type | None: + """Infer __init__ argument type for an attribute. + + In particular, possibly use the signature of __set__. + """ + default = sym.type + if sym.implicit: + return default + t = get_proper_type(sym.type) + + # Perform a simple-minded inference from the signature of __set__, if present. + # We can't use mypy.checkmember here, since this plugin runs before type checking. + # We only support some basic scanerios here, which is hopefully sufficient for + # the vast majority of use cases. + if not isinstance(t, Instance): + return default + setter = t.type.get("__set__") + if setter: + if isinstance(setter.node, FuncDef): + super_info = t.type.get_containing_type_info("__set__") + assert super_info + if setter.type: + setter_type = get_proper_type( + map_type_from_supertype(setter.type, t.type, super_info) + ) + else: + return AnyType(TypeOfAny.unannotated) + if isinstance(setter_type, CallableType) and setter_type.arg_kinds == [ + ARG_POS, + ARG_POS, + ARG_POS, + ]: + return expand_type_by_instance(setter_type.arg_types[2], t) + else: + self._api.fail( + f'Unsupported signature for "__set__" in "{t.type.name}"', context + ) + else: + self._api.fail(f'Unsupported "__set__" in "{t.type.name}"', context) + + return default + def add_dataclass_tag(info: TypeInfo) -> None: # The value is ignored, only the existence matters. 
@@ -768,3 +878,10 @@ def _is_dataclasses_decorator(node: Node) -> bool: if isinstance(node, RefExpr): return node.fullname in dataclass_makers return False + + +def _has_direct_dataclass_transform_metaclass(info: TypeInfo) -> bool: + return ( + info.declared_metaclass is not None + and info.declared_metaclass.type.dataclass_transform_spec is not None + ) diff --git a/mypy/plugins/default.py b/mypy/plugins/default.py index 4d6f46860939..3dc32a67b84c 100644 --- a/mypy/plugins/default.py +++ b/mypy/plugins/default.py @@ -10,6 +10,7 @@ AttributeContext, ClassDefContext, FunctionContext, + FunctionSigContext, MethodContext, MethodSigContext, Plugin, @@ -46,6 +47,15 @@ def get_function_hook(self, fullname: str) -> Callable[[FunctionContext], Type] return singledispatch.create_singledispatch_function_callback return None + def get_function_signature_hook( + self, fullname: str + ) -> Callable[[FunctionSigContext], FunctionLike] | None: + from mypy.plugins import attrs + + if fullname in ("attr.evolve", "attrs.evolve", "attr.assoc", "attrs.assoc"): + return attrs.evolve_function_sig_callback + return None + def get_method_signature_hook( self, fullname: str ) -> Callable[[MethodSigContext], FunctionLike] | None: diff --git a/mypy/refinfo.py b/mypy/refinfo.py new file mode 100644 index 000000000000..3df1e575a35c --- /dev/null +++ b/mypy/refinfo.py @@ -0,0 +1,69 @@ +"""Find line-level reference information from a mypy AST (undocumented feature)""" + +from __future__ import annotations + +from mypy.nodes import LDEF, Expression, MemberExpr, MypyFile, NameExpr, RefExpr +from mypy.traverser import TraverserVisitor +from mypy.typeops import tuple_fallback +from mypy.types import ( + FunctionLike, + Instance, + TupleType, + Type, + TypeType, + TypeVarLikeType, + get_proper_type, +) + + +class RefInfoVisitor(TraverserVisitor): + def __init__(self, type_map: dict[Expression, Type]) -> None: + super().__init__() + self.type_map = type_map + self.data: list[dict[str, object]] = [] + + def visit_name_expr(self, expr: NameExpr) -> None: + super().visit_name_expr(expr) + self.record_ref_expr(expr) + + def visit_member_expr(self, expr: MemberExpr) -> None: + super().visit_member_expr(expr) + self.record_ref_expr(expr) + + def record_ref_expr(self, expr: RefExpr) -> None: + fullname = None + if expr.kind != LDEF and "." 
in expr.fullname: + fullname = expr.fullname + elif isinstance(expr, MemberExpr): + typ = self.type_map.get(expr.expr) + if typ: + tfn = type_fullname(typ) + if tfn: + fullname = f"{tfn}.{expr.name}" + if not fullname: + fullname = f"*.{expr.name}" + if fullname is not None: + self.data.append({"line": expr.line, "column": expr.column, "target": fullname}) + + +def type_fullname(typ: Type) -> str | None: + typ = get_proper_type(typ) + if isinstance(typ, Instance): + return typ.type.fullname + elif isinstance(typ, TypeType): + return type_fullname(typ.item) + elif isinstance(typ, FunctionLike) and typ.is_type_obj(): + return type_fullname(typ.fallback) + elif isinstance(typ, TupleType): + return type_fullname(tuple_fallback(typ)) + elif isinstance(typ, TypeVarLikeType): + return type_fullname(typ.upper_bound) + return None + + +def get_undocumented_ref_info_json( + tree: MypyFile, type_map: dict[Expression, Type] +) -> list[dict[str, object]]: + visitor = RefInfoVisitor(type_map) + tree.accept(visitor) + return visitor.data diff --git a/mypy/report.py b/mypy/report.py index 75c372200ca3..2edd0957254e 100644 --- a/mypy/report.py +++ b/mypy/report.py @@ -12,7 +12,7 @@ import tokenize from abc import ABCMeta, abstractmethod from operator import attrgetter -from typing import Any, Callable, Dict, Iterator, Tuple, cast +from typing import Any, Callable, Dict, Iterator, Tuple from typing_extensions import Final, TypeAlias as _TypeAlias from urllib.request import pathname2url @@ -704,8 +704,9 @@ def __init__(self, reports: Reports, output_dir: str) -> None: super().__init__(reports, output_dir) memory_reporter = reports.add_report("memory-xml", "") + assert isinstance(memory_reporter, MemoryXmlReporter) # The dependency will be called first. - self.memory_xml = cast(MemoryXmlReporter, memory_reporter) + self.memory_xml = memory_reporter class XmlReporter(AbstractXmlReporter): diff --git a/mypy/semanal.py b/mypy/semanal.py index d2fd92499679..e585bde6babd 100644 --- a/mypy/semanal.py +++ b/mypy/semanal.py @@ -216,6 +216,7 @@ calculate_tuple_fallback, find_dataclass_transform_spec, has_placeholder, + parse_bool, require_bool_literal_argument, set_callable_name as set_callable_name, ) @@ -1314,7 +1315,8 @@ def analyze_property_with_multi_part_definition(self, defn: OverloadedFuncDef) - """ defn.is_property = True items = defn.items - first_item = cast(Decorator, defn.items[0]) + first_item = defn.items[0] + assert isinstance(first_item, Decorator) deleted_items = [] for i, item in enumerate(items[1:]): if isinstance(item, Decorator): @@ -1357,7 +1359,8 @@ def analyze_function_body(self, defn: FuncItem) -> None: # Bind the type variables again to visit the body. if defn.type: a = self.type_analyzer() - typ = cast(CallableType, defn.type) + typ = defn.type + assert isinstance(typ, CallableType) a.bind_function_type_variables(typ, defn) for i in range(len(typ.arg_types)): store_argument_type(defn, i, typ, self.named_type) @@ -1369,8 +1372,11 @@ def analyze_function_body(self, defn: FuncItem) -> None: # The first argument of a non-static, non-class method is like 'self' # (though the name could be different), having the enclosing class's # instance type. 
- if is_method and not defn.is_static and not defn.is_class and defn.arguments: - defn.arguments[0].variable.is_self = True + if is_method and not defn.is_static and defn.arguments: + if not defn.is_class: + defn.arguments[0].variable.is_self = True + else: + defn.arguments[0].variable.is_cls = True defn.body.accept(self) self.function_stack.pop() @@ -2613,11 +2619,14 @@ def report_missing_module_attribute( typing_extensions = self.modules.get("typing_extensions") if typing_extensions and source_id in typing_extensions.names: self.msg.note( - f"Use `from typing_extensions import {source_id}` instead", context + f"Use `from typing_extensions import {source_id}` instead", + context, + code=codes.ATTR_DEFINED, ) self.msg.note( "See https://mypy.readthedocs.io/en/stable/runtime_troubles.html#using-new-additions-to-the-typing-module", context, + code=codes.ATTR_DEFINED, ) def process_import_over_existing_name( @@ -5249,7 +5258,9 @@ def visit_yield_expr(self, e: YieldExpr) -> None: def visit_await_expr(self, expr: AwaitExpr) -> None: if not self.is_func_scope() or not self.function_stack: # We check both because is_function_scope() returns True inside comprehensions. - self.fail('"await" outside function', expr, serious=True, blocker=True) + # This is not a blocker, because some environments (like ipython) + # support top-level await. + self.fail('"await" outside function', expr, serious=True, code=codes.TOP_LEVEL_AWAIT) elif not self.function_stack[-1].is_coroutine: self.fail('"await" outside coroutine ("async def")', expr, serious=True, blocker=True) expr.expr.accept(self) @@ -5805,7 +5816,7 @@ def _get_node_for_class_scoped_import( # mypyc is absolutely convinced that `symbol_node` narrows to a Var in the following, # when it can also be a FuncBase. Once fixed, `f` in the following can be removed. # See also https://github.com/mypyc/mypyc/issues/892 - f = cast(Any, lambda x: x) + f: Callable[[object], Any] = lambda x: x if isinstance(f(symbol_node), (Decorator, FuncBase, Var)): # For imports in class scope, we construct a new node to represent the symbol and # set its `info` attribute to `self.type`. @@ -6455,12 +6466,8 @@ def is_initial_mangled_global(self, name: str) -> bool: return name == unmangle(name) + "'" def parse_bool(self, expr: Expression) -> bool | None: - if isinstance(expr, NameExpr): - if expr.fullname == "builtins.True": - return True - if expr.fullname == "builtins.False": - return False - return None + # This wrapper is preserved for plugins. + return parse_bool(expr) def parse_str_literal(self, expr: Expression) -> str | None: """Attempt to find the string literal value of the given expression.
Returns `None` if no diff --git a/mypy/semanal_enum.py b/mypy/semanal_enum.py index c7b8e44f65aa..efb9764545eb 100644 --- a/mypy/semanal_enum.py +++ b/mypy/semanal_enum.py @@ -27,6 +27,7 @@ TupleExpr, TypeInfo, Var, + is_StrExpr_list, ) from mypy.options import Options from mypy.semanal_shared import SemanticAnalyzerInterface @@ -177,8 +178,8 @@ def parse_enum_call_args( items.append(field) elif isinstance(names, (TupleExpr, ListExpr)): seq_items = names.items - if all(isinstance(seq_item, StrExpr) for seq_item in seq_items): - items = [cast(StrExpr, seq_item).value for seq_item in seq_items] + if is_StrExpr_list(seq_items): + items = [seq_item.value for seq_item in seq_items] elif all( isinstance(seq_item, (TupleExpr, ListExpr)) and len(seq_item.items) == 2 diff --git a/mypy/semanal_main.py b/mypy/semanal_main.py index a5e85878e931..912851520958 100644 --- a/mypy/semanal_main.py +++ b/mypy/semanal_main.py @@ -218,7 +218,7 @@ def process_top_levels(graph: Graph, scc: list[str], patches: Patches) -> None: state = graph[next_id] assert state.tree is not None deferred, incomplete, progress = semantic_analyze_target( - next_id, state, state.tree, None, final_iteration, patches + next_id, next_id, state, state.tree, None, final_iteration, patches ) all_deferred += deferred any_progress = any_progress or progress @@ -289,7 +289,7 @@ def process_top_level_function( # OK, this is one last pass, now missing names will be reported. analyzer.incomplete_namespaces.discard(module) deferred, incomplete, progress = semantic_analyze_target( - target, state, node, active_type, final_iteration, patches + target, module, state, node, active_type, final_iteration, patches ) if final_iteration: assert not deferred, "Must not defer during final iteration" @@ -318,6 +318,7 @@ def get_all_leaf_targets(file: MypyFile) -> list[TargetInfo]: def semantic_analyze_target( target: str, + module: str, state: State, node: MypyFile | FuncDef | OverloadedFuncDef | Decorator, active_type: TypeInfo | None, @@ -331,7 +332,7 @@ def semantic_analyze_target( - was some definition incomplete (need to run another pass) - were any new names defined (or placeholders replaced) """ - state.manager.processed_targets.append(target) + state.manager.processed_targets.append((module, target)) tree = state.tree assert tree is not None analyzer = state.manager.semantic_analyzer diff --git a/mypy/semanal_namedtuple.py b/mypy/semanal_namedtuple.py index 1194557836b1..68185b1c9cf1 100644 --- a/mypy/semanal_namedtuple.py +++ b/mypy/semanal_namedtuple.py @@ -41,6 +41,7 @@ TypeInfo, TypeVarExpr, Var, + is_StrExpr_list, ) from mypy.options import Options from mypy.semanal_shared import ( @@ -373,7 +374,7 @@ def parse_namedtuple_args( if not isinstance(args[0], StrExpr): self.fail(f'"{type_name}()" expects a string literal as the first argument', call) return None - typename = cast(StrExpr, call.args[0]).value + typename = args[0].value types: list[Type] = [] tvar_defs = [] if not isinstance(args[1], (ListExpr, TupleExpr)): @@ -392,10 +393,10 @@ def parse_namedtuple_args( listexpr = args[1] if fullname == "collections.namedtuple": # The fields argument contains just names, with implicit Any types. 
- if any(not isinstance(item, StrExpr) for item in listexpr.items): + if not is_StrExpr_list(listexpr.items): self.fail('String literal expected as "namedtuple()" item', call) return None - items = [cast(StrExpr, item).value for item in listexpr.items] + items = [item.value for item in listexpr.items] else: type_exprs = [ t.items[1] diff --git a/mypy/semanal_shared.py b/mypy/semanal_shared.py index 03efbe6ca1b8..c86ed828b2b9 100644 --- a/mypy/semanal_shared.py +++ b/mypy/semanal_shared.py @@ -18,6 +18,7 @@ Decorator, Expression, FuncDef, + NameExpr, Node, OverloadedFuncDef, RefExpr, @@ -451,7 +452,7 @@ def require_bool_literal_argument( default: bool | None = None, ) -> bool | None: """Attempt to interpret an expression as a boolean literal, and fail analysis if we can't.""" - value = api.parse_bool(expression) + value = parse_bool(expression) if value is None: api.fail( f'"{name}" argument must be a True or False literal', expression, code=LITERAL_REQ @@ -459,3 +460,12 @@ def require_bool_literal_argument( return default return value + + +def parse_bool(expr: Expression) -> bool | None: + if isinstance(expr, NameExpr): + if expr.fullname == "builtins.True": + return True + if expr.fullname == "builtins.False": + return False + return None diff --git a/mypy/server/astdiff.py b/mypy/server/astdiff.py index 40b60f1a69d8..83ae64fbc1a8 100644 --- a/mypy/server/astdiff.py +++ b/mypy/server/astdiff.py @@ -52,7 +52,7 @@ class level -- these are handled at attribute level (say, 'mod.Cls.method' from __future__ import annotations -from typing import Sequence, Tuple, Union, cast +from typing import Sequence, Tuple, Union from typing_extensions import TypeAlias as _TypeAlias from mypy.expandtype import expand_type @@ -73,6 +73,7 @@ class level -- these are handled at attribute level (say, 'mod.Cls.method' TypeVarTupleExpr, Var, ) +from mypy.semanal_shared import find_dataclass_transform_spec from mypy.types import ( AnyType, CallableType, @@ -230,6 +231,7 @@ def snapshot_definition(node: SymbolNode | None, common: SymbolSnapshot) -> Symb elif isinstance(node, OverloadedFuncDef) and node.impl: impl = node.impl.func if isinstance(node.impl, Decorator) else node.impl is_trivial_body = impl.is_trivial_body if impl else False + dataclass_transform_spec = find_dataclass_transform_spec(node) return ( "Func", common, @@ -239,6 +241,7 @@ def snapshot_definition(node: SymbolNode | None, common: SymbolSnapshot) -> Symb node.is_static, signature, is_trivial_body, + dataclass_transform_spec.serialize() if dataclass_transform_spec is not None else None, ) elif isinstance(node, Var): return ("Var", common, snapshot_optional_type(node.type), node.is_final) @@ -256,6 +259,10 @@ def snapshot_definition(node: SymbolNode | None, common: SymbolSnapshot) -> Symb snapshot_definition(node.func, common), ) elif isinstance(node, TypeInfo): + dataclass_transform_spec = node.dataclass_transform_spec + if dataclass_transform_spec is None: + dataclass_transform_spec = find_dataclass_transform_spec(node) + attrs = ( node.is_abstract, node.is_enum, @@ -280,6 +287,7 @@ def snapshot_definition(node: SymbolNode | None, common: SymbolSnapshot) -> Symb tuple(snapshot_type(tdef) for tdef in node.defn.type_vars), [snapshot_type(base) for base in node.bases], [snapshot_type(p) for p in node._promote], + dataclass_transform_spec.serialize() if dataclass_transform_spec is not None else None, ) prefix = node.fullname symbol_table = snapshot_symbol_table(prefix, node.names) @@ -434,7 +442,7 @@ def normalize_callable_variables(self, typ: 
CallableType) -> CallableType: tv = v.copy_modified(id=tid) tvs.append(tv) tvmap[v.id] = tv - return cast(CallableType, expand_type(typ, tvmap)).copy_modified(variables=tvs) + return expand_type(typ, tvmap).copy_modified(variables=tvs) def visit_tuple_type(self, typ: TupleType) -> SnapshotItem: return ("TupleType", snapshot_types(typ.items)) diff --git a/mypy/server/astmerge.py b/mypy/server/astmerge.py index 1ec6d572a82c..0cc6377bfb0f 100644 --- a/mypy/server/astmerge.py +++ b/mypy/server/astmerge.py @@ -358,7 +358,8 @@ def fixup_and_reset_typeinfo(self, node: TypeInfo) -> TypeInfo: if node in self.replacements: # The subclass relationships may change, so reset all caches relevant to the # old MRO. - new = cast(TypeInfo, self.replacements[node]) + new = self.replacements[node] + assert isinstance(new, TypeInfo) type_state.reset_all_subtype_caches_for(new) return self.fixup(node) diff --git a/mypy/stats.py b/mypy/stats.py index b3a32c1ce72c..5f4b9d4d201f 100644 --- a/mypy/stats.py +++ b/mypy/stats.py @@ -5,7 +5,7 @@ import os from collections import Counter from contextlib import contextmanager -from typing import Iterator, cast +from typing import Iterator from typing_extensions import Final from mypy import nodes @@ -154,10 +154,12 @@ def visit_func_def(self, o: FuncDef) -> None: ) return for defn in o.expanded: - self.visit_func_def(cast(FuncDef, defn)) + assert isinstance(defn, FuncDef) + self.visit_func_def(defn) else: if o.type: - sig = cast(CallableType, o.type) + assert isinstance(o.type, CallableType) + sig = o.type arg_types = sig.arg_types if sig.arg_names and sig.arg_names[0] == "self" and not self.inferred: arg_types = arg_types[1:] diff --git a/mypy/stubgen.py b/mypy/stubgen.py index 6cb4669887fe..212d934a11b7 100755 --- a/mypy/stubgen.py +++ b/mypy/stubgen.py @@ -48,7 +48,7 @@ import sys import traceback from collections import defaultdict -from typing import Iterable, List, Mapping, cast +from typing import Iterable, List, Mapping from typing_extensions import Final import mypy.build @@ -102,6 +102,7 @@ TupleExpr, TypeInfo, UnaryExpr, + is_StrExpr_list, ) from mypy.options import Options as MypyOptions from mypy.stubdoc import Sig, find_unique_signatures, parse_all_signatures @@ -1052,7 +1053,8 @@ def process_namedtuple(self, lvalue: NameExpr, rvalue: CallExpr) -> None: if isinstance(rvalue.args[1], StrExpr): items = rvalue.args[1].value.replace(",", " ").split() elif isinstance(rvalue.args[1], (ListExpr, TupleExpr)): - list_items = cast(List[StrExpr], rvalue.args[1].items) + list_items = rvalue.args[1].items + assert is_StrExpr_list(list_items) items = [item.value for item in list_items] else: self.add(f"{self._indent}{lvalue.name}: Incomplete") diff --git a/mypy/stubinfo.py b/mypy/stubinfo.py index 15bd96d9f4b4..e6e549ad280f 100644 --- a/mypy/stubinfo.py +++ b/mypy/stubinfo.py @@ -20,8 +20,6 @@ def stub_package_name(prefix: str) -> str: # Package name can have one or two components ('a' or 'a.b'). legacy_bundled_packages = { "aiofiles": "types-aiofiles", - "backports": "types-backports", - "backports_abc": "types-backports_abc", "bleach": "types-bleach", "boto": "types-boto", "cachetools": "types-cachetools", @@ -76,8 +74,6 @@ def stub_package_name(prefix: str) -> str: # Package name can have one or two components ('a' or 'a.b'). # # Note that these packages are omitted for now: -# sqlalchemy: It's unclear which stub package to suggest. There's also -# a mypy plugin available. # pika: typeshed's stubs are on PyPI as types-pika-ts. 
# types-pika already exists on PyPI, and is more complete in many ways, # but is a non-typeshed stubs package. @@ -180,4 +176,9 @@ def stub_package_name(prefix: str) -> str: "xmltodict": "types-xmltodict", "xxhash": "types-xxhash", "zxcvbn": "types-zxcvbn", + # Stub packages that are not from typeshed + # Since these can be installed automatically via --install-types, we have a high trust bar + # for additions here + "pandas": "pandas-stubs", # https://github.com/pandas-dev/pandas-stubs + "lxml": "lxml-stubs", # https://github.com/lxml/lxml-stubs } diff --git a/mypy/stubtest.py b/mypy/stubtest.py index cd173f63e2a1..b0ef94e62480 100644 --- a/mypy/stubtest.py +++ b/mypy/stubtest.py @@ -25,7 +25,7 @@ from contextlib import redirect_stderr, redirect_stdout from functools import singledispatch from pathlib import Path -from typing import Any, Generic, Iterator, TypeVar, Union, cast +from typing import Any, Generic, Iterator, TypeVar, Union from typing_extensions import get_origin import mypy.build @@ -419,6 +419,21 @@ class SubClass(runtime): # type: ignore[misc] # Examples: ctypes.Array, ctypes._SimpleCData pass + # Runtime class might be annotated with `@final`: + try: + runtime_final = getattr(runtime, "__final__", False) + except Exception: + runtime_final = False + + if runtime_final and not stub.is_final: + yield Error( + object_path, + "has `__final__` attribute, but isn't marked with @final in the stub", + stub, + runtime, + stub_desc=repr(stub), + ) + def _verify_metaclass( stub: nodes.TypeInfo, runtime: type[Any], object_path: list[str] @@ -476,10 +491,7 @@ def verify_typeinfo( to_check = set(stub.names) # Check all public things on the runtime class to_check.update( - # cast to workaround mypyc complaints - m - for m in cast(Any, vars)(runtime) - if not is_probably_private(m) and m not in IGNORABLE_CLASS_DUNDERS + m for m in vars(runtime) if not is_probably_private(m) and m not in IGNORABLE_CLASS_DUNDERS ) # Special-case the __init__ method for Protocols # @@ -1342,7 +1354,7 @@ def verify_typealias( "__origin__", "__args__", "__orig_bases__", - "__final__", + "__final__", # Has a specialized check # Consider removing __slots__? "__slots__", } diff --git a/mypy/subtypes.py b/mypy/subtypes.py index c3d5517d43dd..b322cf7b6cd8 100644 --- a/mypy/subtypes.py +++ b/mypy/subtypes.py @@ -388,8 +388,7 @@ def _is_subtype(self, left: Type, right: Type) -> bool: return is_proper_subtype(left, right, subtype_context=self.subtype_context) return is_subtype(left, right, subtype_context=self.subtype_context) - # visit_x(left) means: is left (which is an instance of X) a subtype of - # right? + # visit_x(left) means: is left (which is an instance of X) a subtype of right? def visit_unbound_type(self, left: UnboundType) -> bool: # This can be called if there is a bad type annotation. The result probably diff --git a/mypy/test/data.py b/mypy/test/data.py index 6e2ad198f614..3ee368869095 100644 --- a/mypy/test/data.py +++ b/mypy/test/data.py @@ -41,6 +41,14 @@ class DeleteFile(NamedTuple): FileOperation: _TypeAlias = Union[UpdateFile, DeleteFile] +def _file_arg_to_module(filename: str) -> str: + filename, _ = os.path.splitext(filename) + parts = filename.split("/") # not os.sep since it comes from test data + if parts[-1] == "__init__": + parts.pop() + return ".".join(parts) + + def parse_test_case(case: DataDrivenTestCase) -> None: """Parse and prepare a single case from suite with test case descriptions. 
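For reference, a quick sketch of what the new ``_file_arg_to_module`` helper above computes for typical ``[file ...]`` and ``[fixture ...]`` arguments; the paths here are invented for illustration, and the helper is assumed to be importable from ``mypy.test.data``:

.. code-block:: python

    from mypy.test.data import _file_arg_to_module

    # Test-data paths always use "/" (not os.sep), and the extension is dropped.
    assert _file_arg_to_module("pkg/mod.py") == "pkg.mod"
    # A trailing __init__ collapses to the enclosing package.
    assert _file_arg_to_module("pkg/__init__.py") == "pkg"
    assert _file_arg_to_module("main.pyi") == "main"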
@@ -65,22 +73,26 @@ def parse_test_case(case: DataDrivenTestCase) -> None: rechecked_modules: dict[int, set[str]] = {} # from run number module names triggered: list[str] = [] # Active triggers (one line per incremental step) targets: dict[int, list[str]] = {} # Fine-grained targets (per fine-grained update) + test_modules: list[str] = [] # Modules which are deemed "test" (vs "fixture") # Process the parsed items. Each item has a header of form [id args], # optionally followed by lines of text. item = first_item = test_items[0] + test_modules.append("__main__") for item in test_items[1:]: - if item.id in {"file", "outfile", "outfile-re"}: + if item.id in {"file", "fixture", "outfile", "outfile-re"}: # Record an extra file needed for the test case. assert item.arg is not None contents = expand_variables("\n".join(item.data)) - file_entry = (join(base_path, item.arg), contents) - if item.id == "file": - files.append(file_entry) + path = join(base_path, item.arg) + if item.id != "fixture": + test_modules.append(_file_arg_to_module(item.arg)) + if item.id in {"file", "fixture"}: + files.append((path, contents)) elif item.id == "outfile-re": - output_files.append((file_entry[0], re.compile(file_entry[1].rstrip(), re.S))) - else: - output_files.append(file_entry) + output_files.append((path, re.compile(contents.rstrip(), re.S))) + elif item.id == "outfile": + output_files.append((path, contents)) elif item.id == "builtins": # Use an alternative stub file for the builtins module. assert item.arg is not None @@ -207,6 +219,7 @@ def parse_test_case(case: DataDrivenTestCase) -> None: case.triggered = triggered or [] case.normalize_output = normalize_output case.expected_fine_grained_targets = targets + case.test_modules = test_modules class DataDrivenTestCase(pytest.Item): @@ -225,6 +238,8 @@ class DataDrivenTestCase(pytest.Item): # (file path, file content) tuples files: list[tuple[str, str]] + # Modules which is to be considered "test" rather than "fixture" + test_modules: list[str] expected_stale_modules: dict[int, set[str]] expected_rechecked_modules: dict[int, set[str]] expected_fine_grained_targets: dict[int, list[str]] diff --git a/mypy/test/testcheck.py b/mypy/test/testcheck.py index 4fe2ee6393c0..5f128283a190 100644 --- a/mypy/test/testcheck.py +++ b/mypy/test/testcheck.py @@ -11,7 +11,6 @@ from mypy.errors import CompileError from mypy.modulefinder import BuildSource, FindModuleCache, SearchPaths from mypy.options import TYPE_VAR_TUPLE, UNPACK -from mypy.semanal_main import core_modules from mypy.test.config import test_data_prefix, test_temp_dir from mypy.test.data import DataDrivenTestCase, DataSuite, FileOperation, module_from_path from mypy.test.helpers import ( @@ -188,12 +187,10 @@ def run_case_once( if incremental_step: name += str(incremental_step + 1) expected = testcase.expected_fine_grained_targets.get(incremental_step + 1) - actual = res.manager.processed_targets - # Skip the initial builtin cycle. 
actual = [ - t - for t in actual - if not any(t.startswith(mod) for mod in core_modules + ["mypy_extensions"]) + target + for module, target in res.manager.processed_targets + if module in testcase.test_modules ] if expected is not None: assert_target_equivalence(name, expected, actual) diff --git a/mypy/test/testdeps.py b/mypy/test/testdeps.py index 3343762cfaaf..faf4956a0273 100644 --- a/mypy/test/testdeps.py +++ b/mypy/test/testdeps.py @@ -41,16 +41,9 @@ def run_case(self, testcase: DataDrivenTestCase) -> None: a = ["Unknown compile error (likely syntax error in test case or fixture)"] else: deps: defaultdict[str, set[str]] = defaultdict(set) - for module in files: - if ( - module in dumped_modules - or dump_all - and module - not in ("abc", "typing", "mypy_extensions", "typing_extensions", "enum") - ): - new_deps = get_dependencies( - files[module], type_map, options.python_version, options - ) + for module, file in files.items(): + if (module in dumped_modules or dump_all) and (module in testcase.test_modules): + new_deps = get_dependencies(file, type_map, options.python_version, options) for source in new_deps: deps[source].update(new_deps[source]) diff --git a/mypy/test/testfinegrained.py b/mypy/test/testfinegrained.py index b19c49bf60bc..5b4c816b5c38 100644 --- a/mypy/test/testfinegrained.py +++ b/mypy/test/testfinegrained.py @@ -18,7 +18,7 @@ import re import sys import unittest -from typing import Any, cast +from typing import Any import pytest @@ -169,7 +169,8 @@ def get_options(self, source: str, testcase: DataDrivenTestCase, build_cache: bo def run_check(self, server: Server, sources: list[BuildSource]) -> list[str]: response = server.check(sources, export_types=True, is_tty=False, terminal_width=-1) - out = cast(str, response["out"] or response["err"]) + out = response["out"] or response["err"] + assert isinstance(out, str) return out.splitlines() def build(self, options: Options, sources: list[BuildSource]) -> list[str]: diff --git a/mypy/test/testmerge.py b/mypy/test/testmerge.py index 595aba49d8b7..11e9a3c3d7e7 100644 --- a/mypy/test/testmerge.py +++ b/mypy/test/testmerge.py @@ -36,18 +36,6 @@ AST = "AST" -NOT_DUMPED_MODULES = ( - "builtins", - "typing", - "abc", - "contextlib", - "sys", - "mypy_extensions", - "typing_extensions", - "enum", -) - - class ASTMergeSuite(DataSuite): files = ["merge.test"] @@ -84,13 +72,13 @@ def run_case(self, testcase: DataDrivenTestCase) -> None: target_path = os.path.join(test_temp_dir, "target.py") shutil.copy(os.path.join(test_temp_dir, "target.py.next"), target_path) - a.extend(self.dump(fine_grained_manager, kind)) + a.extend(self.dump(fine_grained_manager, kind, testcase.test_modules)) old_subexpr = get_subexpressions(result.manager.modules["target"]) a.append("==>") new_file, new_types = self.build_increment(fine_grained_manager, "target", target_path) - a.extend(self.dump(fine_grained_manager, kind)) + a.extend(self.dump(fine_grained_manager, kind, testcase.test_modules)) for expr in old_subexpr: if isinstance(expr, TypeVarExpr): @@ -137,8 +125,12 @@ def build_increment( type_map = manager.graph[module_id].type_map() return module, type_map - def dump(self, manager: FineGrainedBuildManager, kind: str) -> list[str]: - modules = manager.manager.modules + def dump( + self, manager: FineGrainedBuildManager, kind: str, test_modules: list[str] + ) -> list[str]: + modules = { + name: file for name, file in manager.manager.modules.items() if name in test_modules + } if kind == AST: return self.dump_asts(modules) elif kind == TYPEINFO: @@ 
-146,15 +138,12 @@ def dump(self, manager: FineGrainedBuildManager, kind: str) -> list[str]: elif kind == SYMTABLE: return self.dump_symbol_tables(modules) elif kind == TYPES: - return self.dump_types(manager) + return self.dump_types(modules, manager) assert False, f"Invalid kind {kind}" def dump_asts(self, modules: dict[str, MypyFile]) -> list[str]: a = [] for m in sorted(modules): - if m in NOT_DUMPED_MODULES: - # We don't support incremental checking of changes to builtins, etc. - continue s = modules[m].accept(self.str_conv) a.extend(s.splitlines()) return a @@ -162,9 +151,6 @@ def dump_asts(self, modules: dict[str, MypyFile]) -> list[str]: def dump_symbol_tables(self, modules: dict[str, MypyFile]) -> list[str]: a = [] for id in sorted(modules): - if not is_dumped_module(id): - # We don't support incremental checking of changes to builtins, etc. - continue a.extend(self.dump_symbol_table(id, modules[id].names)) return a @@ -197,8 +183,6 @@ def format_symbol_table_node(self, node: SymbolTableNode) -> str: def dump_typeinfos(self, modules: dict[str, MypyFile]) -> list[str]: a = [] for id in sorted(modules): - if not is_dumped_module(id): - continue a.extend(self.dump_typeinfos_recursive(modules[id].names)) return a @@ -217,13 +201,13 @@ def dump_typeinfo(self, info: TypeInfo) -> list[str]: s = info.dump(str_conv=self.str_conv, type_str_conv=self.type_str_conv) return s.splitlines() - def dump_types(self, manager: FineGrainedBuildManager) -> list[str]: + def dump_types( + self, modules: dict[str, MypyFile], manager: FineGrainedBuildManager + ) -> list[str]: a = [] # To make the results repeatable, we try to generate unique and # deterministic sort keys. - for module_id in sorted(manager.manager.modules): - if not is_dumped_module(module_id): - continue + for module_id in sorted(modules): all_types = manager.manager.all_types # Compute a module type map from the global type map tree = manager.graph[module_id].tree @@ -242,7 +226,3 @@ def dump_types(self, manager: FineGrainedBuildManager) -> list[str]: def format_type(self, typ: Type) -> str: return typ.accept(self.type_str_conv) - - -def is_dumped_module(id: str) -> bool: - return id not in NOT_DUMPED_MODULES and (not id.startswith("_") or id == "__main__") diff --git a/mypy/test/testsemanal.py b/mypy/test/testsemanal.py index 71ebc43df8c2..3276f21540df 100644 --- a/mypy/test/testsemanal.py +++ b/mypy/test/testsemanal.py @@ -2,7 +2,6 @@ from __future__ import annotations -import os.path import sys from typing import Dict @@ -77,27 +76,9 @@ def test_semanal(testcase: DataDrivenTestCase) -> None: raise CompileError(a) # Include string representations of the source files in the actual # output. - for fnam in sorted(result.files.keys()): - f = result.files[fnam] - # Omit the builtins module and files with a special marker in the - # path. 
- # TODO the test is not reliable - if ( - not f.path.endswith( - ( - os.sep + "builtins.pyi", - "typing.pyi", - "mypy_extensions.pyi", - "typing_extensions.pyi", - "abc.pyi", - "collections.pyi", - "sys.pyi", - ) - ) - and not os.path.basename(f.path).startswith("_") - and not os.path.splitext(os.path.basename(f.path))[0].endswith("_") - ): - a += str(f).split("\n") + for module in sorted(result.files.keys()): + if module in testcase.test_modules: + a += str(result.files[module]).split("\n") except CompileError as e: a = e.messages if testcase.normalize_output: @@ -164,10 +145,10 @@ def run_case(self, testcase: DataDrivenTestCase) -> None: a = result.errors if a: raise CompileError(a) - for f in sorted(result.files.keys()): - if f not in ("builtins", "typing", "abc"): - a.append(f"{f}:") - for s in str(result.files[f].names).split("\n"): + for module in sorted(result.files.keys()): + if module in testcase.test_modules: + a.append(f"{module}:") + for s in str(result.files[module].names).split("\n"): a.append(" " + s) except CompileError as e: a = e.messages @@ -199,11 +180,13 @@ def run_case(self, testcase: DataDrivenTestCase) -> None: # Collect all TypeInfos in top-level modules. typeinfos = TypeInfoMap() - for f in result.files.values(): - for n in f.names.values(): - if isinstance(n.node, TypeInfo): - assert n.fullname - typeinfos[n.fullname] = n.node + for module, file in result.files.items(): + if module in testcase.test_modules: + for n in file.names.values(): + if isinstance(n.node, TypeInfo): + assert n.fullname + if any(n.fullname.startswith(m + ".") for m in testcase.test_modules): + typeinfos[n.fullname] = n.node # The output is the symbol table converted into a string. a = str(typeinfos).split("\n") @@ -220,12 +203,7 @@ class TypeInfoMap(Dict[str, TypeInfo]): def __str__(self) -> str: a: list[str] = ["TypeInfoMap("] for x, y in sorted(self.items()): - if ( - not x.startswith("builtins.") - and not x.startswith("typing.") - and not x.startswith("abc.") - ): - ti = ("\n" + " ").join(str(y).split("\n")) - a.append(f" {x} : {ti}") + ti = ("\n" + " ").join(str(y).split("\n")) + a.append(f" {x} : {ti}") a[-1] += ")" return "\n".join(a) diff --git a/mypy/test/teststubtest.py b/mypy/test/teststubtest.py index 6bb4dfb2c937..d39812b5f9b6 100644 --- a/mypy/test/teststubtest.py +++ b/mypy/test/teststubtest.py @@ -1139,6 +1139,45 @@ def test_not_subclassable(self) -> Iterator[Case]: error="CannotBeSubclassed", ) + @collect_cases + def test_has_runtime_final_decorator(self) -> Iterator[Case]: + yield Case( + stub="from typing_extensions import final", + runtime="from typing_extensions import final", + error=None, + ) + yield Case( + stub=""" + @final + class A: ... + """, + runtime=""" + @final + class A: ... + """, + error=None, + ) + yield Case( # Runtime can miss `@final` decorator + stub=""" + @final + class B: ... + """, + runtime=""" + class B: ... + """, + error=None, + ) + yield Case( # Stub cannot miss `@final` decorator + stub=""" + class C: ... + """, + runtime=""" + @final + class C: ... 
+ """, + error="C", + ) + @collect_cases def test_name_mangling(self) -> Iterator[Case]: yield Case( diff --git a/mypy/test/testtransform.py b/mypy/test/testtransform.py index 1d3d4468444e..c765bae12062 100644 --- a/mypy/test/testtransform.py +++ b/mypy/test/testtransform.py @@ -2,8 +2,6 @@ from __future__ import annotations -import os.path - from mypy import build from mypy.errors import CompileError from mypy.modulefinder import BuildSource @@ -50,29 +48,12 @@ def test_transform(testcase: DataDrivenTestCase) -> None: raise CompileError(a) # Include string representations of the source files in the actual # output. - for fnam in sorted(result.files.keys()): - f = result.files[fnam] - - # Omit the builtins module and files with a special marker in the - # path. - # TODO the test is not reliable - if ( - not f.path.endswith( - ( - os.sep + "builtins.pyi", - "typing_extensions.pyi", - "typing.pyi", - "abc.pyi", - "sys.pyi", - ) - ) - and not os.path.basename(f.path).startswith("_") - and not os.path.splitext(os.path.basename(f.path))[0].endswith("_") - ): + for module in sorted(result.files.keys()): + if module in testcase.test_modules: t = TypeAssertTransformVisitor() t.test_only = True - f = t.mypyfile(f) - a += str(f).split("\n") + file = t.mypyfile(result.files[module]) + a += str(file).split("\n") except CompileError as e: a = e.messages if testcase.normalize_output: diff --git a/mypy/test/testtypegen.py b/mypy/test/testtypegen.py index 22ef4272e933..3f09254f081a 100644 --- a/mypy/test/testtypegen.py +++ b/mypy/test/testtypegen.py @@ -48,6 +48,7 @@ def run_case(self, testcase: DataDrivenTestCase) -> None: # to simplify output. searcher = SkippedNodeSearcher() for file in result.files.values(): + searcher.ignore_file = file.fullname not in testcase.test_modules file.accept(searcher) ignored = searcher.nodes diff --git a/mypy/test/testtypes.py b/mypy/test/testtypes.py index ee0256e2057a..6fe65675554b 100644 --- a/mypy/test/testtypes.py +++ b/mypy/test/testtypes.py @@ -7,7 +7,21 @@ from mypy.indirection import TypeIndirectionVisitor from mypy.join import join_simple, join_types from mypy.meet import meet_types, narrow_declared_type -from mypy.nodes import ARG_OPT, ARG_POS, ARG_STAR, ARG_STAR2, CONTRAVARIANT, COVARIANT, INVARIANT +from mypy.nodes import ( + ARG_NAMED, + ARG_OPT, + ARG_POS, + ARG_STAR, + ARG_STAR2, + CONTRAVARIANT, + COVARIANT, + INVARIANT, + ArgKind, + CallExpr, + Expression, + NameExpr, +) +from mypy.plugins.common import find_shallow_matching_overload_item from mypy.state import state from mypy.subtypes import is_more_precise, is_proper_subtype, is_same_type, is_subtype from mypy.test.helpers import Suite, assert_equal, assert_type, skip @@ -1287,3 +1301,135 @@ def assert_union_result(self, t: ProperType, expected: list[Type]) -> None: t2 = remove_instance_last_known_values(t) assert type(t2) is UnionType assert t2.items == expected + + +class ShallowOverloadMatchingSuite(Suite): + def setUp(self) -> None: + self.fx = TypeFixture() + + def test_simple(self) -> None: + fx = self.fx + ov = self.make_overload([[("x", fx.anyt, ARG_NAMED)], [("y", fx.anyt, ARG_NAMED)]]) + # Match first only + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "x")), 0) + # Match second only + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "y")), 1) + # No match -- invalid keyword arg name + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "z")), 1) + # No match -- missing arg + self.assert_find_shallow_matching_overload_item(ov, 
make_call(), 1) + # No match -- extra arg + self.assert_find_shallow_matching_overload_item( + ov, make_call(("foo", "x"), ("foo", "z")), 1 + ) + + def test_match_using_types(self) -> None: + fx = self.fx + ov = self.make_overload( + [ + [("x", fx.nonet, ARG_POS)], + [("x", fx.lit_false, ARG_POS)], + [("x", fx.lit_true, ARG_POS)], + [("x", fx.anyt, ARG_POS)], + ] + ) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("builtins.False", None)), 1) + self.assert_find_shallow_matching_overload_item(ov, make_call(("builtins.True", None)), 2) + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", None)), 3) + + def test_none_special_cases(self) -> None: + fx = self.fx + ov = self.make_overload( + [[("x", fx.callable(fx.nonet), ARG_POS)], [("x", fx.nonet, ARG_POS)]] + ) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 1) + self.assert_find_shallow_matching_overload_item(ov, make_call(("func", None)), 0) + ov = self.make_overload([[("x", fx.str_type, ARG_POS)], [("x", fx.nonet, ARG_POS)]]) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 1) + self.assert_find_shallow_matching_overload_item(ov, make_call(("func", None)), 0) + ov = self.make_overload( + [[("x", UnionType([fx.str_type, fx.a]), ARG_POS)], [("x", fx.nonet, ARG_POS)]] + ) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 1) + self.assert_find_shallow_matching_overload_item(ov, make_call(("func", None)), 0) + ov = self.make_overload([[("x", fx.o, ARG_POS)], [("x", fx.nonet, ARG_POS)]]) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("func", None)), 0) + ov = self.make_overload( + [[("x", UnionType([fx.str_type, fx.nonet]), ARG_POS)], [("x", fx.nonet, ARG_POS)]] + ) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("func", None)), 0) + ov = self.make_overload([[("x", fx.anyt, ARG_POS)], [("x", fx.nonet, ARG_POS)]]) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", None)), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("func", None)), 0) + + def test_optional_arg(self) -> None: + fx = self.fx + ov = self.make_overload( + [[("x", fx.anyt, ARG_NAMED)], [("y", fx.anyt, ARG_OPT)], [("z", fx.anyt, ARG_NAMED)]] + ) + self.assert_find_shallow_matching_overload_item(ov, make_call(), 1) + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "x")), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "y")), 1) + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "z")), 2) + + def test_two_args(self) -> None: + fx = self.fx + ov = self.make_overload( + [ + [("x", fx.nonet, ARG_OPT), ("y", fx.anyt, ARG_OPT)], + [("x", fx.anyt, ARG_OPT), ("y", fx.anyt, ARG_OPT)], + ] + ) + self.assert_find_shallow_matching_overload_item(ov, make_call(), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("None", "x")), 0) + self.assert_find_shallow_matching_overload_item(ov, make_call(("foo", "x")), 1) + self.assert_find_shallow_matching_overload_item( + ov, make_call(("foo", "y"), ("None", "x")), 0 + ) + self.assert_find_shallow_matching_overload_item( + ov, make_call(("foo", "y"), ("bar", "x")), 1 + ) + + def 
assert_find_shallow_matching_overload_item( + self, ov: Overloaded, call: CallExpr, expected_index: int + ) -> None: + c = find_shallow_matching_overload_item(ov, call) + assert c in ov.items + assert ov.items.index(c) == expected_index + + def make_overload(self, items: list[list[tuple[str, Type, ArgKind]]]) -> Overloaded: + result = [] + for item in items: + arg_types = [] + arg_names = [] + arg_kinds = [] + for name, typ, kind in item: + arg_names.append(name) + arg_types.append(typ) + arg_kinds.append(kind) + result.append( + CallableType( + arg_types, arg_kinds, arg_names, ret_type=NoneType(), fallback=self.fx.o + ) + ) + return Overloaded(result) + + +def make_call(*items: tuple[str, str | None]) -> CallExpr: + args: list[Expression] = [] + arg_names = [] + arg_kinds = [] + for arg, name in items: + shortname = arg.split(".")[-1] + n = NameExpr(shortname) + n.fullname = arg + args.append(n) + arg_names.append(name) + if name: + arg_kinds.append(ARG_NAMED) + else: + arg_kinds.append(ARG_POS) + return CallExpr(NameExpr("f"), args, arg_kinds, arg_names) diff --git a/mypy/test/typefixture.py b/mypy/test/typefixture.py index d12e7abab0e2..1013b87c213f 100644 --- a/mypy/test/typefixture.py +++ b/mypy/test/typefixture.py @@ -136,6 +136,7 @@ def make_type_var( self.type_type = Instance(self.type_typei, []) # type self.function = Instance(self.functioni, []) # function TODO self.str_type = Instance(self.str_type_info, []) + self.bool_type = Instance(self.bool_type_info, []) self.a = Instance(self.ai, []) # A self.b = Instance(self.bi, []) # B self.c = Instance(self.ci, []) # C @@ -197,6 +198,9 @@ def make_type_var( self.lit_str2_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str2) self.lit_str3_inst = Instance(self.str_type_info, [], last_known_value=self.lit_str3) + self.lit_false = LiteralType(False, self.bool_type) + self.lit_true = LiteralType(True, self.bool_type) + self.type_a = TypeType.make_normalized(self.a) self.type_b = TypeType.make_normalized(self.b) self.type_c = TypeType.make_normalized(self.c) diff --git a/mypy/test/visitors.py b/mypy/test/visitors.py index 771119dbdc70..2b748ec1bdc4 100644 --- a/mypy/test/visitors.py +++ b/mypy/test/visitors.py @@ -8,15 +8,7 @@ from __future__ import annotations -from mypy.nodes import ( - AssignmentStmt, - CallExpr, - Expression, - IntExpr, - MypyFile, - NameExpr, - TypeVarExpr, -) +from mypy.nodes import AssignmentStmt, CallExpr, Expression, IntExpr, NameExpr, Node, TypeVarExpr from mypy.traverser import TraverserVisitor from mypy.treetransform import TransformVisitor from mypy.types import Type @@ -25,12 +17,8 @@ # from testtypegen class SkippedNodeSearcher(TraverserVisitor): def __init__(self) -> None: - self.nodes: set[Expression] = set() - self.is_typing = False - - def visit_mypy_file(self, f: MypyFile) -> None: - self.is_typing = f.fullname == "typing" or f.fullname == "builtins" - super().visit_mypy_file(f) + self.nodes: set[Node] = set() + self.ignore_file = False def visit_assignment_stmt(self, s: AssignmentStmt) -> None: if s.type or ignore_node(s.rvalue): @@ -40,14 +28,14 @@ def visit_assignment_stmt(self, s: AssignmentStmt) -> None: super().visit_assignment_stmt(s) def visit_name_expr(self, n: NameExpr) -> None: - self.skip_if_typing(n) + if self.ignore_file: + self.nodes.add(n) + super().visit_name_expr(n) def visit_int_expr(self, n: IntExpr) -> None: - self.skip_if_typing(n) - - def skip_if_typing(self, n: Expression) -> None: - if self.is_typing: + if self.ignore_file: self.nodes.add(n) + 
super().visit_int_expr(n) def ignore_node(node: Expression) -> bool: diff --git a/mypy/types.py b/mypy/types.py index 9858559ad5c1..994eb290fff3 100644 --- a/mypy/types.py +++ b/mypy/types.py @@ -183,7 +183,7 @@ class TypeOfAny: # Does this Any come from an error? from_error: Final = 5 # Is this a type that can't be represented in mypy's type system? For instance, type of - # call to NewType...). Even though these types aren't real Anys, we treat them as such. + # call to NewType(...). Even though these types aren't real Anys, we treat them as such. # Also used for variables named '_'. special_form: Final = 6 # Does this Any come from interaction with another Any? @@ -950,7 +950,8 @@ def __init__( def accept(self, visitor: TypeVisitor[T]) -> T: assert isinstance(visitor, SyntheticTypeVisitor) - return cast(T, visitor.visit_callable_argument(self)) + ret: T = visitor.visit_callable_argument(self) + return ret def serialize(self) -> JsonDict: assert False, "Synthetic types don't serialize" @@ -975,7 +976,8 @@ def __init__(self, items: list[Type], line: int = -1, column: int = -1) -> None: def accept(self, visitor: TypeVisitor[T]) -> T: assert isinstance(visitor, SyntheticTypeVisitor) - return cast(T, visitor.visit_type_list(self)) + ret: T = visitor.visit_type_list(self) + return ret def serialize(self) -> JsonDict: assert False, "Synthetic types don't serialize" @@ -1775,7 +1777,7 @@ def copy_modified( self: CT, arg_types: Bogus[Sequence[Type]] = _dummy, arg_kinds: Bogus[list[ArgKind]] = _dummy, - arg_names: Bogus[list[str | None]] = _dummy, + arg_names: Bogus[Sequence[str | None]] = _dummy, ret_type: Bogus[Type] = _dummy, fallback: Bogus[Instance] = _dummy, name: Bogus[str | None] = _dummy, @@ -1976,20 +1978,15 @@ def param_spec(self) -> ParamSpecType | None: arg_type = self.arg_types[-2] if not isinstance(arg_type, ParamSpecType): return None + # sometimes paramspectypes are analyzed in from mysterious places, # e.g. def f(prefix..., *args: P.args, **kwargs: P.kwargs) -> ...: ... 
prefix = arg_type.prefix if not prefix.arg_types: # TODO: confirm that all arg kinds are positional prefix = Parameters(self.arg_types[:-2], self.arg_kinds[:-2], self.arg_names[:-2]) - return ParamSpecType( - arg_type.name, - arg_type.fullname, - arg_type.id, - ParamSpecFlavor.BARE, - arg_type.upper_bound, - prefix=prefix, - ) + + return arg_type.copy_modified(flavor=ParamSpecFlavor.BARE, prefix=prefix) def expand_param_spec( self, c: CallableType | Parameters, no_prefix: bool = False @@ -2489,7 +2486,8 @@ def simple_name(self) -> str: def accept(self, visitor: TypeVisitor[T]) -> T: assert isinstance(visitor, SyntheticTypeVisitor) - return cast(T, visitor.visit_raw_expression_type(self)) + ret: T = visitor.visit_raw_expression_type(self) + return ret def serialize(self) -> JsonDict: assert False, "Synthetic types don't serialize" @@ -2736,7 +2734,8 @@ class EllipsisType(ProperType): def accept(self, visitor: TypeVisitor[T]) -> T: assert isinstance(visitor, SyntheticTypeVisitor) - return cast(T, visitor.visit_ellipsis_type(self)) + ret: T = visitor.visit_ellipsis_type(self) + return ret def serialize(self) -> JsonDict: assert False, "Synthetic types don't serialize" @@ -2845,7 +2844,8 @@ def __init__(self, fullname: str | None, args: list[Type], line: int) -> None: def accept(self, visitor: TypeVisitor[T]) -> T: assert isinstance(visitor, SyntheticTypeVisitor) - return cast(T, visitor.visit_placeholder_type(self)) + ret: T = visitor.visit_placeholder_type(self) + return ret def __hash__(self) -> int: return hash((self.fullname, tuple(self.args))) diff --git a/mypy/typeshed/stdlib/VERSIONS b/mypy/typeshed/stdlib/VERSIONS index bd1abd204885..d24aa35faf6e 100644 --- a/mypy/typeshed/stdlib/VERSIONS +++ b/mypy/typeshed/stdlib/VERSIONS @@ -152,6 +152,7 @@ importlib: 2.7- importlib.metadata: 3.8- importlib.metadata._meta: 3.10- importlib.resources: 3.7- +importlib.resources.abc: 3.11- inspect: 2.7- io: 2.7- ipaddress: 3.3- diff --git a/mypy/typeshed/stdlib/_bisect.pyi b/mypy/typeshed/stdlib/_bisect.pyi index 4c79eec14d72..58488e3d15af 100644 --- a/mypy/typeshed/stdlib/_bisect.pyi +++ b/mypy/typeshed/stdlib/_bisect.pyi @@ -1,6 +1,6 @@ import sys -from _typeshed import SupportsRichComparisonT -from collections.abc import Callable, MutableSequence, Sequence +from _typeshed import SupportsLenAndGetItem, SupportsRichComparisonT +from collections.abc import Callable, MutableSequence from typing import TypeVar, overload _T = TypeVar("_T") @@ -8,11 +8,16 @@ _T = TypeVar("_T") if sys.version_info >= (3, 10): @overload def bisect_left( - a: Sequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None, *, key: None = None + a: SupportsLenAndGetItem[SupportsRichComparisonT], + x: SupportsRichComparisonT, + lo: int = 0, + hi: int | None = None, + *, + key: None = None, ) -> int: ... @overload def bisect_left( - a: Sequence[_T], + a: SupportsLenAndGetItem[_T], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None, @@ -21,11 +26,16 @@ if sys.version_info >= (3, 10): ) -> int: ... @overload def bisect_right( - a: Sequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None, *, key: None = None + a: SupportsLenAndGetItem[SupportsRichComparisonT], + x: SupportsRichComparisonT, + lo: int = 0, + hi: int | None = None, + *, + key: None = None, ) -> int: ... 
@overload def bisect_right( - a: Sequence[_T], + a: SupportsLenAndGetItem[_T], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None, @@ -61,10 +71,10 @@ if sys.version_info >= (3, 10): else: def bisect_left( - a: Sequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None + a: SupportsLenAndGetItem[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None ) -> int: ... def bisect_right( - a: Sequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None + a: SupportsLenAndGetItem[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None ) -> int: ... def insort_left( a: MutableSequence[SupportsRichComparisonT], x: SupportsRichComparisonT, lo: int = 0, hi: int | None = None diff --git a/mypy/typeshed/stdlib/_csv.pyi b/mypy/typeshed/stdlib/_csv.pyi index 7e9b9e4e7a79..c9b9f47e6217 100644 --- a/mypy/typeshed/stdlib/_csv.pyi +++ b/mypy/typeshed/stdlib/_csv.pyi @@ -43,42 +43,42 @@ class _writer: def writer( csvfile: SupportsWrite[str], - dialect: _DialectLike = ..., + dialect: _DialectLike = "excel", *, - delimiter: str = ..., - quotechar: str | None = ..., - escapechar: str | None = ..., - doublequote: bool = ..., - skipinitialspace: bool = ..., - lineterminator: str = ..., - quoting: _QuotingType = ..., - strict: bool = ..., + delimiter: str = ",", + quotechar: str | None = '"', + escapechar: str | None = None, + doublequote: bool = True, + skipinitialspace: bool = False, + lineterminator: str = "\r\n", + quoting: _QuotingType = 0, + strict: bool = False, ) -> _writer: ... def reader( csvfile: Iterable[str], - dialect: _DialectLike = ..., + dialect: _DialectLike = "excel", *, - delimiter: str = ..., - quotechar: str | None = ..., - escapechar: str | None = ..., - doublequote: bool = ..., - skipinitialspace: bool = ..., - lineterminator: str = ..., - quoting: _QuotingType = ..., - strict: bool = ..., + delimiter: str = ",", + quotechar: str | None = '"', + escapechar: str | None = None, + doublequote: bool = True, + skipinitialspace: bool = False, + lineterminator: str = "\r\n", + quoting: _QuotingType = 0, + strict: bool = False, ) -> _reader: ... def register_dialect( name: str, - dialect: Any = ..., + dialect: type[Dialect] = ..., *, - delimiter: str = ..., - quotechar: str | None = ..., - escapechar: str | None = ..., - doublequote: bool = ..., - skipinitialspace: bool = ..., - lineterminator: str = ..., - quoting: _QuotingType = ..., - strict: bool = ..., + delimiter: str = ",", + quotechar: str | None = '"', + escapechar: str | None = None, + doublequote: bool = True, + skipinitialspace: bool = False, + lineterminator: str = "\r\n", + quoting: _QuotingType = 0, + strict: bool = False, ) -> None: ... def unregister_dialect(name: str) -> None: ... def get_dialect(name: str) -> Dialect: ... diff --git a/mypy/typeshed/stdlib/_decimal.pyi b/mypy/typeshed/stdlib/_decimal.pyi index b8208fe180a1..60c609456954 100644 --- a/mypy/typeshed/stdlib/_decimal.pyi +++ b/mypy/typeshed/stdlib/_decimal.pyi @@ -77,28 +77,28 @@ class Decimal: def as_integer_ratio(self) -> tuple[int, int]: ... def to_eng_string(self, context: Context | None = None) -> str: ... def __abs__(self) -> Decimal: ... - def __add__(self, __other: _Decimal) -> Decimal: ... - def __divmod__(self, __other: _Decimal) -> tuple[Decimal, Decimal]: ... - def __eq__(self, __other: object) -> bool: ... - def __floordiv__(self, __other: _Decimal) -> Decimal: ... 
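The switch from Sequence to SupportsLenAndGetItem in _bisect.pyi above is visustrated best with objects that only implement __len__ and __getitem__; a small sketch (the SortedSquares class is invented for illustration):

from bisect import bisect_left

class SortedSquares:
    """Indexable and sized, but not a collections.abc.Sequence subclass."""

    def __len__(self) -> int:
        return 10

    def __getitem__(self, i: int) -> int:
        return i * i

# bisect only ever calls len() and indexing, so this works at runtime and is now
# accepted by the stub as well (SupportsLenAndGetItem instead of Sequence).
print(bisect_left(SortedSquares(), 30))  # 6, because 5*5 = 25 < 30 <= 36 = 6*6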
- def __ge__(self, __other: _ComparableNum) -> bool: ... - def __gt__(self, __other: _ComparableNum) -> bool: ... - def __le__(self, __other: _ComparableNum) -> bool: ... - def __lt__(self, __other: _ComparableNum) -> bool: ... - def __mod__(self, __other: _Decimal) -> Decimal: ... - def __mul__(self, __other: _Decimal) -> Decimal: ... + def __add__(self, __value: _Decimal) -> Decimal: ... + def __divmod__(self, __value: _Decimal) -> tuple[Decimal, Decimal]: ... + def __eq__(self, __value: object) -> bool: ... + def __floordiv__(self, __value: _Decimal) -> Decimal: ... + def __ge__(self, __value: _ComparableNum) -> bool: ... + def __gt__(self, __value: _ComparableNum) -> bool: ... + def __le__(self, __value: _ComparableNum) -> bool: ... + def __lt__(self, __value: _ComparableNum) -> bool: ... + def __mod__(self, __value: _Decimal) -> Decimal: ... + def __mul__(self, __value: _Decimal) -> Decimal: ... def __neg__(self) -> Decimal: ... def __pos__(self) -> Decimal: ... - def __pow__(self, __other: _Decimal, __modulo: _Decimal | None = ...) -> Decimal: ... - def __radd__(self, __other: _Decimal) -> Decimal: ... - def __rdivmod__(self, __other: _Decimal) -> tuple[Decimal, Decimal]: ... - def __rfloordiv__(self, __other: _Decimal) -> Decimal: ... - def __rmod__(self, __other: _Decimal) -> Decimal: ... - def __rmul__(self, __other: _Decimal) -> Decimal: ... - def __rsub__(self, __other: _Decimal) -> Decimal: ... - def __rtruediv__(self, __other: _Decimal) -> Decimal: ... - def __sub__(self, __other: _Decimal) -> Decimal: ... - def __truediv__(self, __other: _Decimal) -> Decimal: ... + def __pow__(self, __value: _Decimal, __mod: _Decimal | None = None) -> Decimal: ... + def __radd__(self, __value: _Decimal) -> Decimal: ... + def __rdivmod__(self, __value: _Decimal) -> tuple[Decimal, Decimal]: ... + def __rfloordiv__(self, __value: _Decimal) -> Decimal: ... + def __rmod__(self, __value: _Decimal) -> Decimal: ... + def __rmul__(self, __value: _Decimal) -> Decimal: ... + def __rsub__(self, __value: _Decimal) -> Decimal: ... + def __rtruediv__(self, __value: _Decimal) -> Decimal: ... + def __sub__(self, __value: _Decimal) -> Decimal: ... + def __truediv__(self, __value: _Decimal) -> Decimal: ... def remainder_near(self, other: _Decimal, context: Context | None = None) -> Decimal: ... def __float__(self) -> float: ... def __int__(self) -> int: ... @@ -116,7 +116,7 @@ class Decimal: def __floor__(self) -> int: ... def __ceil__(self) -> int: ... def fma(self, other: _Decimal, third: _Decimal, context: Context | None = None) -> Decimal: ... - def __rpow__(self, __other: _Decimal, __context: Context | None = ...) -> Decimal: ... + def __rpow__(self, __value: _Decimal, __mod: Context | None = None) -> Decimal: ... def normalize(self, context: Context | None = None) -> Decimal: ... def quantize(self, exp: _Decimal, rounding: str | None = None, context: Context | None = None) -> Decimal: ... def same_quantum(self, other: _Decimal, context: Context | None = None) -> bool: ... diff --git a/mypy/typeshed/stdlib/_tkinter.pyi b/mypy/typeshed/stdlib/_tkinter.pyi index 271fd37df68b..89610e21d9e7 100644 --- a/mypy/typeshed/stdlib/_tkinter.pyi +++ b/mypy/typeshed/stdlib/_tkinter.pyi @@ -22,12 +22,12 @@ class Tcl_Obj: @property def typename(self) -> str: ... __hash__: ClassVar[None] # type: ignore[assignment] - def __eq__(self, __other): ... - def __ge__(self, __other): ... - def __gt__(self, __other): ... - def __le__(self, __other): ... - def __lt__(self, __other): ... - def __ne__(self, __other): ... 
+ def __eq__(self, __value): ... + def __ge__(self, __value): ... + def __gt__(self, __value): ... + def __le__(self, __value): ... + def __lt__(self, __value): ... + def __ne__(self, __value): ... class TclError(Exception): ... diff --git a/mypy/typeshed/stdlib/_typeshed/__init__.pyi b/mypy/typeshed/stdlib/_typeshed/__init__.pyi index d0c6b3ab1173..3c29032b6b0d 100644 --- a/mypy/typeshed/stdlib/_typeshed/__init__.pyi +++ b/mypy/typeshed/stdlib/_typeshed/__init__.pyi @@ -293,7 +293,7 @@ class structseq(Generic[_T_co]): # https://github.com/python/typeshed/pull/6560#discussion_r767149830 def __new__(cls: type[Self], sequence: Iterable[_T_co], dict: dict[str, Any] = ...) -> Self: ... -# Superset of typing.AnyStr that also inclues LiteralString +# Superset of typing.AnyStr that also includes LiteralString AnyOrLiteralStr = TypeVar("AnyOrLiteralStr", str, bytes, LiteralString) # noqa: Y001 # Represents when str or LiteralStr is acceptable. Useful for string processing diff --git a/mypy/typeshed/stdlib/_weakref.pyi b/mypy/typeshed/stdlib/_weakref.pyi index 2a43de3ffd6b..b6044fac4628 100644 --- a/mypy/typeshed/stdlib/_weakref.pyi +++ b/mypy/typeshed/stdlib/_weakref.pyi @@ -20,7 +20,7 @@ class ProxyType(Generic[_T]): # "weakproxy" class ReferenceType(Generic[_T]): __callback__: Callable[[ReferenceType[_T]], Any] - def __new__(cls, o: _T, callback: Callable[[ReferenceType[_T]], Any] | None = ...) -> Self: ... + def __new__(cls, __o: _T, __callback: Callable[[ReferenceType[_T]], Any] | None = ...) -> Self: ... def __call__(self) -> _T | None: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... diff --git a/mypy/typeshed/stdlib/abc.pyi b/mypy/typeshed/stdlib/abc.pyi index 068dab4752be..ec04d8f85d12 100644 --- a/mypy/typeshed/stdlib/abc.pyi +++ b/mypy/typeshed/stdlib/abc.pyi @@ -2,12 +2,13 @@ import _typeshed import sys from _typeshed import SupportsWrite from collections.abc import Callable -from typing import Any, Generic, TypeVar -from typing_extensions import Literal +from typing import Any, TypeVar +from typing_extensions import Concatenate, Literal, ParamSpec _T = TypeVar("_T") _R_co = TypeVar("_R_co", covariant=True) _FuncT = TypeVar("_FuncT", bound=Callable[..., Any]) +_P = ParamSpec("_P") # These definitions have special processing in mypy class ABCMeta(type): @@ -28,13 +29,13 @@ class ABCMeta(type): def abstractmethod(funcobj: _FuncT) -> _FuncT: ... -class abstractclassmethod(classmethod[_R_co], Generic[_R_co]): +class abstractclassmethod(classmethod[_T, _P, _R_co]): __isabstractmethod__: Literal[True] - def __init__(self: abstractclassmethod[_R_co], callable: Callable[..., _R_co]) -> None: ... + def __init__(self, callable: Callable[Concatenate[_T, _P], _R_co]) -> None: ... -class abstractstaticmethod(staticmethod[_R_co], Generic[_R_co]): +class abstractstaticmethod(staticmethod[_P, _R_co]): __isabstractmethod__: Literal[True] - def __init__(self, callable: Callable[..., _R_co]) -> None: ... + def __init__(self, callable: Callable[_P, _R_co]) -> None: ... class abstractproperty(property): __isabstractmethod__: Literal[True] diff --git a/mypy/typeshed/stdlib/argparse.pyi b/mypy/typeshed/stdlib/argparse.pyi index 20d9dfa9d137..eb0b707bafaa 100644 --- a/mypy/typeshed/stdlib/argparse.pyi +++ b/mypy/typeshed/stdlib/argparse.pyi @@ -161,17 +161,12 @@ class ArgumentParser(_AttributeHolder, _ActionsContainer): add_help: bool = True, allow_abbrev: bool = True, ) -> None: ... - # The type-ignores in these overloads should be temporary. 
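The abstractclassmethod/abstractstaticmethod stubs above simply inherit the new ParamSpec-based classmethod/staticmethod parameters; in practice both classes have long been deprecated in favour of stacking the plain decorators, e.g. (Loader is a made-up example):

from abc import ABC, abstractmethod

class Loader(ABC):
    @classmethod
    @abstractmethod
    def from_path(cls, path: str) -> "Loader":
        """Subclasses must provide a constructor-style classmethod."""

class JsonLoader(Loader):
    @classmethod
    def from_path(cls, path: str) -> "JsonLoader":
        return cls()

loader = JsonLoader.from_path("config.json")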
See: - # https://github.com/python/typeshed/pull/2643#issuecomment-442280277 + # Ignore errors about overlapping overloads @overload - def parse_args(self, args: Sequence[str] | None = None) -> Namespace: ... - @overload - def parse_args(self, args: Sequence[str] | None, namespace: None) -> Namespace: ... # type: ignore[misc] + def parse_args(self, args: Sequence[str] | None = None, namespace: None = None) -> Namespace: ... # type: ignore[misc] @overload def parse_args(self, args: Sequence[str] | None, namespace: _N) -> _N: ... @overload - def parse_args(self, *, namespace: None) -> Namespace: ... # type: ignore[misc] - @overload def parse_args(self, *, namespace: _N) -> _N: ... @overload def add_subparsers( diff --git a/mypy/typeshed/stdlib/array.pyi b/mypy/typeshed/stdlib/array.pyi index 827bbb97897f..38a815b584cd 100644 --- a/mypy/typeshed/stdlib/array.pyi +++ b/mypy/typeshed/stdlib/array.pyi @@ -61,23 +61,23 @@ class array(MutableSequence[_T], Generic[_T]): def __len__(self) -> int: ... @overload - def __getitem__(self, __i: SupportsIndex) -> _T: ... + def __getitem__(self, __key: SupportsIndex) -> _T: ... @overload - def __getitem__(self, __s: slice) -> array[_T]: ... + def __getitem__(self, __key: slice) -> array[_T]: ... @overload # type: ignore[override] - def __setitem__(self, __i: SupportsIndex, __o: _T) -> None: ... + def __setitem__(self, __key: SupportsIndex, __value: _T) -> None: ... @overload - def __setitem__(self, __s: slice, __o: array[_T]) -> None: ... - def __delitem__(self, __i: SupportsIndex | slice) -> None: ... - def __add__(self, __x: array[_T]) -> array[_T]: ... - def __ge__(self, __other: array[_T]) -> bool: ... - def __gt__(self, __other: array[_T]) -> bool: ... - def __iadd__(self, __x: array[_T]) -> Self: ... # type: ignore[override] - def __imul__(self, __n: int) -> Self: ... - def __le__(self, __other: array[_T]) -> bool: ... - def __lt__(self, __other: array[_T]) -> bool: ... - def __mul__(self, __n: int) -> array[_T]: ... - def __rmul__(self, __n: int) -> array[_T]: ... + def __setitem__(self, __key: slice, __value: array[_T]) -> None: ... + def __delitem__(self, __key: SupportsIndex | slice) -> None: ... + def __add__(self, __value: array[_T]) -> array[_T]: ... + def __ge__(self, __value: array[_T]) -> bool: ... + def __gt__(self, __value: array[_T]) -> bool: ... + def __iadd__(self, __value: array[_T]) -> Self: ... # type: ignore[override] + def __imul__(self, __value: int) -> Self: ... + def __le__(self, __value: array[_T]) -> bool: ... + def __lt__(self, __value: array[_T]) -> bool: ... + def __mul__(self, __value: int) -> array[_T]: ... + def __rmul__(self, __value: int) -> array[_T]: ... def __copy__(self) -> array[_T]: ... def __deepcopy__(self, __unused: Any) -> array[_T]: ... diff --git a/mypy/typeshed/stdlib/asyncio/base_subprocess.pyi b/mypy/typeshed/stdlib/asyncio/base_subprocess.pyi index 597c8302988e..8f262cd5c760 100644 --- a/mypy/typeshed/stdlib/asyncio/base_subprocess.pyi +++ b/mypy/typeshed/stdlib/asyncio/base_subprocess.pyi @@ -46,7 +46,7 @@ class BaseSubprocessTransport(transports.SubprocessTransport): def get_pid(self) -> int | None: ... # type: ignore[override] def get_pipe_transport(self, fd: int) -> _File: ... # type: ignore[override] def _check_proc(self) -> None: ... # undocumented - def send_signal(self, signal: int) -> None: ... # type: ignore[override] + def send_signal(self, signal: int) -> None: ... async def _connect_pipes(self, waiter: futures.Future[Any] | None) -> None: ... 
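The consolidated parse_args overloads above still distinguish the same two call shapes; for example (the Opts class is illustrative):

import argparse

class Opts(argparse.Namespace):
    verbose: bool = False

parser = argparse.ArgumentParser()
parser.add_argument("--verbose", action="store_true")

ns = parser.parse_args([])                        # no namespace -> argparse.Namespace
opts = parser.parse_args(["--verbose"], Opts())   # explicit namespace -> Opts
print(ns.verbose, opts.verbose)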
# undocumented def _call(self, cb: Callable[..., object], *data: Any) -> None: ... # undocumented def _pipe_connection_lost(self, fd: int, exc: BaseException | None) -> None: ... # undocumented diff --git a/mypy/typeshed/stdlib/asyncio/subprocess.pyi b/mypy/typeshed/stdlib/asyncio/subprocess.pyi index 10a414f24537..b8877b360527 100644 --- a/mypy/typeshed/stdlib/asyncio/subprocess.pyi +++ b/mypy/typeshed/stdlib/asyncio/subprocess.pyi @@ -54,24 +54,24 @@ if sys.version_info >= (3, 11): bufsize: Literal[0] = 0, encoding: None = None, errors: None = None, - text: Literal[False, None] = ..., + text: Literal[False, None] = None, # These parameters are taken by subprocess.Popen, which this ultimately delegates to - executable: StrOrBytesPath | None = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: subprocess._ENV | None = ..., - startupinfo: Any | None = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + executable: StrOrBytesPath | None = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + cwd: StrOrBytesPath | None = None, + env: subprocess._ENV | None = None, + startupinfo: Any | None = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., - group: None | str | int = ..., - extra_groups: None | Collection[str | int] = ..., - user: None | str | int = ..., - umask: int = ..., - process_group: int | None = ..., - pipesize: int = ..., + group: None | str | int = None, + extra_groups: None | Collection[str | int] = None, + user: None | str | int = None, + umask: int = -1, + process_group: int | None = None, + pipesize: int = -1, ) -> Process: ... async def create_subprocess_exec( program: _ExecArg, @@ -87,23 +87,23 @@ if sys.version_info >= (3, 11): encoding: None = None, errors: None = None, # These parameters are taken by subprocess.Popen, which this ultimately delegates to - text: bool | None = ..., - executable: StrOrBytesPath | None = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: subprocess._ENV | None = ..., - startupinfo: Any | None = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + text: bool | None = None, + executable: StrOrBytesPath | None = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + cwd: StrOrBytesPath | None = None, + env: subprocess._ENV | None = None, + startupinfo: Any | None = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., - group: None | str | int = ..., - extra_groups: None | Collection[str | int] = ..., - user: None | str | int = ..., - umask: int = ..., - process_group: int | None = ..., - pipesize: int = ..., + group: None | str | int = None, + extra_groups: None | Collection[str | int] = None, + user: None | str | int = None, + umask: int = -1, + process_group: int | None = None, + pipesize: int = -1, ) -> Process: ... 
elif sys.version_info >= (3, 10): @@ -120,23 +120,23 @@ elif sys.version_info >= (3, 10): bufsize: Literal[0] = 0, encoding: None = None, errors: None = None, - text: Literal[False, None] = ..., + text: Literal[False, None] = None, # These parameters are taken by subprocess.Popen, which this ultimately delegates to - executable: StrOrBytesPath | None = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: subprocess._ENV | None = ..., - startupinfo: Any | None = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + executable: StrOrBytesPath | None = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + cwd: StrOrBytesPath | None = None, + env: subprocess._ENV | None = None, + startupinfo: Any | None = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., - group: None | str | int = ..., - extra_groups: None | Collection[str | int] = ..., - user: None | str | int = ..., - umask: int = ..., - pipesize: int = ..., + group: None | str | int = None, + extra_groups: None | Collection[str | int] = None, + user: None | str | int = None, + umask: int = -1, + pipesize: int = -1, ) -> Process: ... async def create_subprocess_exec( program: _ExecArg, @@ -152,22 +152,22 @@ elif sys.version_info >= (3, 10): encoding: None = None, errors: None = None, # These parameters are taken by subprocess.Popen, which this ultimately delegates to - text: bool | None = ..., - executable: StrOrBytesPath | None = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: subprocess._ENV | None = ..., - startupinfo: Any | None = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + text: bool | None = None, + executable: StrOrBytesPath | None = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + cwd: StrOrBytesPath | None = None, + env: subprocess._ENV | None = None, + startupinfo: Any | None = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., - group: None | str | int = ..., - extra_groups: None | Collection[str | int] = ..., - user: None | str | int = ..., - umask: int = ..., - pipesize: int = ..., + group: None | str | int = None, + extra_groups: None | Collection[str | int] = None, + user: None | str | int = None, + umask: int = -1, + pipesize: int = -1, ) -> Process: ... 
else: # >= 3.9 @@ -185,22 +185,22 @@ else: # >= 3.9 bufsize: Literal[0] = 0, encoding: None = None, errors: None = None, - text: Literal[False, None] = ..., + text: Literal[False, None] = None, # These parameters are taken by subprocess.Popen, which this ultimately delegates to - executable: StrOrBytesPath | None = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: subprocess._ENV | None = ..., - startupinfo: Any | None = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + executable: StrOrBytesPath | None = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + cwd: StrOrBytesPath | None = None, + env: subprocess._ENV | None = None, + startupinfo: Any | None = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., - group: None | str | int = ..., - extra_groups: None | Collection[str | int] = ..., - user: None | str | int = ..., - umask: int = ..., + group: None | str | int = None, + extra_groups: None | Collection[str | int] = None, + user: None | str | int = None, + umask: int = -1, ) -> Process: ... async def create_subprocess_exec( program: _ExecArg, @@ -217,19 +217,19 @@ else: # >= 3.9 encoding: None = None, errors: None = None, # These parameters are taken by subprocess.Popen, which this ultimately delegates to - text: bool | None = ..., - executable: StrOrBytesPath | None = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: subprocess._ENV | None = ..., - startupinfo: Any | None = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + text: bool | None = None, + executable: StrOrBytesPath | None = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + cwd: StrOrBytesPath | None = None, + env: subprocess._ENV | None = None, + startupinfo: Any | None = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., - group: None | str | int = ..., - extra_groups: None | Collection[str | int] = ..., - user: None | str | int = ..., - umask: int = ..., + group: None | str | int = None, + extra_groups: None | Collection[str | int] = None, + user: None | str | int = None, + umask: int = -1, ) -> Process: ... diff --git a/mypy/typeshed/stdlib/asyncio/tasks.pyi b/mypy/typeshed/stdlib/asyncio/tasks.pyi index 0a44255a3ac8..308453709269 100644 --- a/mypy/typeshed/stdlib/asyncio/tasks.pyi +++ b/mypy/typeshed/stdlib/asyncio/tasks.pyi @@ -140,7 +140,7 @@ if sys.version_info >= (3, 10): tuple[_T1 | BaseException, _T2 | BaseException, _T3 | BaseException, _T4 | BaseException, _T5 | BaseException] ]: ... @overload - def gather(*coros_or_futures: _FutureLike[Any], return_exceptions: bool = False) -> Future[list[Any]]: ... # type: ignore[misc] + def gather(*coros_or_futures: _FutureLike[Any], return_exceptions: bool = False) -> Future[list[Any]]: ... else: @overload @@ -230,7 +230,7 @@ else: tuple[_T1 | BaseException, _T2 | BaseException, _T3 | BaseException, _T4 | BaseException, _T5 | BaseException] ]: ... @overload - def gather( # type: ignore[misc] + def gather( *coros_or_futures: _FutureLike[Any], loop: AbstractEventLoop | None = None, return_exceptions: bool = False ) -> Future[list[Any]]: ... 
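The asyncio.create_subprocess_* defaults spelled out above are the subprocess.Popen defaults that these helpers forward; a typical call only overrides a few of them (this sketch assumes a POSIX echo executable):

import asyncio

async def main() -> None:
    # close_fds=True, start_new_session=False, umask=-1, ... are taken as-is from Popen.
    proc = await asyncio.create_subprocess_exec(
        "echo", "hello", stdout=asyncio.subprocess.PIPE
    )
    out, _ = await proc.communicate()
    print(out.decode().strip())

asyncio.run(main())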
diff --git a/mypy/typeshed/stdlib/builtins.pyi b/mypy/typeshed/stdlib/builtins.pyi index 7b8e25084c91..39b64afed5c5 100644 --- a/mypy/typeshed/stdlib/builtins.pyi +++ b/mypy/typeshed/stdlib/builtins.pyi @@ -54,7 +54,7 @@ from typing import ( # noqa: Y022 overload, type_check_only, ) -from typing_extensions import Literal, Self, SupportsIndex, TypeAlias, TypeGuard, final +from typing_extensions import Concatenate, Literal, ParamSpec, Self, SupportsIndex, TypeAlias, TypeGuard, final if sys.version_info >= (3, 9): from types import GenericAlias @@ -75,6 +75,7 @@ _SupportsNextT = TypeVar("_SupportsNextT", bound=SupportsNext[Any], covariant=Tr _SupportsAnextT = TypeVar("_SupportsAnextT", bound=SupportsAnext[Any], covariant=True) _AwaitableT = TypeVar("_AwaitableT", bound=Awaitable[Any]) _AwaitableT_co = TypeVar("_AwaitableT_co", bound=Awaitable[Any], covariant=True) +_P = ParamSpec("_P") class object: __doc__: str | None @@ -92,8 +93,8 @@ class object: # Overriding them in subclasses has different semantics, even if the override has an identical signature. def __setattr__(self, __name: str, __value: Any) -> None: ... def __delattr__(self, __name: str) -> None: ... - def __eq__(self, __o: object) -> bool: ... - def __ne__(self, __o: object) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... def __str__(self) -> str: ... # noqa: Y029 def __repr__(self) -> str: ... # noqa: Y029 def __hash__(self) -> int: ... @@ -110,33 +111,35 @@ class object: def __dir__(self) -> Iterable[str]: ... def __init_subclass__(cls) -> None: ... + @classmethod + def __subclasshook__(cls, __subclass: type) -> bool: ... -class staticmethod(Generic[_R_co]): +class staticmethod(Generic[_P, _R_co]): @property - def __func__(self) -> Callable[..., _R_co]: ... + def __func__(self) -> Callable[_P, _R_co]: ... @property def __isabstractmethod__(self) -> bool: ... - def __init__(self: staticmethod[_R_co], __f: Callable[..., _R_co]) -> None: ... - def __get__(self, __obj: _T, __type: type[_T] | None = ...) -> Callable[..., _R_co]: ... + def __init__(self, __f: Callable[_P, _R_co]) -> None: ... + def __get__(self, __instance: _T, __owner: type[_T] | None = None) -> Callable[_P, _R_co]: ... if sys.version_info >= (3, 10): __name__: str __qualname__: str @property - def __wrapped__(self) -> Callable[..., _R_co]: ... - def __call__(self, *args: Any, **kwargs: Any) -> _R_co: ... + def __wrapped__(self) -> Callable[_P, _R_co]: ... + def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R_co: ... -class classmethod(Generic[_R_co]): +class classmethod(Generic[_T, _P, _R_co]): @property - def __func__(self) -> Callable[..., _R_co]: ... + def __func__(self) -> Callable[Concatenate[_T, _P], _R_co]: ... @property def __isabstractmethod__(self) -> bool: ... - def __init__(self: classmethod[_R_co], __f: Callable[..., _R_co]) -> None: ... - def __get__(self, __obj: _T, __type: type[_T] | None = ...) -> Callable[..., _R_co]: ... + def __init__(self, __f: Callable[Concatenate[_T, _P], _R_co]) -> None: ... + def __get__(self, __instance: _T, __owner: type[_T] | None = None) -> Callable[_P, _R_co]: ... if sys.version_info >= (3, 10): __name__: str __qualname__: str @property - def __wrapped__(self) -> Callable[..., _R_co]: ... + def __wrapped__(self) -> Callable[Concatenate[_T, _P], _R_co]: ... class type: @property @@ -181,8 +184,8 @@ class type: @classmethod def __prepare__(metacls, __name: str, __bases: tuple[type, ...], **kwds: Any) -> Mapping[str, object]: ... 
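The ParamSpec-based staticmethod/classmethod stubs above record the wrapped callable's exact parameter list instead of Callable[..., _R_co]; a rough illustration of what that preserves (Greeter is a made-up class):

class Greeter:
    @classmethod
    def make(cls, name: str) -> "Greeter":
        return cls()

    @staticmethod
    def shout(msg: str) -> str:
        return msg.upper()

# Per the new stubs, accessing these through the class yields callables whose
# parameter types are kept: roughly (name: str) -> Greeter and (msg: str) -> str,
# rather than Callable[..., Greeter] / Callable[..., str].
g = Greeter.make("world")
print(Greeter.shout("hi"))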
if sys.version_info >= (3, 10): - def __or__(self, __t: Any) -> types.UnionType: ... - def __ror__(self, __t: Any) -> types.UnionType: ... + def __or__(self, __value: Any) -> types.UnionType: ... + def __ror__(self, __value: Any) -> types.UnionType: ... class super: @overload @@ -240,45 +243,45 @@ class int: signed: bool = False, ) -> Self: ... - def __add__(self, __x: int) -> int: ... - def __sub__(self, __x: int) -> int: ... - def __mul__(self, __x: int) -> int: ... - def __floordiv__(self, __x: int) -> int: ... - def __truediv__(self, __x: int) -> float: ... - def __mod__(self, __x: int) -> int: ... - def __divmod__(self, __x: int) -> tuple[int, int]: ... - def __radd__(self, __x: int) -> int: ... - def __rsub__(self, __x: int) -> int: ... - def __rmul__(self, __x: int) -> int: ... - def __rfloordiv__(self, __x: int) -> int: ... - def __rtruediv__(self, __x: int) -> float: ... - def __rmod__(self, __x: int) -> int: ... - def __rdivmod__(self, __x: int) -> tuple[int, int]: ... + def __add__(self, __value: int) -> int: ... + def __sub__(self, __value: int) -> int: ... + def __mul__(self, __value: int) -> int: ... + def __floordiv__(self, __value: int) -> int: ... + def __truediv__(self, __value: int) -> float: ... + def __mod__(self, __value: int) -> int: ... + def __divmod__(self, __value: int) -> tuple[int, int]: ... + def __radd__(self, __value: int) -> int: ... + def __rsub__(self, __value: int) -> int: ... + def __rmul__(self, __value: int) -> int: ... + def __rfloordiv__(self, __value: int) -> int: ... + def __rtruediv__(self, __value: int) -> float: ... + def __rmod__(self, __value: int) -> int: ... + def __rdivmod__(self, __value: int) -> tuple[int, int]: ... @overload def __pow__(self, __x: Literal[0]) -> Literal[1]: ... @overload - def __pow__(self, __x: Literal[0], __modulo: None) -> Literal[1]: ... + def __pow__(self, __value: Literal[0], __mod: None) -> Literal[1]: ... @overload - def __pow__(self, __x: _PositiveInteger, __modulo: None = None) -> int: ... + def __pow__(self, __value: _PositiveInteger, __mod: None = None) -> int: ... @overload - def __pow__(self, __x: _NegativeInteger, __modulo: None = None) -> float: ... + def __pow__(self, __value: _NegativeInteger, __mod: None = None) -> float: ... # positive x -> int; negative x -> float # return type must be Any as `int | float` causes too many false-positive errors @overload - def __pow__(self, __x: int, __modulo: None = None) -> Any: ... - @overload - def __pow__(self, __x: int, __modulo: int) -> int: ... - def __rpow__(self, __x: int, __mod: int | None = None) -> Any: ... - def __and__(self, __n: int) -> int: ... - def __or__(self, __n: int) -> int: ... - def __xor__(self, __n: int) -> int: ... - def __lshift__(self, __n: int) -> int: ... - def __rshift__(self, __n: int) -> int: ... - def __rand__(self, __n: int) -> int: ... - def __ror__(self, __n: int) -> int: ... - def __rxor__(self, __n: int) -> int: ... - def __rlshift__(self, __n: int) -> int: ... - def __rrshift__(self, __n: int) -> int: ... + def __pow__(self, __value: int, __mod: None = None) -> Any: ... + @overload + def __pow__(self, __value: int, __mod: int) -> int: ... + def __rpow__(self, __value: int, __mod: int | None = None) -> Any: ... + def __and__(self, __value: int) -> int: ... + def __or__(self, __value: int) -> int: ... + def __xor__(self, __value: int) -> int: ... + def __lshift__(self, __value: int) -> int: ... + def __rshift__(self, __value: int) -> int: ... + def __rand__(self, __value: int) -> int: ... 
+ def __ror__(self, __value: int) -> int: ... + def __rxor__(self, __value: int) -> int: ... + def __rlshift__(self, __value: int) -> int: ... + def __rrshift__(self, __value: int) -> int: ... def __neg__(self) -> int: ... def __pos__(self) -> int: ... def __invert__(self) -> int: ... @@ -287,12 +290,12 @@ class int: def __floor__(self) -> int: ... def __round__(self, __ndigits: SupportsIndex = ...) -> int: ... def __getnewargs__(self) -> tuple[int]: ... - def __eq__(self, __x: object) -> bool: ... - def __ne__(self, __x: object) -> bool: ... - def __lt__(self, __x: int) -> bool: ... - def __le__(self, __x: int) -> bool: ... - def __gt__(self, __x: int) -> bool: ... - def __ge__(self, __x: int) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... + def __lt__(self, __value: int) -> bool: ... + def __le__(self, __value: int) -> bool: ... + def __gt__(self, __value: int) -> bool: ... + def __ge__(self, __value: int) -> bool: ... def __float__(self) -> float: ... def __int__(self) -> int: ... def __abs__(self) -> int: ... @@ -305,39 +308,39 @@ class float: def hex(self) -> str: ... def is_integer(self) -> bool: ... @classmethod - def fromhex(cls, __s: str) -> Self: ... + def fromhex(cls, __string: str) -> Self: ... @property def real(self) -> float: ... @property def imag(self) -> float: ... def conjugate(self) -> float: ... - def __add__(self, __x: float) -> float: ... - def __sub__(self, __x: float) -> float: ... - def __mul__(self, __x: float) -> float: ... - def __floordiv__(self, __x: float) -> float: ... - def __truediv__(self, __x: float) -> float: ... - def __mod__(self, __x: float) -> float: ... - def __divmod__(self, __x: float) -> tuple[float, float]: ... - @overload - def __pow__(self, __x: int, __mod: None = None) -> float: ... + def __add__(self, __value: float) -> float: ... + def __sub__(self, __value: float) -> float: ... + def __mul__(self, __value: float) -> float: ... + def __floordiv__(self, __value: float) -> float: ... + def __truediv__(self, __value: float) -> float: ... + def __mod__(self, __value: float) -> float: ... + def __divmod__(self, __value: float) -> tuple[float, float]: ... + @overload + def __pow__(self, __value: int, __mod: None = None) -> float: ... # positive x -> float; negative x -> complex # return type must be Any as `float | complex` causes too many false-positive errors @overload - def __pow__(self, __x: float, __mod: None = None) -> Any: ... - def __radd__(self, __x: float) -> float: ... - def __rsub__(self, __x: float) -> float: ... - def __rmul__(self, __x: float) -> float: ... - def __rfloordiv__(self, __x: float) -> float: ... - def __rtruediv__(self, __x: float) -> float: ... - def __rmod__(self, __x: float) -> float: ... - def __rdivmod__(self, __x: float) -> tuple[float, float]: ... + def __pow__(self, __value: float, __mod: None = None) -> Any: ... + def __radd__(self, __value: float) -> float: ... + def __rsub__(self, __value: float) -> float: ... + def __rmul__(self, __value: float) -> float: ... + def __rfloordiv__(self, __value: float) -> float: ... + def __rtruediv__(self, __value: float) -> float: ... + def __rmod__(self, __value: float) -> float: ... + def __rdivmod__(self, __value: float) -> tuple[float, float]: ... @overload - def __rpow__(self, __x: _PositiveInteger, __modulo: None = None) -> float: ... + def __rpow__(self, __value: _PositiveInteger, __mod: None = None) -> float: ... @overload - def __rpow__(self, __x: _NegativeInteger, __mod: None = None) -> complex: ... 
+ def __rpow__(self, __value: _NegativeInteger, __mod: None = None) -> complex: ... # Returning `complex` for the general case gives too many false-positive errors. @overload - def __rpow__(self, __x: float, __mod: None = None) -> Any: ... + def __rpow__(self, __value: float, __mod: None = None) -> Any: ... def __getnewargs__(self) -> tuple[float]: ... def __trunc__(self) -> int: ... if sys.version_info >= (3, 9): @@ -348,12 +351,12 @@ class float: def __round__(self, __ndigits: None = None) -> int: ... @overload def __round__(self, __ndigits: SupportsIndex) -> float: ... - def __eq__(self, __x: object) -> bool: ... - def __ne__(self, __x: object) -> bool: ... - def __lt__(self, __x: float) -> bool: ... - def __le__(self, __x: float) -> bool: ... - def __gt__(self, __x: float) -> bool: ... - def __ge__(self, __x: float) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... + def __lt__(self, __value: float) -> bool: ... + def __le__(self, __value: float) -> bool: ... + def __gt__(self, __value: float) -> bool: ... + def __ge__(self, __value: float) -> bool: ... def __neg__(self) -> float: ... def __pos__(self) -> float: ... def __int__(self) -> int: ... @@ -383,18 +386,18 @@ class complex: @property def imag(self) -> float: ... def conjugate(self) -> complex: ... - def __add__(self, __x: complex) -> complex: ... - def __sub__(self, __x: complex) -> complex: ... - def __mul__(self, __x: complex) -> complex: ... - def __pow__(self, __x: complex, __mod: None = None) -> complex: ... - def __truediv__(self, __x: complex) -> complex: ... - def __radd__(self, __x: complex) -> complex: ... - def __rsub__(self, __x: complex) -> complex: ... - def __rmul__(self, __x: complex) -> complex: ... - def __rpow__(self, __x: complex, __mod: None = None) -> complex: ... - def __rtruediv__(self, __x: complex) -> complex: ... - def __eq__(self, __x: object) -> bool: ... - def __ne__(self, __x: object) -> bool: ... + def __add__(self, __value: complex) -> complex: ... + def __sub__(self, __value: complex) -> complex: ... + def __mul__(self, __value: complex) -> complex: ... + def __pow__(self, __value: complex, __mod: None = None) -> complex: ... + def __truediv__(self, __value: complex) -> complex: ... + def __radd__(self, __value: complex) -> complex: ... + def __rsub__(self, __value: complex) -> complex: ... + def __rmul__(self, __value: complex) -> complex: ... + def __rpow__(self, __value: complex, __mod: None = None) -> complex: ... + def __rtruediv__(self, __value: complex) -> complex: ... + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... def __neg__(self) -> complex: ... def __pos__(self) -> complex: ... def __abs__(self) -> float: ... @@ -427,7 +430,7 @@ class str(Sequence[str]): def expandtabs(self, tabsize: int = 8) -> str: ... # type: ignore[misc] def find(self, __sub: str, __start: SupportsIndex | None = ..., __end: SupportsIndex | None = ...) -> int: ... - def format(self, *args: object, **kwargs: object) -> str: ... # type: ignore[misc] + def format(self, *args: object, **kwargs: object) -> str: ... def format_map(self, map: _FormatMapMapping) -> str: ... def index(self, __sub: str, __start: SupportsIndex | None = ..., __end: SupportsIndex | None = ...) -> int: ... def isalnum(self) -> bool: ... @@ -478,21 +481,21 @@ class str(Sequence[str]): @staticmethod @overload def maketrans(__x: str, __y: str, __z: str) -> dict[int, int | None]: ... - def __add__(self, __s: str) -> str: ... 
# type: ignore[misc] + def __add__(self, __value: str) -> str: ... # type: ignore[misc] # Incompatible with Sequence.__contains__ - def __contains__(self, __o: str) -> bool: ... # type: ignore[override] - def __eq__(self, __x: object) -> bool: ... - def __ge__(self, __x: str) -> bool: ... - def __getitem__(self, __i: SupportsIndex | slice) -> str: ... - def __gt__(self, __x: str) -> bool: ... + def __contains__(self, __key: str) -> bool: ... # type: ignore[override] + def __eq__(self, __value: object) -> bool: ... + def __ge__(self, __value: str) -> bool: ... + def __getitem__(self, __key: SupportsIndex | slice) -> str: ... + def __gt__(self, __value: str) -> bool: ... def __iter__(self) -> Iterator[str]: ... # type: ignore[misc] - def __le__(self, __x: str) -> bool: ... + def __le__(self, __value: str) -> bool: ... def __len__(self) -> int: ... - def __lt__(self, __x: str) -> bool: ... - def __mod__(self, __x: Any) -> str: ... # type: ignore[misc] - def __mul__(self, __n: SupportsIndex) -> str: ... # type: ignore[misc] - def __ne__(self, __x: object) -> bool: ... - def __rmul__(self, __n: SupportsIndex) -> str: ... # type: ignore[misc] + def __lt__(self, __value: str) -> bool: ... + def __mod__(self, __value: Any) -> str: ... + def __mul__(self, __value: SupportsIndex) -> str: ... # type: ignore[misc] + def __ne__(self, __value: object) -> bool: ... + def __rmul__(self, __value: SupportsIndex) -> str: ... # type: ignore[misc] def __getnewargs__(self) -> tuple[str]: ... class bytes(ByteString): @@ -573,27 +576,27 @@ class bytes(ByteString): def upper(self) -> bytes: ... def zfill(self, __width: SupportsIndex) -> bytes: ... @classmethod - def fromhex(cls, __s: str) -> Self: ... + def fromhex(cls, __string: str) -> Self: ... @staticmethod def maketrans(__frm: ReadableBuffer, __to: ReadableBuffer) -> bytes: ... def __len__(self) -> int: ... def __iter__(self) -> Iterator[int]: ... @overload - def __getitem__(self, __i: SupportsIndex) -> int: ... + def __getitem__(self, __key: SupportsIndex) -> int: ... @overload - def __getitem__(self, __s: slice) -> bytes: ... - def __add__(self, __s: ReadableBuffer) -> bytes: ... - def __mul__(self, __n: SupportsIndex) -> bytes: ... - def __rmul__(self, __n: SupportsIndex) -> bytes: ... + def __getitem__(self, __key: slice) -> bytes: ... + def __add__(self, __value: ReadableBuffer) -> bytes: ... + def __mul__(self, __value: SupportsIndex) -> bytes: ... + def __rmul__(self, __value: SupportsIndex) -> bytes: ... def __mod__(self, __value: Any) -> bytes: ... # Incompatible with Sequence.__contains__ - def __contains__(self, __o: SupportsIndex | ReadableBuffer) -> bool: ... # type: ignore[override] - def __eq__(self, __x: object) -> bool: ... - def __ne__(self, __x: object) -> bool: ... - def __lt__(self, __x: bytes) -> bool: ... - def __le__(self, __x: bytes) -> bool: ... - def __gt__(self, __x: bytes) -> bool: ... - def __ge__(self, __x: bytes) -> bool: ... + def __contains__(self, __key: SupportsIndex | ReadableBuffer) -> bool: ... # type: ignore[override] + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... + def __lt__(self, __value: bytes) -> bool: ... + def __le__(self, __value: bytes) -> bool: ... + def __gt__(self, __value: bytes) -> bool: ... + def __ge__(self, __value: bytes) -> bool: ... def __getnewargs__(self) -> tuple[bytes]: ... if sys.version_info >= (3, 11): def __bytes__(self) -> bytes: ... 
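The widespread __x/__other/__s -> __value renames in these dunder signatures change nothing for callers, because double-underscore-prefixed parameters are positional-only by stub convention; e.g.:

# Positional use is the only supported spelling, before and after the rename:
print("ab".__add__("cd"))      # "abcd"
print((1).__add__(2))          # 3

# Keyword use was never possible at runtime, so no call sites are affected:
try:
    (1).__add__(__value=2)      # TypeError: keyword arguments are not accepted
except TypeError as exc:
    print(exc)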
@@ -689,29 +692,29 @@ class bytearray(MutableSequence[int], ByteString): def __iter__(self) -> Iterator[int]: ... __hash__: ClassVar[None] # type: ignore[assignment] @overload - def __getitem__(self, __i: SupportsIndex) -> int: ... + def __getitem__(self, __key: SupportsIndex) -> int: ... @overload - def __getitem__(self, __s: slice) -> bytearray: ... + def __getitem__(self, __key: slice) -> bytearray: ... @overload - def __setitem__(self, __i: SupportsIndex, __x: SupportsIndex) -> None: ... + def __setitem__(self, __key: SupportsIndex, __value: SupportsIndex) -> None: ... @overload - def __setitem__(self, __s: slice, __x: Iterable[SupportsIndex] | bytes) -> None: ... - def __delitem__(self, __i: SupportsIndex | slice) -> None: ... - def __add__(self, __s: ReadableBuffer) -> bytearray: ... + def __setitem__(self, __key: slice, __value: Iterable[SupportsIndex] | bytes) -> None: ... + def __delitem__(self, __key: SupportsIndex | slice) -> None: ... + def __add__(self, __value: ReadableBuffer) -> bytearray: ... # The superclass wants us to accept Iterable[int], but that fails at runtime. - def __iadd__(self, __s: ReadableBuffer) -> Self: ... # type: ignore[override] - def __mul__(self, __n: SupportsIndex) -> bytearray: ... - def __rmul__(self, __n: SupportsIndex) -> bytearray: ... - def __imul__(self, __n: SupportsIndex) -> Self: ... + def __iadd__(self, __value: ReadableBuffer) -> Self: ... # type: ignore[override] + def __mul__(self, __value: SupportsIndex) -> bytearray: ... + def __rmul__(self, __value: SupportsIndex) -> bytearray: ... + def __imul__(self, __value: SupportsIndex) -> Self: ... def __mod__(self, __value: Any) -> bytes: ... # Incompatible with Sequence.__contains__ - def __contains__(self, __o: SupportsIndex | ReadableBuffer) -> bool: ... # type: ignore[override] - def __eq__(self, __x: object) -> bool: ... - def __ne__(self, __x: object) -> bool: ... - def __lt__(self, __x: ReadableBuffer) -> bool: ... - def __le__(self, __x: ReadableBuffer) -> bool: ... - def __gt__(self, __x: ReadableBuffer) -> bool: ... - def __ge__(self, __x: ReadableBuffer) -> bool: ... + def __contains__(self, __key: SupportsIndex | ReadableBuffer) -> bool: ... # type: ignore[override] + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... + def __lt__(self, __value: ReadableBuffer) -> bool: ... + def __le__(self, __value: ReadableBuffer) -> bool: ... + def __gt__(self, __value: ReadableBuffer) -> bool: ... + def __ge__(self, __value: ReadableBuffer) -> bool: ... def __alloc__(self) -> int: ... @final @@ -747,16 +750,16 @@ class memoryview(Sequence[int]): ) -> None: ... def cast(self, format: str, shape: list[int] | tuple[int, ...] = ...) -> memoryview: ... @overload - def __getitem__(self, __i: SupportsIndex) -> int: ... + def __getitem__(self, __key: SupportsIndex) -> int: ... @overload - def __getitem__(self, __s: slice) -> memoryview: ... + def __getitem__(self, __key: slice) -> memoryview: ... def __contains__(self, __x: object) -> bool: ... def __iter__(self) -> Iterator[int]: ... def __len__(self) -> int: ... @overload - def __setitem__(self, __s: slice, __o: ReadableBuffer) -> None: ... + def __setitem__(self, __key: slice, __value: ReadableBuffer) -> None: ... @overload - def __setitem__(self, __i: SupportsIndex, __o: SupportsIndex) -> None: ... + def __setitem__(self, __key: SupportsIndex, __value: SupportsIndex) -> None: ... if sys.version_info >= (3, 10): def tobytes(self, order: Literal["C", "F", "A"] | None = "C") -> bytes: ... 
elif sys.version_info >= (3, 8): @@ -780,29 +783,29 @@ class bool(int): # The following overloads could be represented more elegantly with a TypeVar("_B", bool, int), # however mypy has a bug regarding TypeVar constraints (https://github.com/python/mypy/issues/11880). @overload - def __and__(self, __x: bool) -> bool: ... + def __and__(self, __value: bool) -> bool: ... @overload - def __and__(self, __x: int) -> int: ... + def __and__(self, __value: int) -> int: ... @overload - def __or__(self, __x: bool) -> bool: ... + def __or__(self, __value: bool) -> bool: ... @overload - def __or__(self, __x: int) -> int: ... + def __or__(self, __value: int) -> int: ... @overload - def __xor__(self, __x: bool) -> bool: ... + def __xor__(self, __value: bool) -> bool: ... @overload - def __xor__(self, __x: int) -> int: ... + def __xor__(self, __value: int) -> int: ... @overload - def __rand__(self, __x: bool) -> bool: ... + def __rand__(self, __value: bool) -> bool: ... @overload - def __rand__(self, __x: int) -> int: ... + def __rand__(self, __value: int) -> int: ... @overload - def __ror__(self, __x: bool) -> bool: ... + def __ror__(self, __value: bool) -> bool: ... @overload - def __ror__(self, __x: int) -> int: ... + def __ror__(self, __value: int) -> int: ... @overload - def __rxor__(self, __x: bool) -> bool: ... + def __rxor__(self, __value: bool) -> bool: ... @overload - def __rxor__(self, __x: int) -> int: ... + def __rxor__(self, __value: int) -> int: ... def __getnewargs__(self) -> tuple[int]: ... @final @@ -823,22 +826,22 @@ class slice: class tuple(Sequence[_T_co], Generic[_T_co]): def __new__(cls, __iterable: Iterable[_T_co] = ...) -> Self: ... def __len__(self) -> int: ... - def __contains__(self, __x: object) -> bool: ... + def __contains__(self, __key: object) -> bool: ... @overload - def __getitem__(self, __x: SupportsIndex) -> _T_co: ... + def __getitem__(self, __key: SupportsIndex) -> _T_co: ... @overload - def __getitem__(self, __x: slice) -> tuple[_T_co, ...]: ... + def __getitem__(self, __key: slice) -> tuple[_T_co, ...]: ... def __iter__(self) -> Iterator[_T_co]: ... - def __lt__(self, __x: tuple[_T_co, ...]) -> bool: ... - def __le__(self, __x: tuple[_T_co, ...]) -> bool: ... - def __gt__(self, __x: tuple[_T_co, ...]) -> bool: ... - def __ge__(self, __x: tuple[_T_co, ...]) -> bool: ... + def __lt__(self, __value: tuple[_T_co, ...]) -> bool: ... + def __le__(self, __value: tuple[_T_co, ...]) -> bool: ... + def __gt__(self, __value: tuple[_T_co, ...]) -> bool: ... + def __ge__(self, __value: tuple[_T_co, ...]) -> bool: ... @overload - def __add__(self, __x: tuple[_T_co, ...]) -> tuple[_T_co, ...]: ... + def __add__(self, __value: tuple[_T_co, ...]) -> tuple[_T_co, ...]: ... @overload - def __add__(self, __x: tuple[_T, ...]) -> tuple[_T_co | _T, ...]: ... - def __mul__(self, __n: SupportsIndex) -> tuple[_T_co, ...]: ... - def __rmul__(self, __n: SupportsIndex) -> tuple[_T_co, ...]: ... + def __add__(self, __value: tuple[_T, ...]) -> tuple[_T_co | _T, ...]: ... + def __mul__(self, __value: SupportsIndex) -> tuple[_T_co, ...]: ... + def __rmul__(self, __value: SupportsIndex) -> tuple[_T_co, ...]: ... def count(self, __value: Any) -> int: ... def index(self, __value: Any, __start: SupportsIndex = 0, __stop: SupportsIndex = sys.maxsize) -> int: ... if sys.version_info >= (3, 9): @@ -866,7 +869,7 @@ class function: __module__: str # mypy uses `builtins.function.__get__` to represent methods, properties, and getset_descriptors so we type the return as Any. 
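The paired bool/int overloads for the bool operators above encode the runtime behaviour that mixing in a plain int widens the result; for instance:

flag = True

both = flag & False     # bool overload: result is a bool (False)
masked = flag & 0b1010  # int overload: result is an int (0)

print(type(both).__name__, type(masked).__name__)  # bool int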
- def __get__(self, obj: object, type: type | None = ...) -> Any: ... + def __get__(self, __instance: object, __owner: type | None = None) -> Any: ... class list(MutableSequence[_T], Generic[_T]): @overload @@ -900,25 +903,25 @@ class list(MutableSequence[_T], Generic[_T]): @overload def __getitem__(self, __s: slice) -> list[_T]: ... @overload - def __setitem__(self, __i: SupportsIndex, __o: _T) -> None: ... + def __setitem__(self, __key: SupportsIndex, __value: _T) -> None: ... @overload - def __setitem__(self, __s: slice, __o: Iterable[_T]) -> None: ... - def __delitem__(self, __i: SupportsIndex | slice) -> None: ... + def __setitem__(self, __key: slice, __value: Iterable[_T]) -> None: ... + def __delitem__(self, __key: SupportsIndex | slice) -> None: ... # Overloading looks unnecessary, but is needed to work around complex mypy problems @overload - def __add__(self, __x: list[_T]) -> list[_T]: ... + def __add__(self, __value: list[_T]) -> list[_T]: ... @overload - def __add__(self, __x: list[_S]) -> list[_S | _T]: ... - def __iadd__(self, __x: Iterable[_T]) -> Self: ... # type: ignore[misc] - def __mul__(self, __n: SupportsIndex) -> list[_T]: ... - def __rmul__(self, __n: SupportsIndex) -> list[_T]: ... - def __imul__(self, __n: SupportsIndex) -> Self: ... - def __contains__(self, __o: object) -> bool: ... + def __add__(self, __value: list[_S]) -> list[_S | _T]: ... + def __iadd__(self, __value: Iterable[_T]) -> Self: ... # type: ignore[misc] + def __mul__(self, __value: SupportsIndex) -> list[_T]: ... + def __rmul__(self, __value: SupportsIndex) -> list[_T]: ... + def __imul__(self, __value: SupportsIndex) -> Self: ... + def __contains__(self, __key: object) -> bool: ... def __reversed__(self) -> Iterator[_T]: ... - def __gt__(self, __x: list[_T]) -> bool: ... - def __ge__(self, __x: list[_T]) -> bool: ... - def __lt__(self, __x: list[_T]) -> bool: ... - def __le__(self, __x: list[_T]) -> bool: ... + def __gt__(self, __value: list[_T]) -> bool: ... + def __ge__(self, __value: list[_T]) -> bool: ... + def __lt__(self, __value: list[_T]) -> bool: ... + def __le__(self, __value: list[_T]) -> bool: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @@ -1005,18 +1008,18 @@ class set(MutableSet[_T], Generic[_T]): def __len__(self) -> int: ... def __contains__(self, __o: object) -> bool: ... def __iter__(self) -> Iterator[_T]: ... - def __and__(self, __s: AbstractSet[object]) -> set[_T]: ... - def __iand__(self, __s: AbstractSet[object]) -> Self: ... - def __or__(self, __s: AbstractSet[_S]) -> set[_T | _S]: ... - def __ior__(self, __s: AbstractSet[_T]) -> Self: ... # type: ignore[override,misc] - def __sub__(self, __s: AbstractSet[_T | None]) -> set[_T]: ... - def __isub__(self, __s: AbstractSet[object]) -> Self: ... - def __xor__(self, __s: AbstractSet[_S]) -> set[_T | _S]: ... - def __ixor__(self, __s: AbstractSet[_T]) -> Self: ... # type: ignore[override,misc] - def __le__(self, __s: AbstractSet[object]) -> bool: ... - def __lt__(self, __s: AbstractSet[object]) -> bool: ... - def __ge__(self, __s: AbstractSet[object]) -> bool: ... - def __gt__(self, __s: AbstractSet[object]) -> bool: ... + def __and__(self, __value: AbstractSet[object]) -> set[_T]: ... + def __iand__(self, __value: AbstractSet[object]) -> Self: ... + def __or__(self, __value: AbstractSet[_S]) -> set[_T | _S]: ... + def __ior__(self, __value: AbstractSet[_T]) -> Self: ... # type: ignore[override,misc] + def __sub__(self, __value: AbstractSet[_T | None]) -> set[_T]: ... 
+ def __isub__(self, __value: AbstractSet[object]) -> Self: ... + def __xor__(self, __value: AbstractSet[_S]) -> set[_T | _S]: ... + def __ixor__(self, __value: AbstractSet[_T]) -> Self: ... # type: ignore[override,misc] + def __le__(self, __value: AbstractSet[object]) -> bool: ... + def __lt__(self, __value: AbstractSet[object]) -> bool: ... + def __ge__(self, __value: AbstractSet[object]) -> bool: ... + def __gt__(self, __value: AbstractSet[object]) -> bool: ... __hash__: ClassVar[None] # type: ignore[assignment] if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @@ -1037,14 +1040,14 @@ class frozenset(AbstractSet[_T_co], Generic[_T_co]): def __len__(self) -> int: ... def __contains__(self, __o: object) -> bool: ... def __iter__(self) -> Iterator[_T_co]: ... - def __and__(self, __s: AbstractSet[_T_co]) -> frozenset[_T_co]: ... - def __or__(self, __s: AbstractSet[_S]) -> frozenset[_T_co | _S]: ... - def __sub__(self, __s: AbstractSet[_T_co]) -> frozenset[_T_co]: ... - def __xor__(self, __s: AbstractSet[_S]) -> frozenset[_T_co | _S]: ... - def __le__(self, __s: AbstractSet[object]) -> bool: ... - def __lt__(self, __s: AbstractSet[object]) -> bool: ... - def __ge__(self, __s: AbstractSet[object]) -> bool: ... - def __gt__(self, __s: AbstractSet[object]) -> bool: ... + def __and__(self, __value: AbstractSet[_T_co]) -> frozenset[_T_co]: ... + def __or__(self, __value: AbstractSet[_S]) -> frozenset[_T_co | _S]: ... + def __sub__(self, __value: AbstractSet[_T_co]) -> frozenset[_T_co]: ... + def __xor__(self, __value: AbstractSet[_S]) -> frozenset[_T_co | _S]: ... + def __le__(self, __value: AbstractSet[object]) -> bool: ... + def __lt__(self, __value: AbstractSet[object]) -> bool: ... + def __ge__(self, __value: AbstractSet[object]) -> bool: ... + def __gt__(self, __value: AbstractSet[object]) -> bool: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... @@ -1070,12 +1073,12 @@ class range(Sequence[int]): def count(self, __value: int) -> int: ... def index(self, __value: int) -> int: ... # type: ignore[override] def __len__(self) -> int: ... - def __contains__(self, __o: object) -> bool: ... + def __contains__(self, __key: object) -> bool: ... def __iter__(self) -> Iterator[int]: ... @overload - def __getitem__(self, __i: SupportsIndex) -> int: ... + def __getitem__(self, __key: SupportsIndex) -> int: ... @overload - def __getitem__(self, __s: slice) -> range: ... + def __getitem__(self, __key: slice) -> range: ... def __reversed__(self) -> Iterator[int]: ... class property: @@ -1093,12 +1096,12 @@ class property: def getter(self, __fget: Callable[[Any], Any]) -> property: ... def setter(self, __fset: Callable[[Any, Any], None]) -> property: ... def deleter(self, __fdel: Callable[[Any], None]) -> property: ... - def __get__(self, __obj: Any, __type: type | None = ...) -> Any: ... - def __set__(self, __obj: Any, __value: Any) -> None: ... - def __delete__(self, __obj: Any) -> None: ... + def __get__(self, __instance: Any, __owner: type | None = None) -> Any: ... + def __set__(self, __instance: Any, __value: Any) -> None: ... + def __delete__(self, __instance: Any) -> None: ... 
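# Illustrative sketch, not part of the diff: the property/function.__get__
# parameters renamed above ("__instance", "__owner") follow the descriptor
# protocol's conventional (instance, owner) signature.  A minimal hand-written
# descriptor with the same shape (class names below are made up):
from typing import Optional

class UpperCase:
    """Non-data descriptor that upper-cases a stored attribute on access."""
    def __set_name__(self, owner: type, name: str) -> None:
        self._name = "_" + name
    def __get__(self, instance: object, owner: Optional[type] = None) -> str:
        if instance is None:           # looked up on the class itself
            return "<UpperCase>"
        return str(getattr(instance, self._name, "")).upper()

class Greeting:
    text = UpperCase()
    def __init__(self, text: str) -> None:
        self._text = text

print(Greeting("hi").text)   # HI
print(Greeting.text)         # <UpperCase>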
@final -class _NotImplementedType(Any): # type: ignore[misc] +class _NotImplementedType(Any): # A little weird, but typing the __call__ as NotImplemented makes the error message # for NotImplemented() much better __call__: NotImplemented # type: ignore[valid-type] # pyright: ignore[reportGeneralTypeIssues] @@ -1302,6 +1305,7 @@ def iter(__function: Callable[[], _T | None], __sentinel: None) -> Iterator[_T]: @overload def iter(__function: Callable[[], _T], __sentinel: object) -> Iterator[_T]: ... +# Keep this alias in sync with unittest.case._ClassInfo if sys.version_info >= (3, 10): _ClassInfo: TypeAlias = type | types.UnionType | tuple[_ClassInfo, ...] else: @@ -1518,11 +1522,11 @@ if sys.version_info >= (3, 8): @overload def pow(base: int, exp: int, mod: int) -> int: ... @overload - def pow(base: int, exp: Literal[0], mod: None = None) -> Literal[1]: ... # type: ignore[misc] + def pow(base: int, exp: Literal[0], mod: None = None) -> Literal[1]: ... @overload - def pow(base: int, exp: _PositiveInteger, mod: None = None) -> int: ... # type: ignore[misc] + def pow(base: int, exp: _PositiveInteger, mod: None = None) -> int: ... @overload - def pow(base: int, exp: _NegativeInteger, mod: None = None) -> float: ... # type: ignore[misc] + def pow(base: int, exp: _NegativeInteger, mod: None = None) -> float: ... # int base & positive-int exp -> int; int base & negative-int exp -> float # return type must be Any as `int | float` causes too many false-positive errors @overload @@ -1553,35 +1557,35 @@ if sys.version_info >= (3, 8): else: @overload - def pow(__base: int, __exp: int, __mod: int) -> int: ... + def pow(__x: int, __y: int, __z: int) -> int: ... @overload - def pow(__base: int, __exp: Literal[0], __mod: None = None) -> Literal[1]: ... # type: ignore[misc] + def pow(__x: int, __y: Literal[0], __z: None = None) -> Literal[1]: ... @overload - def pow(__base: int, __exp: _PositiveInteger, __mod: None = None) -> int: ... # type: ignore[misc] + def pow(__x: int, __y: _PositiveInteger, __z: None = None) -> int: ... @overload - def pow(__base: int, __exp: _NegativeInteger, __mod: None = None) -> float: ... # type: ignore[misc] + def pow(__x: int, __y: _NegativeInteger, __z: None = None) -> float: ... @overload - def pow(__base: int, __exp: int, __mod: None = None) -> Any: ... + def pow(__x: int, __y: int, __z: None = None) -> Any: ... @overload - def pow(__base: _PositiveInteger, __exp: float, __mod: None = None) -> float: ... + def pow(__x: _PositiveInteger, __y: float, __z: None = None) -> float: ... @overload - def pow(__base: _NegativeInteger, __exp: float, __mod: None = None) -> complex: ... + def pow(__x: _NegativeInteger, __y: float, __z: None = None) -> complex: ... @overload - def pow(__base: float, __exp: int, __mod: None = None) -> float: ... + def pow(__x: float, __y: int, __z: None = None) -> float: ... @overload - def pow(__base: float, __exp: complex | _SupportsSomeKindOfPow, __mod: None = None) -> Any: ... + def pow(__x: float, __y: complex | _SupportsSomeKindOfPow, __z: None = None) -> Any: ... @overload - def pow(__base: complex, __exp: complex | _SupportsSomeKindOfPow, __mod: None = None) -> complex: ... + def pow(__x: complex, __y: complex | _SupportsSomeKindOfPow, __z: None = None) -> complex: ... @overload - def pow(__base: _SupportsPow2[_E, _T_co], __exp: _E, __mod: None = None) -> _T_co: ... + def pow(__x: _SupportsPow2[_E, _T_co], __y: _E, __z: None = None) -> _T_co: ... 
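# Illustrative sketch, not part of the diff: runtime behaviour that the pow()
# overloads above encode (zero exponent -> Literal[1], positive int exponent
# -> int, negative int exponent -> float, negative int base with a float
# exponent -> complex):
print(pow(2, 3))       # 8    (int)
print(pow(2, 0))       # 1    (the Literal[1] case)
print(pow(2, -1))      # 0.5  (float)
print(pow(2, 3, 5))    # 3    (three-argument modular form stays int)
print(pow(-2, 0.5))    # complex result, matching the _NegativeInteger/float overload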
@overload - def pow(__base: _SupportsPow3NoneOnly[_E, _T_co], __exp: _E, __mod: None = None) -> _T_co: ... + def pow(__x: _SupportsPow3NoneOnly[_E, _T_co], __y: _E, __z: None = None) -> _T_co: ... @overload - def pow(__base: _SupportsPow3[_E, _M, _T_co], __exp: _E, __mod: _M) -> _T_co: ... + def pow(__x: _SupportsPow3[_E, _M, _T_co], __y: _E, __z: _M) -> _T_co: ... @overload - def pow(__base: _SupportsSomeKindOfPow, __exp: float, __mod: None = None) -> Any: ... + def pow(__x: _SupportsSomeKindOfPow, __y: float, __z: None = None) -> Any: ... @overload - def pow(__base: _SupportsSomeKindOfPow, __exp: complex, __mod: None = None) -> complex: ... + def pow(__x: _SupportsSomeKindOfPow, __y: complex, __z: None = None) -> complex: ... def quit(code: sys._ExitCode = None) -> NoReturn: ... diff --git a/mypy/typeshed/stdlib/codecs.pyi b/mypy/typeshed/stdlib/codecs.pyi index 5a22853b6aee..3f6d2d3d16b7 100644 --- a/mypy/typeshed/stdlib/codecs.pyi +++ b/mypy/typeshed/stdlib/codecs.pyi @@ -272,8 +272,9 @@ class StreamRecoder(BinaryIO): def readlines(self, sizehint: int | None = None) -> list[bytes]: ... def __next__(self) -> bytes: ... def __iter__(self) -> Self: ... + # Base class accepts more types than just bytes def write(self, data: bytes) -> None: ... # type: ignore[override] - def writelines(self, list: Iterable[bytes]) -> None: ... + def writelines(self, list: Iterable[bytes]) -> None: ... # type: ignore[override] def reset(self) -> None: ... def __getattr__(self, name: str) -> Any: ... def __enter__(self) -> Self: ... diff --git a/mypy/typeshed/stdlib/collections/__init__.pyi b/mypy/typeshed/stdlib/collections/__init__.pyi index 893a289d3cb1..1a40421146cc 100644 --- a/mypy/typeshed/stdlib/collections/__init__.pyi +++ b/mypy/typeshed/stdlib/collections/__init__.pyi @@ -236,19 +236,19 @@ class deque(MutableSequence[_T], Generic[_T]): def __copy__(self) -> Self: ... def __len__(self) -> int: ... # These methods of deque don't take slices, unlike MutableSequence, hence the type: ignores - def __getitem__(self, __index: SupportsIndex) -> _T: ... # type: ignore[override] - def __setitem__(self, __i: SupportsIndex, __x: _T) -> None: ... # type: ignore[override] - def __delitem__(self, __i: SupportsIndex) -> None: ... # type: ignore[override] - def __contains__(self, __o: object) -> bool: ... + def __getitem__(self, __key: SupportsIndex) -> _T: ... # type: ignore[override] + def __setitem__(self, __key: SupportsIndex, __value: _T) -> None: ... # type: ignore[override] + def __delitem__(self, __key: SupportsIndex) -> None: ... # type: ignore[override] + def __contains__(self, __key: object) -> bool: ... def __reduce__(self) -> tuple[type[Self], tuple[()], None, Iterator[_T]]: ... - def __iadd__(self, __iterable: Iterable[_T]) -> Self: ... - def __add__(self, __other: Self) -> Self: ... - def __mul__(self, __other: int) -> Self: ... - def __imul__(self, __other: int) -> Self: ... - def __lt__(self, __other: deque[_T]) -> bool: ... - def __le__(self, __other: deque[_T]) -> bool: ... - def __gt__(self, __other: deque[_T]) -> bool: ... - def __ge__(self, __other: deque[_T]) -> bool: ... + def __iadd__(self, __value: Iterable[_T]) -> Self: ... + def __add__(self, __value: Self) -> Self: ... + def __mul__(self, __value: int) -> Self: ... + def __imul__(self, __value: int) -> Self: ... + def __lt__(self, __value: deque[_T]) -> bool: ... + def __le__(self, __value: deque[_T]) -> bool: ... + def __gt__(self, __value: deque[_T]) -> bool: ... + def __ge__(self, __value: deque[_T]) -> bool: ... 
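# Illustrative sketch, not part of the diff: deque accepts integer indices
# but, unlike most MutableSequence implementations, rejects slices -- which is
# why the __getitem__/__setitem__/__delitem__ signatures above are narrowed
# and carry type: ignore[override]:
from collections import deque

d = deque([1, 2, 3])
print(d[0])            # 1 -- integer indexing is fine
try:
    d[0:2]             # slices are not supported at runtime
except TypeError as exc:
    print(type(exc).__name__)   # TypeError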
if sys.version_info >= (3, 9): def __class_getitem__(cls, __item: Any) -> GenericAlias: ... diff --git a/mypy/typeshed/stdlib/concurrent/futures/_base.pyi b/mypy/typeshed/stdlib/concurrent/futures/_base.pyi index e792cf1a83c0..eb5ca4e2dd35 100644 --- a/mypy/typeshed/stdlib/concurrent/futures/_base.pyi +++ b/mypy/typeshed/stdlib/concurrent/futures/_base.pyi @@ -1,11 +1,11 @@ import sys import threading from _typeshed import Unused -from collections.abc import Callable, Iterable, Iterator, Sequence +from collections.abc import Callable, Iterable, Iterator from logging import Logger from types import TracebackType -from typing import Any, Generic, TypeVar, overload -from typing_extensions import Literal, ParamSpec, Self, SupportsIndex +from typing import Any, Generic, NamedTuple, TypeVar +from typing_extensions import Literal, ParamSpec, Self if sys.version_info >= (3, 9): from types import GenericAlias @@ -69,20 +69,9 @@ class Executor: def as_completed(fs: Iterable[Future[_T]], timeout: float | None = None) -> Iterator[Future[_T]]: ... -# Ideally this would be a namedtuple, but mypy doesn't support generic tuple types. See #1976 -class DoneAndNotDoneFutures(Sequence[set[Future[_T]]]): - if sys.version_info >= (3, 10): - __match_args__ = ("done", "not_done") - @property - def done(self) -> set[Future[_T]]: ... - @property - def not_done(self) -> set[Future[_T]]: ... - def __new__(_cls, done: set[Future[_T]], not_done: set[Future[_T]]) -> DoneAndNotDoneFutures[_T]: ... - def __len__(self) -> int: ... - @overload - def __getitem__(self, __i: SupportsIndex) -> set[Future[_T]]: ... - @overload - def __getitem__(self, __s: slice) -> DoneAndNotDoneFutures[_T]: ... +class DoneAndNotDoneFutures(NamedTuple, Generic[_T]): + done: set[Future[_T]] + not_done: set[Future[_T]] def wait( fs: Iterable[Future[_T]], timeout: float | None = None, return_when: str = "ALL_COMPLETED" diff --git a/mypy/typeshed/stdlib/contextlib.pyi b/mypy/typeshed/stdlib/contextlib.pyi index feb43aabb039..dc2101dc01f7 100644 --- a/mypy/typeshed/stdlib/contextlib.pyi +++ b/mypy/typeshed/stdlib/contextlib.pyi @@ -64,9 +64,14 @@ class _GeneratorContextManager(AbstractContextManager[_T_co], ContextDecorator, func: Callable[..., Generator[_T_co, Any, Any]] args: tuple[Any, ...] kwds: dict[str, Any] - def __exit__( - self, typ: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None - ) -> bool | None: ... + if sys.version_info >= (3, 9): + def __exit__( + self, typ: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None + ) -> bool | None: ... + else: + def __exit__( + self, type: type[BaseException] | None, value: BaseException | None, traceback: TracebackType | None + ) -> bool | None: ... def contextmanager(func: Callable[_P, Iterator[_T_co]]) -> Callable[_P, _GeneratorContextManager[_T_co]]: ... diff --git a/mypy/typeshed/stdlib/contextvars.pyi b/mypy/typeshed/stdlib/contextvars.pyi index 266d96bce6ff..ef6e2700e667 100644 --- a/mypy/typeshed/stdlib/contextvars.pyi +++ b/mypy/typeshed/stdlib/contextvars.pyi @@ -22,8 +22,13 @@ class ContextVar(Generic[_T]): def name(self) -> str: ... @overload def get(self) -> _T: ... - @overload - def get(self, default: _D | _T) -> _D | _T: ... + if sys.version_info >= (3, 8): + @overload + def get(self, default: _D | _T) -> _D | _T: ... + else: + @overload + def get(self, __default: _D | _T) -> _D | _T: ... + def set(self, __value: _T) -> Token[_T]: ... def reset(self, __token: Token[_T]) -> None: ... 
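# Illustrative sketch, not part of the diff: the DoneAndNotDoneFutures rewrite
# above relies on generic NamedTuple support.  Mypy accepts the pattern in a
# stub; at runtime the class form below needs Python 3.11+ (older interpreters
# can use typing_extensions.NamedTuple instead).  "Pair" is a made-up example:
from typing import Generic, NamedTuple, TypeVar

_T = TypeVar("_T")

class Pair(NamedTuple, Generic[_T]):
    first: _T
    second: _T

p: Pair[int] = Pair(1, 2)
first, second = p          # still unpacks like an ordinary tuple
print(first + second)      # 3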
if sys.version_info >= (3, 9): diff --git a/mypy/typeshed/stdlib/csv.pyi b/mypy/typeshed/stdlib/csv.pyi index 234b189fb3db..59f2e7a3c96b 100644 --- a/mypy/typeshed/stdlib/csv.pyi +++ b/mypy/typeshed/stdlib/csv.pyi @@ -80,14 +80,14 @@ class DictReader(Generic[_T], Iterator[_DictReadMapping[_T | Any, str | Any]]): restval: str | None = None, dialect: _DialectLike = "excel", *, - delimiter: str = ..., - quotechar: str | None = ..., - escapechar: str | None = ..., - doublequote: bool = ..., - skipinitialspace: bool = ..., - lineterminator: str = ..., - quoting: _QuotingType = ..., - strict: bool = ..., + delimiter: str = ",", + quotechar: str | None = '"', + escapechar: str | None = None, + doublequote: bool = True, + skipinitialspace: bool = False, + lineterminator: str = "\r\n", + quoting: _QuotingType = 0, + strict: bool = False, ) -> None: ... @overload def __init__( @@ -98,14 +98,14 @@ class DictReader(Generic[_T], Iterator[_DictReadMapping[_T | Any, str | Any]]): restval: str | None = None, dialect: _DialectLike = "excel", *, - delimiter: str = ..., - quotechar: str | None = ..., - escapechar: str | None = ..., - doublequote: bool = ..., - skipinitialspace: bool = ..., - lineterminator: str = ..., - quoting: _QuotingType = ..., - strict: bool = ..., + delimiter: str = ",", + quotechar: str | None = '"', + escapechar: str | None = None, + doublequote: bool = True, + skipinitialspace: bool = False, + lineterminator: str = "\r\n", + quoting: _QuotingType = 0, + strict: bool = False, ) -> None: ... def __iter__(self) -> Self: ... def __next__(self) -> _DictReadMapping[_T | Any, str | Any]: ... @@ -125,14 +125,14 @@ class DictWriter(Generic[_T]): extrasaction: Literal["raise", "ignore"] = "raise", dialect: _DialectLike = "excel", *, - delimiter: str = ..., - quotechar: str | None = ..., - escapechar: str | None = ..., - doublequote: bool = ..., - skipinitialspace: bool = ..., - lineterminator: str = ..., - quoting: _QuotingType = ..., - strict: bool = ..., + delimiter: str = ",", + quotechar: str | None = '"', + escapechar: str | None = None, + doublequote: bool = True, + skipinitialspace: bool = False, + lineterminator: str = "\r\n", + quoting: _QuotingType = 0, + strict: bool = False, ) -> None: ... if sys.version_info >= (3, 8): def writeheader(self) -> Any: ... diff --git a/mypy/typeshed/stdlib/ctypes/__init__.pyi b/mypy/typeshed/stdlib/ctypes/__init__.pyi index 497e2f7db70b..2ae5b22f3074 100644 --- a/mypy/typeshed/stdlib/ctypes/__init__.pyi +++ b/mypy/typeshed/stdlib/ctypes/__init__.pyi @@ -170,10 +170,10 @@ class _Pointer(Generic[_CT], _PointerLike, _CData): @overload def __init__(self, arg: _CT) -> None: ... @overload - def __getitem__(self, __i: int) -> Any: ... + def __getitem__(self, __key: int) -> Any: ... @overload - def __getitem__(self, __s: slice) -> list[Any]: ... - def __setitem__(self, __i: int, __o: Any) -> None: ... + def __getitem__(self, __key: slice) -> list[Any]: ... + def __setitem__(self, __key: int, __value: Any) -> None: ... def pointer(__arg: _CT) -> _Pointer[_CT]: ... def resize(obj: _CData, size: int) -> None: ... @@ -287,13 +287,13 @@ class Array(Generic[_CT], _CData): # the array element type would belong are annotated with Any instead. def __init__(self, *args: Any) -> None: ... @overload - def __getitem__(self, __i: int) -> Any: ... + def __getitem__(self, __key: int) -> Any: ... @overload - def __getitem__(self, __s: slice) -> list[Any]: ... + def __getitem__(self, __key: slice) -> list[Any]: ... 
@overload - def __setitem__(self, __i: int, __o: Any) -> None: ... + def __setitem__(self, __key: int, __value: Any) -> None: ... @overload - def __setitem__(self, __s: slice, __o: Iterable[Any]) -> None: ... + def __setitem__(self, __key: slice, __value: Iterable[Any]) -> None: ... def __iter__(self) -> Iterator[Any]: ... # Can't inherit from Sized because the metaclass conflict between # Sized and _CData prevents using _CDataMeta. diff --git a/mypy/typeshed/stdlib/dataclasses.pyi b/mypy/typeshed/stdlib/dataclasses.pyi index c02aaabe6196..d254a594d8e8 100644 --- a/mypy/typeshed/stdlib/dataclasses.pyi +++ b/mypy/typeshed/stdlib/dataclasses.pyi @@ -223,7 +223,7 @@ else: def fields(class_or_instance: DataclassInstance | type[DataclassInstance]) -> tuple[Field[Any], ...]: ... @overload -def is_dataclass(obj: DataclassInstance | type[DataclassInstance]) -> Literal[True]: ... +def is_dataclass(obj: DataclassInstance) -> Literal[True]: ... @overload def is_dataclass(obj: type) -> TypeGuard[type[DataclassInstance]]: ... @overload diff --git a/mypy/typeshed/stdlib/datetime.pyi b/mypy/typeshed/stdlib/datetime.pyi index 4da5501ce76d..f78737e98910 100644 --- a/mypy/typeshed/stdlib/datetime.pyi +++ b/mypy/typeshed/stdlib/datetime.pyi @@ -82,29 +82,29 @@ class date: def timetuple(self) -> struct_time: ... def toordinal(self) -> int: ... def replace(self, year: int = ..., month: int = ..., day: int = ...) -> Self: ... - def __le__(self, __other: date) -> bool: ... - def __lt__(self, __other: date) -> bool: ... - def __ge__(self, __other: date) -> bool: ... - def __gt__(self, __other: date) -> bool: ... + def __le__(self, __value: date) -> bool: ... + def __lt__(self, __value: date) -> bool: ... + def __ge__(self, __value: date) -> bool: ... + def __gt__(self, __value: date) -> bool: ... if sys.version_info >= (3, 8): - def __add__(self, __other: timedelta) -> Self: ... - def __radd__(self, __other: timedelta) -> Self: ... + def __add__(self, __value: timedelta) -> Self: ... + def __radd__(self, __value: timedelta) -> Self: ... @overload - def __sub__(self, __other: timedelta) -> Self: ... + def __sub__(self, __value: timedelta) -> Self: ... @overload - def __sub__(self, __other: datetime) -> NoReturn: ... + def __sub__(self, __value: datetime) -> NoReturn: ... @overload - def __sub__(self: _D, __other: _D) -> timedelta: ... + def __sub__(self: _D, __value: _D) -> timedelta: ... else: # Prior to Python 3.8, arithmetic operations always returned `date`, even in subclasses - def __add__(self, __other: timedelta) -> date: ... - def __radd__(self, __other: timedelta) -> date: ... + def __add__(self, __value: timedelta) -> date: ... + def __radd__(self, __value: timedelta) -> date: ... @overload - def __sub__(self, __other: timedelta) -> date: ... + def __sub__(self, __value: timedelta) -> date: ... @overload - def __sub__(self, __other: datetime) -> NoReturn: ... + def __sub__(self, __value: datetime) -> NoReturn: ... @overload - def __sub__(self, __other: date) -> timedelta: ... + def __sub__(self, __value: date) -> timedelta: ... def weekday(self) -> int: ... def isoweekday(self) -> int: ... @@ -139,10 +139,10 @@ class time: def tzinfo(self) -> _TzInfo | None: ... @property def fold(self) -> int: ... - def __le__(self, __other: time) -> bool: ... - def __lt__(self, __other: time) -> bool: ... - def __ge__(self, __other: time) -> bool: ... - def __gt__(self, __other: time) -> bool: ... + def __le__(self, __value: time) -> bool: ... + def __lt__(self, __value: time) -> bool: ... 
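# Illustrative sketch, not part of the diff: the narrowed is_dataclass()
# overloads above let a plain `type` be narrowed to a dataclass type via
# TypeGuard.  "Point" and "describe" are made-up names:
from dataclasses import dataclass, fields, is_dataclass

@dataclass
class Point:
    x: int
    y: int

def describe(cls: type) -> None:
    if is_dataclass(cls):
        # cls is now type[DataclassInstance], so fields() type-checks
        print([f.name for f in fields(cls)])
    else:
        print(f"{cls.__name__} is not a dataclass")

describe(Point)   # ['x', 'y']
describe(int)     # int is not a dataclass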
+ def __ge__(self, __value: time) -> bool: ... + def __gt__(self, __value: time) -> bool: ... def isoformat(self, timespec: str = ...) -> str: ... @classmethod def fromisoformat(cls, __time_string: str) -> Self: ... @@ -193,29 +193,29 @@ class timedelta: @property def microseconds(self) -> int: ... def total_seconds(self) -> float: ... - def __add__(self, __other: timedelta) -> timedelta: ... - def __radd__(self, __other: timedelta) -> timedelta: ... - def __sub__(self, __other: timedelta) -> timedelta: ... - def __rsub__(self, __other: timedelta) -> timedelta: ... + def __add__(self, __value: timedelta) -> timedelta: ... + def __radd__(self, __value: timedelta) -> timedelta: ... + def __sub__(self, __value: timedelta) -> timedelta: ... + def __rsub__(self, __value: timedelta) -> timedelta: ... def __neg__(self) -> timedelta: ... def __pos__(self) -> timedelta: ... def __abs__(self) -> timedelta: ... - def __mul__(self, __other: float) -> timedelta: ... - def __rmul__(self, __other: float) -> timedelta: ... + def __mul__(self, __value: float) -> timedelta: ... + def __rmul__(self, __value: float) -> timedelta: ... @overload - def __floordiv__(self, __other: timedelta) -> int: ... + def __floordiv__(self, __value: timedelta) -> int: ... @overload - def __floordiv__(self, __other: int) -> timedelta: ... + def __floordiv__(self, __value: int) -> timedelta: ... @overload - def __truediv__(self, __other: timedelta) -> float: ... + def __truediv__(self, __value: timedelta) -> float: ... @overload - def __truediv__(self, __other: float) -> timedelta: ... - def __mod__(self, __other: timedelta) -> timedelta: ... - def __divmod__(self, __other: timedelta) -> tuple[int, timedelta]: ... - def __le__(self, __other: timedelta) -> bool: ... - def __lt__(self, __other: timedelta) -> bool: ... - def __ge__(self, __other: timedelta) -> bool: ... - def __gt__(self, __other: timedelta) -> bool: ... + def __truediv__(self, __value: float) -> timedelta: ... + def __mod__(self, __value: timedelta) -> timedelta: ... + def __divmod__(self, __value: timedelta) -> tuple[int, timedelta]: ... + def __le__(self, __value: timedelta) -> bool: ... + def __lt__(self, __value: timedelta) -> bool: ... + def __ge__(self, __value: timedelta) -> bool: ... + def __gt__(self, __value: timedelta) -> bool: ... def __bool__(self) -> bool: ... class datetime(date): @@ -302,20 +302,20 @@ class datetime(date): def utcoffset(self) -> timedelta | None: ... def tzname(self) -> str | None: ... def dst(self) -> timedelta | None: ... - def __le__(self, __other: datetime) -> bool: ... # type: ignore[override] - def __lt__(self, __other: datetime) -> bool: ... # type: ignore[override] - def __ge__(self, __other: datetime) -> bool: ... # type: ignore[override] - def __gt__(self, __other: datetime) -> bool: ... # type: ignore[override] + def __le__(self, __value: datetime) -> bool: ... # type: ignore[override] + def __lt__(self, __value: datetime) -> bool: ... # type: ignore[override] + def __ge__(self, __value: datetime) -> bool: ... # type: ignore[override] + def __gt__(self, __value: datetime) -> bool: ... # type: ignore[override] if sys.version_info >= (3, 8): @overload # type: ignore[override] - def __sub__(self, __other: timedelta) -> Self: ... + def __sub__(self, __value: timedelta) -> Self: ... @overload - def __sub__(self: _D, __other: _D) -> timedelta: ... + def __sub__(self: _D, __value: _D) -> timedelta: ... 
else: # Prior to Python 3.8, arithmetic operations always returned `datetime`, even in subclasses - def __add__(self, __other: timedelta) -> datetime: ... - def __radd__(self, __other: timedelta) -> datetime: ... + def __add__(self, __value: timedelta) -> datetime: ... + def __radd__(self, __value: timedelta) -> datetime: ... @overload # type: ignore[override] - def __sub__(self, __other: datetime) -> timedelta: ... + def __sub__(self, __value: datetime) -> timedelta: ... @overload - def __sub__(self, __other: timedelta) -> datetime: ... + def __sub__(self, __value: timedelta) -> datetime: ... diff --git a/mypy/typeshed/stdlib/difflib.pyi b/mypy/typeshed/stdlib/difflib.pyi index 310519602695..894ebaaeca98 100644 --- a/mypy/typeshed/stdlib/difflib.pyi +++ b/mypy/typeshed/stdlib/difflib.pyi @@ -57,9 +57,8 @@ class SequenceMatcher(Generic[_T]): if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... -# mypy thinks the signatures of the overloads overlap, but the types still work fine @overload -def get_close_matches(word: AnyStr, possibilities: Iterable[AnyStr], n: int = 3, cutoff: float = 0.6) -> list[AnyStr]: ... # type: ignore[misc] +def get_close_matches(word: AnyStr, possibilities: Iterable[AnyStr], n: int = 3, cutoff: float = 0.6) -> list[AnyStr]: ... @overload def get_close_matches( word: Sequence[_T], possibilities: Iterable[Sequence[_T]], n: int = 3, cutoff: float = 0.6 diff --git a/mypy/typeshed/stdlib/distutils/cmd.pyi b/mypy/typeshed/stdlib/distutils/cmd.pyi index d9ffee9cb832..a9aade0206dd 100644 --- a/mypy/typeshed/stdlib/distutils/cmd.pyi +++ b/mypy/typeshed/stdlib/distutils/cmd.pyi @@ -1,3 +1,4 @@ +from _typeshed import Incomplete from abc import abstractmethod from collections.abc import Callable, Iterable from distutils.dist import Distribution @@ -60,3 +61,5 @@ class Command: skip_msg: str | None = None, level: Any = 1, ) -> None: ... # level is not used + def ensure_finalized(self) -> None: ... + def dump_options(self, header: Incomplete | None = None, indent: str = "") -> None: ... diff --git a/mypy/typeshed/stdlib/distutils/core.pyi b/mypy/typeshed/stdlib/distutils/core.pyi index 56081f921378..7b0bdd1b35bd 100644 --- a/mypy/typeshed/stdlib/distutils/core.pyi +++ b/mypy/typeshed/stdlib/distutils/core.pyi @@ -1,9 +1,17 @@ +from _typeshed import StrOrBytesPath from collections.abc import Mapping from distutils.cmd import Command as Command from distutils.dist import Distribution as Distribution from distutils.extension import Extension as Extension from typing import Any +USAGE: str + +def gen_usage(script_name: StrOrBytesPath) -> str: ... + +setup_keywords: tuple[str, ...] +extension_keywords: tuple[str, ...] + def setup( *, name: str = ..., diff --git a/mypy/typeshed/stdlib/distutils/cygwinccompiler.pyi b/mypy/typeshed/stdlib/distutils/cygwinccompiler.pyi index 1f85b254860b..a990c3e28f36 100644 --- a/mypy/typeshed/stdlib/distutils/cygwinccompiler.pyi +++ b/mypy/typeshed/stdlib/distutils/cygwinccompiler.pyi @@ -1,4 +1,20 @@ from distutils.unixccompiler import UnixCCompiler +from distutils.version import LooseVersion +from re import Pattern +from typing_extensions import Literal + +def get_msvcr() -> list[str] | None: ... class CygwinCCompiler(UnixCCompiler): ... class Mingw32CCompiler(CygwinCCompiler): ... + +CONFIG_H_OK: str +CONFIG_H_NOTOK: str +CONFIG_H_UNCERTAIN: str + +def check_config_h() -> tuple[Literal["ok", "not ok", "uncertain"], str]: ... 
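# Illustrative sketch, not part of the diff, referring back to the
# datetime.pyi hunk above: per the stub comment, date/datetime arithmetic only
# started preserving subclasses in Python 3.8, which is why the 3.8+ branch
# returns Self while the older branch returns the base class.  "MyDate" is a
# made-up subclass:
from datetime import date, timedelta

class MyDate(date):
    pass

d = MyDate(2023, 1, 1) + timedelta(days=1)
print(type(d).__name__)    # 'MyDate' on 3.8+, 'date' on older interpreters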
+ +RE_VERSION: Pattern[bytes] + +def get_versions() -> tuple[LooseVersion | None, ...]: ... +def is_cygwingcc() -> bool: ... diff --git a/mypy/typeshed/stdlib/distutils/dist.pyi b/mypy/typeshed/stdlib/distutils/dist.pyi index b411324c4ce6..dfffdc5e11bb 100644 --- a/mypy/typeshed/stdlib/distutils/dist.pyi +++ b/mypy/typeshed/stdlib/distutils/dist.pyi @@ -1,8 +1,11 @@ -from _typeshed import FileDescriptorOrPath, SupportsWrite +from _typeshed import FileDescriptorOrPath, Incomplete, SupportsWrite from collections.abc import Iterable, Mapping from distutils.cmd import Command +from re import Pattern from typing import IO, Any +command_re: Pattern[str] + class DistributionMetadata: def __init__(self, path: FileDescriptorOrPath | None = None) -> None: ... name: str | None @@ -57,3 +60,57 @@ class Distribution: def get_option_dict(self, command: str) -> dict[str, tuple[str, str]]: ... def parse_config_files(self, filenames: Iterable[str] | None = None) -> None: ... def get_command_obj(self, command: str, create: bool = ...) -> Command | None: ... + global_options: Incomplete + common_usage: str + display_options: Incomplete + display_option_names: Incomplete + negative_opt: Incomplete + verbose: int + dry_run: int + help: int + command_packages: Incomplete + script_name: Incomplete + script_args: Incomplete + command_options: Incomplete + dist_files: Incomplete + packages: Incomplete + package_data: Incomplete + package_dir: Incomplete + py_modules: Incomplete + libraries: Incomplete + headers: Incomplete + ext_modules: Incomplete + ext_package: Incomplete + include_dirs: Incomplete + extra_path: Incomplete + scripts: Incomplete + data_files: Incomplete + password: str + command_obj: Incomplete + have_run: Incomplete + want_user_cfg: bool + def dump_option_dicts( + self, header: Incomplete | None = None, commands: Incomplete | None = None, indent: str = "" + ) -> None: ... + def find_config_files(self): ... + commands: Incomplete + def parse_command_line(self): ... + def finalize_options(self) -> None: ... + def handle_display_options(self, option_order): ... + def print_command_list(self, commands, header, max_length) -> None: ... + def print_commands(self) -> None: ... + def get_command_list(self): ... + def get_command_packages(self): ... + def get_command_class(self, command): ... + def reinitialize_command(self, command, reinit_subcommands: int = 0): ... + def announce(self, msg, level: int = ...) -> None: ... + def run_commands(self) -> None: ... + def run_command(self, command) -> None: ... + def has_pure_modules(self): ... + def has_ext_modules(self): ... + def has_c_libraries(self): ... + def has_modules(self): ... + def has_headers(self): ... + def has_scripts(self): ... + def has_data_files(self): ... + def is_pure(self): ... diff --git a/mypy/typeshed/stdlib/distutils/fancy_getopt.pyi b/mypy/typeshed/stdlib/distutils/fancy_getopt.pyi index 153583be6b5d..c15bb8a167dd 100644 --- a/mypy/typeshed/stdlib/distutils/fancy_getopt.pyi +++ b/mypy/typeshed/stdlib/distutils/fancy_getopt.pyi @@ -1,14 +1,15 @@ from collections.abc import Iterable, Mapping +from re import Pattern from typing import Any, overload from typing_extensions import TypeAlias _Option: TypeAlias = tuple[str, str | None, str] _GR: TypeAlias = tuple[list[str], OptionDummy] -def fancy_getopt( - options: list[_Option], negative_opt: Mapping[_Option, _Option], object: Any, args: list[str] | None -) -> list[str] | _GR: ... -def wrap_text(text: str, width: int) -> list[str]: ... 
+longopt_pat: str +longopt_re: Pattern[str] +neg_alias_re: Pattern[str] +longopt_xlate: dict[int, int] class FancyGetopt: def __init__(self, option_table: list[_Option] | None = None) -> None: ... @@ -20,5 +21,14 @@ class FancyGetopt: def get_option_order(self) -> list[tuple[str, str]]: ... def generate_help(self, header: str | None = None) -> list[str]: ... +def fancy_getopt( + options: list[_Option], negative_opt: Mapping[_Option, _Option], object: Any, args: list[str] | None +) -> list[str] | _GR: ... + +WS_TRANS: dict[int, str] + +def wrap_text(text: str, width: int) -> list[str]: ... +def translate_longopt(opt: str) -> str: ... + class OptionDummy: def __init__(self, options: Iterable[str] = ...) -> None: ... diff --git a/mypy/typeshed/stdlib/distutils/sysconfig.pyi b/mypy/typeshed/stdlib/distutils/sysconfig.pyi index 8b291e8b94a5..464cfb639c6d 100644 --- a/mypy/typeshed/stdlib/distutils/sysconfig.pyi +++ b/mypy/typeshed/stdlib/distutils/sysconfig.pyi @@ -1,9 +1,15 @@ +import sys from collections.abc import Mapping from distutils.ccompiler import CCompiler PREFIX: str EXEC_PREFIX: str +BASE_PREFIX: str +BASE_EXEC_PREFIX: str +project_base: str +python_build: bool +def expand_makefile_vars(s: str, vars: Mapping[str, str]) -> str: ... def get_config_var(name: str) -> int | str | None: ... def get_config_vars(*args: str) -> Mapping[str, int | str]: ... def get_config_h_filename() -> str: ... @@ -11,3 +17,6 @@ def get_makefile_filename() -> str: ... def get_python_inc(plat_specific: bool = ..., prefix: str | None = None) -> str: ... def get_python_lib(plat_specific: bool = ..., standard_lib: bool = ..., prefix: str | None = None) -> str: ... def customize_compiler(compiler: CCompiler) -> None: ... + +if sys.version_info < (3, 10): + def get_python_version() -> str: ... diff --git a/mypy/typeshed/stdlib/distutils/util.pyi b/mypy/typeshed/stdlib/distutils/util.pyi index f03844307581..83b03747fda6 100644 --- a/mypy/typeshed/stdlib/distutils/util.pyi +++ b/mypy/typeshed/stdlib/distutils/util.pyi @@ -1,8 +1,12 @@ +import sys from _typeshed import StrPath, Unused from collections.abc import Callable, Container, Iterable, Mapping from typing import Any from typing_extensions import Literal +if sys.version_info >= (3, 8): + def get_host_platform() -> str: ... + def get_platform() -> str: ... def convert_path(pathname: str) -> str: ... def change_root(new_root: str, pathname: str) -> str: ... diff --git a/mypy/typeshed/stdlib/email/charset.pyi b/mypy/typeshed/stdlib/email/charset.pyi index 24b8fd768b7b..e612847c75b6 100644 --- a/mypy/typeshed/stdlib/email/charset.pyi +++ b/mypy/typeshed/stdlib/email/charset.pyi @@ -20,7 +20,7 @@ class Charset: def header_encode_lines(self, string: str, maxlengths: Iterator[int]) -> list[str]: ... def body_encode(self, string: str) -> str: ... def __eq__(self, other: object) -> bool: ... - def __ne__(self, __other: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... def add_charset( charset: str, header_enc: int | None = None, body_enc: int | None = None, output_charset: str | None = None diff --git a/mypy/typeshed/stdlib/email/header.pyi b/mypy/typeshed/stdlib/email/header.pyi index c6f0c6fbf6fc..fc9d73331bae 100644 --- a/mypy/typeshed/stdlib/email/header.pyi +++ b/mypy/typeshed/stdlib/email/header.pyi @@ -17,7 +17,7 @@ class Header: def append(self, s: bytes | bytearray | str, charset: Charset | str | None = None, errors: str = "strict") -> None: ... 
def encode(self, splitchars: str = ";, \t", maxlinelen: int | None = None, linesep: str = "\n") -> str: ... def __eq__(self, other: object) -> bool: ... - def __ne__(self, __other: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... # decode_header() either returns list[tuple[str, None]] if the header # contains no encoded parts, or list[tuple[bytes, str | None]] if the header diff --git a/mypy/typeshed/stdlib/encodings/utf_8_sig.pyi b/mypy/typeshed/stdlib/encodings/utf_8_sig.pyi index 150fe22f8f6e..af69217d6732 100644 --- a/mypy/typeshed/stdlib/encodings/utf_8_sig.pyi +++ b/mypy/typeshed/stdlib/encodings/utf_8_sig.pyi @@ -4,7 +4,7 @@ from _typeshed import ReadableBuffer class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors: str = "strict") -> None: ... def encode(self, input: str, final: bool = False) -> bytes: ... - def getstate(self) -> int: ... # type: ignore[override] + def getstate(self) -> int: ... def setstate(self, state: int) -> None: ... # type: ignore[override] class IncrementalDecoder(codecs.BufferedIncrementalDecoder): diff --git a/mypy/typeshed/stdlib/enum.pyi b/mypy/typeshed/stdlib/enum.pyi index b46fe429cacb..5a39c456b4b4 100644 --- a/mypy/typeshed/stdlib/enum.pyi +++ b/mypy/typeshed/stdlib/enum.pyi @@ -106,7 +106,15 @@ class EnumMeta(ABCMeta): def __iter__(self: type[_EnumMemberT]) -> Iterator[_EnumMemberT]: ... def __reversed__(self: type[_EnumMemberT]) -> Iterator[_EnumMemberT]: ... - def __contains__(self: type[Any], obj: object) -> bool: ... + if sys.version_info >= (3, 12): + def __contains__(self: type[Any], value: object) -> bool: ... + elif sys.version_info >= (3, 11): + def __contains__(self: type[Any], member: object) -> bool: ... + elif sys.version_info >= (3, 10): + def __contains__(self: type[Any], obj: object) -> bool: ... + else: + def __contains__(self: type[Any], member: object) -> bool: ... + def __getitem__(self: type[_EnumMemberT], name: str) -> _EnumMemberT: ... @_builtins_property def __members__(self: type[_EnumMemberT]) -> types.MappingProxyType[str, _EnumMemberT]: ... @@ -114,7 +122,7 @@ class EnumMeta(ABCMeta): def __bool__(self) -> Literal[True]: ... def __dir__(self) -> list[str]: ... # Simple value lookup - @overload # type: ignore[override] + @overload def __call__(cls: type[_EnumMemberT], value: Any, names: None = None) -> _EnumMemberT: ... # Functional Enum API if sys.version_info >= (3, 11): diff --git a/mypy/typeshed/stdlib/fractions.pyi b/mypy/typeshed/stdlib/fractions.pyi index 97cefc916d9b..3c84978c1bad 100644 --- a/mypy/typeshed/stdlib/fractions.pyi +++ b/mypy/typeshed/stdlib/fractions.pyi @@ -103,14 +103,25 @@ class Fraction(Rational): def __rmod__(b, a: int | Fraction) -> Fraction: ... @overload def __rmod__(b, a: float) -> float: ... - @overload - def __divmod__(a, b: int | Fraction) -> tuple[int, Fraction]: ... - @overload - def __divmod__(a, b: float) -> tuple[float, Fraction]: ... - @overload - def __rdivmod__(b, a: int | Fraction) -> tuple[int, Fraction]: ... - @overload - def __rdivmod__(b, a: float) -> tuple[float, Fraction]: ... + if sys.version_info >= (3, 8): + @overload + def __divmod__(a, b: int | Fraction) -> tuple[int, Fraction]: ... + @overload + def __divmod__(a, b: float) -> tuple[float, Fraction]: ... + @overload + def __rdivmod__(a, b: int | Fraction) -> tuple[int, Fraction]: ... + @overload + def __rdivmod__(a, b: float) -> tuple[float, Fraction]: ... + else: + @overload + def __divmod__(self, other: int | Fraction) -> tuple[int, Fraction]: ... 
+ @overload + def __divmod__(self, other: float) -> tuple[float, Fraction]: ... + @overload + def __rdivmod__(self, other: int | Fraction) -> tuple[int, Fraction]: ... + @overload + def __rdivmod__(self, other: float) -> tuple[float, Fraction]: ... + @overload def __pow__(a, b: int) -> Fraction: ... @overload diff --git a/mypy/typeshed/stdlib/functools.pyi b/mypy/typeshed/stdlib/functools.pyi index 1214e349f605..fe36a134f74e 100644 --- a/mypy/typeshed/stdlib/functools.pyi +++ b/mypy/typeshed/stdlib/functools.pyi @@ -41,7 +41,7 @@ def reduce(function: Callable[[_T, _T], _T], sequence: Iterable[_T]) -> _T: ... class _CacheInfo(NamedTuple): hits: int misses: int - maxsize: int + maxsize: int | None currsize: int @final @@ -148,6 +148,8 @@ if sys.version_info >= (3, 8): @overload def __get__(self, instance: object, owner: type[Any] | None = None) -> _T: ... def __set_name__(self, owner: type[Any], name: str) -> None: ... + # __set__ is not defined at runtime, but @cached_property is designed to be settable + def __set__(self, instance: object, value: _T) -> None: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... diff --git a/mypy/typeshed/stdlib/gettext.pyi b/mypy/typeshed/stdlib/gettext.pyi index 5d98227ec1f4..57e81120b8ca 100644 --- a/mypy/typeshed/stdlib/gettext.pyi +++ b/mypy/typeshed/stdlib/gettext.pyi @@ -57,8 +57,8 @@ class GNUTranslations(NullTranslations): CONTEXT: str VERSIONS: Sequence[int] -@overload # ignores incompatible overloads -def find( # type: ignore[misc] +@overload +def find( domain: str, localedir: StrPath | None = None, languages: Iterable[str] | None = None, all: Literal[False] = False ) -> str | None: ... @overload diff --git a/mypy/typeshed/stdlib/heapq.pyi b/mypy/typeshed/stdlib/heapq.pyi index 61418b3704d6..3f4f274b9769 100644 --- a/mypy/typeshed/stdlib/heapq.pyi +++ b/mypy/typeshed/stdlib/heapq.pyi @@ -15,4 +15,4 @@ def merge( ) -> Iterable[_S]: ... def nlargest(n: int, iterable: Iterable[_S], key: Callable[[_S], SupportsRichComparison] | None = None) -> list[_S]: ... def nsmallest(n: int, iterable: Iterable[_S], key: Callable[[_S], SupportsRichComparison] | None = None) -> list[_S]: ... -def _heapify_max(__x: list[Any]) -> None: ... # undocumented +def _heapify_max(__heap: list[Any]) -> None: ... # undocumented diff --git a/mypy/typeshed/stdlib/hmac.pyi b/mypy/typeshed/stdlib/hmac.pyi index b9a867f7bd61..ee8af1b48d83 100644 --- a/mypy/typeshed/stdlib/hmac.pyi +++ b/mypy/typeshed/stdlib/hmac.pyi @@ -30,7 +30,13 @@ class HMAC: block_size: int @property def name(self) -> str: ... - def __init__(self, key: bytes | bytearray, msg: ReadableBuffer | None = None, digestmod: _DigestMod = "") -> None: ... + if sys.version_info >= (3, 8): + def __init__(self, key: bytes | bytearray, msg: ReadableBuffer | None = None, digestmod: _DigestMod = "") -> None: ... + else: + def __init__( + self, key: bytes | bytearray, msg: ReadableBuffer | None = None, digestmod: _DigestMod | None = None + ) -> None: ... + def update(self, msg: ReadableBuffer) -> None: ... def digest(self) -> bytes: ... def hexdigest(self) -> str: ... diff --git a/mypy/typeshed/stdlib/http/client.pyi b/mypy/typeshed/stdlib/http/client.pyi index b1506b50e750..cc142fbb23fd 100644 --- a/mypy/typeshed/stdlib/http/client.pyi +++ b/mypy/typeshed/stdlib/http/client.pyi @@ -101,7 +101,7 @@ class HTTPMessage(email.message.Message): def parse_headers(fp: io.BufferedIOBase, _class: Callable[[], email.message.Message] = ...) -> HTTPMessage: ... 
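# Illustrative sketch, not part of the diff: why the stub above adds __set__
# to cached_property even though the runtime class never defines it.  Because
# cached_property is a non-data descriptor, plain attribute assignment writes
# straight into the instance __dict__, pre-seeding or overriding the cache.
# "Report" is a made-up class:
from functools import cached_property

class Report:
    @cached_property
    def total(self) -> int:
        print("computing...")
        return 42

r = Report()
r.total = 7         # allowed at runtime; the stub's __set__ makes mypy agree
print(r.total)      # 7 -- "computing..." is never printed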
-class HTTPResponse(io.BufferedIOBase, BinaryIO): +class HTTPResponse(io.BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible method definitions in the base classes msg: HTTPMessage headers: HTTPMessage version: int @@ -174,7 +174,7 @@ class HTTPConnection: class HTTPSConnection(HTTPConnection): # Can be `None` if `.connect()` was not called: - sock: ssl.SSLSocket | Any # type: ignore[override] + sock: ssl.SSLSocket | Any def __init__( self, host: str, diff --git a/mypy/typeshed/stdlib/importlib/abc.pyi b/mypy/typeshed/stdlib/importlib/abc.pyi index 3d0c2d38c4e9..4bf46104ba6d 100644 --- a/mypy/typeshed/stdlib/importlib/abc.pyi +++ b/mypy/typeshed/stdlib/importlib/abc.pyi @@ -1,3 +1,4 @@ +import _ast import sys import types from _typeshed import ( @@ -7,6 +8,7 @@ from _typeshed import ( OpenBinaryModeWriting, OpenTextMode, ReadableBuffer, + StrPath, ) from abc import ABCMeta, abstractmethod from collections.abc import Iterator, Mapping, Sequence @@ -52,7 +54,9 @@ class InspectLoader(Loader): def get_source(self, fullname: str) -> str | None: ... def exec_module(self, module: types.ModuleType) -> None: ... @staticmethod - def source_to_code(data: ReadableBuffer | str, path: str = "") -> types.CodeType: ... + def source_to_code( + data: ReadableBuffer | str | _ast.Module | _ast.Expression | _ast.Interactive, path: ReadableBuffer | StrPath = "" + ) -> types.CodeType: ... class ExecutionLoader(InspectLoader): @abstractmethod @@ -191,7 +195,7 @@ if sys.version_info >= (3, 9): class TraversableResources(ResourceReader): @abstractmethod def files(self) -> Traversable: ... - def open_resource(self, resource: str) -> BufferedReader: ... # type: ignore[override] + def open_resource(self, resource: str) -> BufferedReader: ... def resource_path(self, resource: Any) -> NoReturn: ... def is_resource(self, path: str) -> bool: ... def contents(self) -> Iterator[str]: ... diff --git a/mypy/typeshed/stdlib/importlib/resources.pyi b/mypy/typeshed/stdlib/importlib/resources/__init__.pyi similarity index 100% rename from mypy/typeshed/stdlib/importlib/resources.pyi rename to mypy/typeshed/stdlib/importlib/resources/__init__.pyi diff --git a/mypy/typeshed/stdlib/importlib/resources/abc.pyi b/mypy/typeshed/stdlib/importlib/resources/abc.pyi new file mode 100644 index 000000000000..a36c952d01ac --- /dev/null +++ b/mypy/typeshed/stdlib/importlib/resources/abc.pyi @@ -0,0 +1,12 @@ +import sys + +if sys.version_info >= (3, 11): + # These are all actually defined in this file on 3.11+, + # and re-exported from importlib.abc, + # but it's much less code duplication for typeshed if we pretend that they're still defined + # in importlib.abc on 3.11+, and re-exported from this file + from importlib.abc import ( + ResourceReader as ResourceReader, + Traversable as Traversable, + TraversableResources as TraversableResources, + ) diff --git a/mypy/typeshed/stdlib/io.pyi b/mypy/typeshed/stdlib/io.pyi index c3e07bacbe5a..c114f839594f 100644 --- a/mypy/typeshed/stdlib/io.pyi +++ b/mypy/typeshed/stdlib/io.pyi @@ -90,7 +90,7 @@ class BufferedIOBase(IOBase): def read(self, __size: int | None = ...) -> bytes: ... def read1(self, __size: int = ...) -> bytes: ... -class FileIO(RawIOBase, BinaryIO): +class FileIO(RawIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of writelines in the base classes mode: str name: FileDescriptorOrPath # type: ignore[assignment] def __init__( @@ -102,7 +102,7 @@ class FileIO(RawIOBase, BinaryIO): def read(self, __size: int = -1) -> bytes: ... def __enter__(self) -> Self: ... 
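# Illustrative sketch, not part of the diff: the widened source_to_code()
# signature above reflects that the default implementation is essentially a
# thin wrapper around compile(), which accepts AST objects as well as source
# strings and bytes:
import ast

tree = ast.parse("answer = 6 * 7")            # an ast.Module
code = compile(tree, "<generated>", "exec")   # the same kind of call source_to_code makes
namespace = {}
exec(code, namespace)
print(namespace["answer"])                    # 42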
-class BytesIO(BufferedIOBase, BinaryIO): +class BytesIO(BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of methods in the base classes def __init__(self, initial_bytes: ReadableBuffer = ...) -> None: ... # BytesIO does not contain a "name" field. This workaround is necessary # to allow BytesIO sub-classes to add this field, as it is defined @@ -113,17 +113,17 @@ class BytesIO(BufferedIOBase, BinaryIO): def getbuffer(self) -> memoryview: ... def read1(self, __size: int | None = -1) -> bytes: ... -class BufferedReader(BufferedIOBase, BinaryIO): +class BufferedReader(BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of methods in the base classes def __enter__(self) -> Self: ... def __init__(self, raw: RawIOBase, buffer_size: int = ...) -> None: ... def peek(self, __size: int = 0) -> bytes: ... -class BufferedWriter(BufferedIOBase, BinaryIO): +class BufferedWriter(BufferedIOBase, BinaryIO): # type: ignore[misc] # incompatible definitions of writelines in the base classes def __enter__(self) -> Self: ... def __init__(self, raw: RawIOBase, buffer_size: int = ...) -> None: ... def write(self, __buffer: ReadableBuffer) -> int: ... -class BufferedRandom(BufferedReader, BufferedWriter): +class BufferedRandom(BufferedReader, BufferedWriter): # type: ignore[misc] # incompatible definitions of methods in the base classes def __enter__(self) -> Self: ... def seek(self, __target: int, __whence: int = 0) -> int: ... # stubtest needs this @@ -144,7 +144,7 @@ class TextIOBase(IOBase): def readlines(self, __hint: int = -1) -> list[str]: ... # type: ignore[override] def read(self, __size: int | None = ...) -> str: ... -class TextIOWrapper(TextIOBase, TextIO): +class TextIOWrapper(TextIOBase, TextIO): # type: ignore[misc] # incompatible definitions of write in the base classes def __init__( self, buffer: IO[bytes], diff --git a/mypy/typeshed/stdlib/ipaddress.pyi b/mypy/typeshed/stdlib/ipaddress.pyi index 9f9662137765..7a4146885b29 100644 --- a/mypy/typeshed/stdlib/ipaddress.pyi +++ b/mypy/typeshed/stdlib/ipaddress.pyi @@ -36,7 +36,9 @@ class _BaseAddress(_IPAddressBase, SupportsInt): def __add__(self, other: int) -> Self: ... def __int__(self) -> int: ... def __sub__(self, other: int) -> Self: ... - def __format__(self, fmt: str) -> str: ... + if sys.version_info >= (3, 9): + def __format__(self, fmt: str) -> str: ... + def __eq__(self, other: object) -> bool: ... def __lt__(self, other: Self) -> bool: ... if sys.version_info >= (3, 11): diff --git a/mypy/typeshed/stdlib/logging/__init__.pyi b/mypy/typeshed/stdlib/logging/__init__.pyi index c74afa45ded1..3c547a6e0ff8 100644 --- a/mypy/typeshed/stdlib/logging/__init__.pyi +++ b/mypy/typeshed/stdlib/logging/__init__.pyi @@ -116,74 +116,74 @@ class Logger(Filterer): self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def info( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... 
def warning( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def warn( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def error( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def exception( self, msg: object, *args: object, exc_info: _ExcInfoType = True, - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def critical( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def log( self, level: int, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def _log( self, @@ -200,66 +200,66 @@ class Logger(Filterer): self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def info( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def warning( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def warn( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def error( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def critical( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... 
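# Illustrative sketch, not part of the diff: the defaults spelled out above
# (exc_info=None, stack_info=False, stacklevel=1, extra=None) are the real
# runtime defaults; typical calls that exercise those keywords ("example" and
# "request_id" are made-up names, and stacklevel needs Python 3.8+):
import logging

logger = logging.getLogger("example")

def helper() -> None:
    # stacklevel=2 attributes the record to helper()'s caller
    logger.warning("deprecated call", stacklevel=2)

helper()
try:
    1 / 0
except ZeroDivisionError:
    # exc_info=True attaches the active exception; extra adds record attributes
    logger.error("division failed", exc_info=True, extra={"request_id": "n/a"})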
def log( self, level: int, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def exception( self, msg: object, *args: object, exc_info: _ExcInfoType = True, - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def _log( self, @@ -432,50 +432,50 @@ class LoggerAdapter(Generic[_L]): self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def info( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def warning( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def warn( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def error( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def exception( @@ -483,19 +483,19 @@ class LoggerAdapter(Generic[_L]): msg: object, *args: object, exc_info: _ExcInfoType = True, - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def critical( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def log( @@ -503,10 +503,10 @@ class LoggerAdapter(Generic[_L]): level: int, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... 
else: @@ -514,45 +514,45 @@ class LoggerAdapter(Generic[_L]): self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def info( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def warning( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def warn( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def error( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def exception( @@ -560,17 +560,17 @@ class LoggerAdapter(Generic[_L]): msg: object, *args: object, exc_info: _ExcInfoType = True, - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def critical( self, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... def log( @@ -578,9 +578,9 @@ class LoggerAdapter(Generic[_L]): level: int, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, **kwargs: object, ) -> None: ... @@ -610,102 +610,126 @@ if sys.version_info >= (3, 8): def debug( msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def info( msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def warning( msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... 
def warn( msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def error( msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def critical( msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def exception( msg: object, *args: object, exc_info: _ExcInfoType = True, - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... def log( level: int, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - stacklevel: int = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + stacklevel: int = 1, + extra: Mapping[str, object] | None = None, ) -> None: ... else: def debug( - msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., extra: Mapping[str, object] | None = ... + msg: object, + *args: object, + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def info( - msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., extra: Mapping[str, object] | None = ... + msg: object, + *args: object, + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def warning( - msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., extra: Mapping[str, object] | None = ... + msg: object, + *args: object, + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def warn( - msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., extra: Mapping[str, object] | None = ... + msg: object, + *args: object, + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def error( - msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., extra: Mapping[str, object] | None = ... + msg: object, + *args: object, + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def critical( - msg: object, *args: object, exc_info: _ExcInfoType = ..., stack_info: bool = ..., extra: Mapping[str, object] | None = ... + msg: object, + *args: object, + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... def exception( msg: object, *args: object, exc_info: _ExcInfoType = True, - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... 
def log( level: int, msg: object, *args: object, - exc_info: _ExcInfoType = ..., - stack_info: bool = ..., - extra: Mapping[str, object] | None = ..., + exc_info: _ExcInfoType = None, + stack_info: bool = False, + extra: Mapping[str, object] | None = None, ) -> None: ... fatal = critical diff --git a/mypy/typeshed/stdlib/lzma.pyi b/mypy/typeshed/stdlib/lzma.pyi index 34bd6f3f8db1..8e296bb5b357 100644 --- a/mypy/typeshed/stdlib/lzma.pyi +++ b/mypy/typeshed/stdlib/lzma.pyi @@ -104,7 +104,7 @@ class LZMACompressor: class LZMAError(Exception): ... -class LZMAFile(io.BufferedIOBase, IO[bytes]): +class LZMAFile(io.BufferedIOBase, IO[bytes]): # type: ignore[misc] # incompatible definitions of writelines in the base classes def __init__( self, filename: _PathOrFile | None = None, diff --git a/mypy/typeshed/stdlib/mmap.pyi b/mypy/typeshed/stdlib/mmap.pyi index c74ad3cda6db..8da4ea7ca864 100644 --- a/mypy/typeshed/stdlib/mmap.pyi +++ b/mypy/typeshed/stdlib/mmap.pyi @@ -60,14 +60,14 @@ class mmap(Iterable[int], Sized): def read(self, n: int | None = ...) -> bytes: ... def write(self, bytes: ReadableBuffer) -> int: ... @overload - def __getitem__(self, __index: int) -> int: ... + def __getitem__(self, __key: int) -> int: ... @overload - def __getitem__(self, __index: slice) -> bytes: ... - def __delitem__(self, __index: int | slice) -> NoReturn: ... + def __getitem__(self, __key: slice) -> bytes: ... + def __delitem__(self, __key: int | slice) -> NoReturn: ... @overload - def __setitem__(self, __index: int, __object: int) -> None: ... + def __setitem__(self, __key: int, __value: int) -> None: ... @overload - def __setitem__(self, __index: slice, __object: ReadableBuffer) -> None: ... + def __setitem__(self, __key: slice, __value: ReadableBuffer) -> None: ... # Doesn't actually exist, but the object actually supports "in" because it has __getitem__, # so we claim that there is also a __contains__ to help type checkers. def __contains__(self, __o: object) -> bool: ... diff --git a/mypy/typeshed/stdlib/multiprocessing/dummy/connection.pyi b/mypy/typeshed/stdlib/multiprocessing/dummy/connection.pyi index fcd03a657319..d7e982129466 100644 --- a/mypy/typeshed/stdlib/multiprocessing/dummy/connection.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/dummy/connection.pyi @@ -1,14 +1,13 @@ +from multiprocessing.connection import _Address from queue import Queue from types import TracebackType from typing import Any -from typing_extensions import Self, TypeAlias +from typing_extensions import Self __all__ = ["Client", "Listener", "Pipe"] families: list[None] -_Address: TypeAlias = str | tuple[str, int] - class Connection: _in: Any _out: Any diff --git a/mypy/typeshed/stdlib/multiprocessing/managers.pyi b/mypy/typeshed/stdlib/multiprocessing/managers.pyi index e035a1875650..ad147fca36ed 100644 --- a/mypy/typeshed/stdlib/multiprocessing/managers.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/managers.pyi @@ -116,8 +116,8 @@ class BaseListProxy(BaseProxy, MutableSequence[_T]): def sort(self, *, key: Callable[[_T], SupportsRichComparison], reverse: bool = ...) -> None: ... class ListProxy(BaseListProxy[_T]): - def __iadd__(self, __x: Iterable[_T]) -> Self: ... # type: ignore[override] - def __imul__(self, __n: SupportsIndex) -> Self: ... # type: ignore[override] + def __iadd__(self, __value: Iterable[_T]) -> Self: ... # type: ignore[override] + def __imul__(self, __value: SupportsIndex) -> Self: ... 
# type: ignore[override] # Returned by BaseManager.get_server() class Server: diff --git a/mypy/typeshed/stdlib/multiprocessing/queues.pyi b/mypy/typeshed/stdlib/multiprocessing/queues.pyi index 7ba17dcfbe05..f821b6df4b37 100644 --- a/mypy/typeshed/stdlib/multiprocessing/queues.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/queues.pyi @@ -15,7 +15,7 @@ class Queue(queue.Queue[_T]): def __init__(self, maxsize: int = 0, *, ctx: Any = ...) -> None: ... def get(self, block: bool = True, timeout: float | None = None) -> _T: ... def put(self, obj: _T, block: bool = True, timeout: float | None = None) -> None: ... - def put_nowait(self, item: _T) -> None: ... + def put_nowait(self, obj: _T) -> None: ... def get_nowait(self) -> _T: ... def close(self) -> None: ... def join_thread(self) -> None: ... @@ -30,6 +30,6 @@ class SimpleQueue(Generic[_T]): def empty(self) -> bool: ... def get(self) -> _T: ... - def put(self, item: _T) -> None: ... + def put(self, obj: _T) -> None: ... if sys.version_info >= (3, 9): def __class_getitem__(cls, item: Any) -> GenericAlias: ... diff --git a/mypy/typeshed/stdlib/multiprocessing/synchronize.pyi b/mypy/typeshed/stdlib/multiprocessing/synchronize.pyi index 7043759078a2..6c2e18954343 100644 --- a/mypy/typeshed/stdlib/multiprocessing/synchronize.pyi +++ b/mypy/typeshed/stdlib/multiprocessing/synchronize.pyi @@ -30,7 +30,7 @@ class Condition(AbstractContextManager[bool]): ) -> None: ... class Event: - def __init__(self, lock: _LockLike | None = ..., *, ctx: BaseContext) -> None: ... + def __init__(self, *, ctx: BaseContext) -> None: ... def is_set(self) -> bool: ... def set(self) -> None: ... def clear(self) -> None: ... diff --git a/mypy/typeshed/stdlib/os/__init__.pyi b/mypy/typeshed/stdlib/os/__init__.pyi index 595b78789c6a..efe80d82ffba 100644 --- a/mypy/typeshed/stdlib/os/__init__.pyi +++ b/mypy/typeshed/stdlib/os/__init__.pyi @@ -885,7 +885,10 @@ def times() -> times_result: ... def waitpid(__pid: int, __options: int) -> tuple[int, int]: ... if sys.platform == "win32": - def startfile(path: StrOrBytesPath, operation: str | None = None) -> None: ... + if sys.version_info >= (3, 8): + def startfile(path: StrOrBytesPath, operation: str | None = None) -> None: ... + else: + def startfile(filepath: StrOrBytesPath, operation: str | None = None) -> None: ... else: def spawnlp(mode: int, file: StrOrBytesPath, arg0: StrOrBytesPath, *args: StrOrBytesPath) -> int: ... diff --git a/mypy/typeshed/stdlib/pydoc.pyi b/mypy/typeshed/stdlib/pydoc.pyi index c6893d50c66a..f09976ad3809 100644 --- a/mypy/typeshed/stdlib/pydoc.pyi +++ b/mypy/typeshed/stdlib/pydoc.pyi @@ -40,7 +40,7 @@ class ErrorDuringImport(Exception): def __init__(self, filename: str, exc_info: OptExcInfo) -> None: ... def importfile(path: str) -> ModuleType: ... -def safeimport(path: str, forceload: bool = ..., cache: MutableMapping[str, ModuleType] = ...) -> ModuleType: ... +def safeimport(path: str, forceload: bool = ..., cache: MutableMapping[str, ModuleType] = ...) -> ModuleType | None: ... class Doc: PYTHONDOCS: str diff --git a/mypy/typeshed/stdlib/select.pyi b/mypy/typeshed/stdlib/select.pyi index 412fd71ee38d..c86d20c352e0 100644 --- a/mypy/typeshed/stdlib/select.pyi +++ b/mypy/typeshed/stdlib/select.pyi @@ -110,7 +110,7 @@ if sys.platform == "linux": def __exit__( self, __exc_type: type[BaseException] | None = None, - __exc_val: BaseException | None = ..., + __exc_value: BaseException | None = ..., __exc_tb: TracebackType | None = None, ) -> None: ... def close(self) -> None: ... 
diff --git a/mypy/typeshed/stdlib/signal.pyi b/mypy/typeshed/stdlib/signal.pyi index e411d47016b6..4c961a0c9aab 100644 --- a/mypy/typeshed/stdlib/signal.pyi +++ b/mypy/typeshed/stdlib/signal.pyi @@ -53,6 +53,8 @@ class Signals(IntEnum): SIGPWR: int SIGRTMAX: int SIGRTMIN: int + if sys.version_info >= (3, 11): + SIGSTKFLT: int class Handlers(IntEnum): SIG_DFL: int @@ -147,6 +149,8 @@ else: SIGPWR: Signals SIGRTMAX: Signals SIGRTMIN: Signals + if sys.version_info >= (3, 11): + SIGSTKFLT: Signals @final class struct_siginfo(structseq[int], tuple[int, int, int, int, int, int, int]): if sys.version_info >= (3, 10): diff --git a/mypy/typeshed/stdlib/socket.pyi b/mypy/typeshed/stdlib/socket.pyi index dbc1d46ec1d4..6c897b919909 100644 --- a/mypy/typeshed/stdlib/socket.pyi +++ b/mypy/typeshed/stdlib/socket.pyi @@ -664,7 +664,7 @@ class socket(_socket.socket): # Note that the makefile's documented windows-specific behavior is not represented # mode strings with duplicates are intentionally excluded @overload - def makefile( # type: ignore[misc] + def makefile( self, mode: Literal["b", "rb", "br", "wb", "bw", "rwb", "rbw", "wrb", "wbr", "brw", "bwr"], buffering: Literal[0], @@ -725,9 +725,9 @@ class socket(_socket.socket): ) -> TextIOWrapper: ... def sendfile(self, file: _SendableFile, offset: int = 0, count: int | None = None) -> int: ... @property - def family(self) -> AddressFamily: ... # type: ignore[override] + def family(self) -> AddressFamily: ... @property - def type(self) -> SocketKind: ... # type: ignore[override] + def type(self) -> SocketKind: ... def get_inheritable(self) -> bool: ... def set_inheritable(self, inheritable: bool) -> None: ... diff --git a/mypy/typeshed/stdlib/socketserver.pyi b/mypy/typeshed/stdlib/socketserver.pyi index 3f0bb0eea0ce..3799d82a0065 100644 --- a/mypy/typeshed/stdlib/socketserver.pyi +++ b/mypy/typeshed/stdlib/socketserver.pyi @@ -30,7 +30,7 @@ if sys.platform != "win32": ] _RequestType: TypeAlias = _socket | tuple[bytes, _socket] -_AfUnixAddress: TypeAlias = str | ReadableBuffer # adddress acceptable for an AF_UNIX socket +_AfUnixAddress: TypeAlias = str | ReadableBuffer # address acceptable for an AF_UNIX socket _AfInetAddress: TypeAlias = tuple[str | bytes | bytearray, int] # address acceptable for an AF_INET socket # This can possibly be generic at some point: @@ -42,14 +42,10 @@ class BaseServer: request_queue_size: int socket_type: int timeout: float | None + RequestHandlerClass: Callable[[Any, _RetAddress, Self], BaseRequestHandler] def __init__( self, server_address: _Address, RequestHandlerClass: Callable[[Any, _RetAddress, Self], BaseRequestHandler] ) -> None: ... - # It is not actually a `@property`, but we need a `Self` type: - @property - def RequestHandlerClass(self) -> Callable[[Any, _RetAddress, Self], BaseRequestHandler]: ... - @RequestHandlerClass.setter - def RequestHandlerClass(self, val: Callable[[Any, _RetAddress, Self], BaseRequestHandler]) -> None: ... def fileno(self) -> int: ... def handle_request(self) -> None: ... def serve_forever(self, poll_interval: float = 0.5) -> None: ... 
@@ -74,7 +70,7 @@ class BaseServer: class TCPServer(BaseServer): if sys.version_info >= (3, 11): allow_reuse_port: bool - server_address: _AfInetAddress # type: ignore[assignment] + server_address: _AfInetAddress def __init__( self, server_address: _AfInetAddress, diff --git a/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi b/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi index 26188445547e..da58c3aa97fd 100644 --- a/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi +++ b/mypy/typeshed/stdlib/sqlite3/dbapi2.pyi @@ -340,7 +340,7 @@ class Connection: def set_trace_callback(self, trace_callback: Callable[[str], object] | None) -> None: ... # enable_load_extension and load_extension is not available on python distributions compiled # without sqlite3 loadable extension support. see footnotes https://docs.python.org/3/library/sqlite3.html#f1 - def enable_load_extension(self, __enabled: bool) -> None: ... + def enable_load_extension(self, __enable: bool) -> None: ... def load_extension(self, __name: str) -> None: ... def backup( self, @@ -417,18 +417,18 @@ class Row: def __init__(self, __cursor: Cursor, __data: tuple[Any, ...]) -> None: ... def keys(self) -> list[str]: ... @overload - def __getitem__(self, __index: int | str) -> Any: ... + def __getitem__(self, __key: int | str) -> Any: ... @overload - def __getitem__(self, __index: slice) -> tuple[Any, ...]: ... + def __getitem__(self, __key: slice) -> tuple[Any, ...]: ... def __iter__(self) -> Iterator[Any]: ... def __len__(self) -> int: ... # These return NotImplemented for anything that is not a Row. - def __eq__(self, __other: object) -> bool: ... - def __ge__(self, __other: object) -> bool: ... - def __gt__(self, __other: object) -> bool: ... - def __le__(self, __other: object) -> bool: ... - def __lt__(self, __other: object) -> bool: ... - def __ne__(self, __other: object) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __ge__(self, __value: object) -> bool: ... + def __gt__(self, __value: object) -> bool: ... + def __le__(self, __value: object) -> bool: ... + def __lt__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... if sys.version_info >= (3, 8): @final @@ -453,6 +453,6 @@ if sys.version_info >= (3, 11): def seek(self, __offset: int, __origin: int = 0) -> None: ... def __len__(self) -> int: ... def __enter__(self) -> Self: ... - def __exit__(self, __typ: object, __val: object, __tb: object) -> Literal[False]: ... - def __getitem__(self, __item: SupportsIndex | slice) -> int: ... - def __setitem__(self, __item: SupportsIndex | slice, __value: int) -> None: ... + def __exit__(self, __type: object, __val: object, __tb: object) -> Literal[False]: ... + def __getitem__(self, __key: SupportsIndex | slice) -> int: ... + def __setitem__(self, __key: SupportsIndex | slice, __value: int) -> None: ... diff --git a/mypy/typeshed/stdlib/ssl.pyi b/mypy/typeshed/stdlib/ssl.pyi index bbf8a4c6d65a..20b8802bd7b9 100644 --- a/mypy/typeshed/stdlib/ssl.pyi +++ b/mypy/typeshed/stdlib/ssl.pyi @@ -94,8 +94,8 @@ else: _create_default_https_context: Callable[..., SSLContext] -def RAND_bytes(__num: int) -> bytes: ... -def RAND_pseudo_bytes(__num: int) -> tuple[bytes, bool]: ... +def RAND_bytes(__n: int) -> bytes: ... +def RAND_pseudo_bytes(__n: int) -> tuple[bytes, bool]: ... def RAND_status() -> bool: ... def RAND_egd(path: str) -> None: ... def RAND_add(__string: str | ReadableBuffer, __entropy: float) -> None: ... 
@@ -291,11 +291,14 @@ ALERT_DESCRIPTION_UNSUPPORTED_CERTIFICATE: AlertDescription ALERT_DESCRIPTION_UNSUPPORTED_EXTENSION: AlertDescription ALERT_DESCRIPTION_USER_CANCELLED: AlertDescription -class _ASN1Object(NamedTuple): +class _ASN1ObjectBase(NamedTuple): nid: int shortname: str longname: str oid: str + +class _ASN1Object(_ASN1ObjectBase): + def __new__(cls, oid: str) -> Self: ... @classmethod def fromnid(cls, nid: int) -> Self: ... @classmethod @@ -467,7 +470,7 @@ class MemoryBIO: pending: int eof: bool def read(self, __size: int = -1) -> bytes: ... - def write(self, __buf: ReadableBuffer) -> int: ... + def write(self, __b: ReadableBuffer) -> int: ... def write_eof(self) -> None: ... @final diff --git a/mypy/typeshed/stdlib/subprocess.pyi b/mypy/typeshed/stdlib/subprocess.pyi index 3940fad7b915..3c8041811ef3 100644 --- a/mypy/typeshed/stdlib/subprocess.pyi +++ b/mypy/typeshed/stdlib/subprocess.pyi @@ -101,158 +101,158 @@ if sys.version_info >= (3, 11): @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, text: Literal[True], timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, encoding: str, - errors: str | None = ..., + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., + encoding: str | None = None, errors: str, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the *real* keyword only args start capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, @@ -260,48 +260,48 @@ if sys.version_info >= (3, 11): encoding: None = None, errors: None = None, input: ReadableBuffer | None = None, - text: Literal[None, False] = ..., + text: Literal[None, False] = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> CompletedProcess[bytes]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: _InputString | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> CompletedProcess[Any]: ... elif sys.version_info >= (3, 10): @@ -309,154 +309,154 @@ elif sys.version_info >= (3, 10): @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, text: Literal[True], timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, encoding: str, - errors: str | None = ..., + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., + encoding: str | None = None, errors: str, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the *real* keyword only args start capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, @@ -464,46 +464,46 @@ elif sys.version_info >= (3, 10): encoding: None = None, errors: None = None, input: ReadableBuffer | None = None, - text: Literal[None, False] = ..., + text: Literal[None, False] = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> CompletedProcess[bytes]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: _InputString | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> CompletedProcess[Any]: ... elif sys.version_info >= (3, 9): @@ -511,150 +511,150 @@ elif sys.version_info >= (3, 9): @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, text: Literal[True], timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, encoding: str, - errors: str | None = ..., + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., + encoding: str | None = None, errors: str, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the *real* keyword only args start capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, @@ -662,71 +662,71 @@ elif sys.version_info >= (3, 9): encoding: None = None, errors: None = None, input: ReadableBuffer | None = None, - text: Literal[None, False] = ..., + text: Literal[None, False] = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> CompletedProcess[bytes]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: _InputString | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> CompletedProcess[Any]: ... else: @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, text: Literal[True], timeout: float | None = None, @@ -734,106 +734,106 @@ else: @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None 
= None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, encoding: str, - errors: str | None = ..., + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., + encoding: str | None = None, errors: str, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, ) -> CompletedProcess[str]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the *real* keyword only args start capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: str | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, ) -> CompletedProcess[str]: ... 
@overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, @@ -841,35 +841,35 @@ else: encoding: None = None, errors: None = None, input: ReadableBuffer | None = None, - text: Literal[None, False] = ..., + text: Literal[None, False] = None, timeout: float | None = None, ) -> CompletedProcess[bytes]: ... @overload def run( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, capture_output: bool = False, check: bool = False, - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, input: _InputString | None = None, - text: bool | None = ..., + text: bool | None = None, timeout: float | None = None, ) -> CompletedProcess[Any]: ... 
@@ -878,114 +878,114 @@ if sys.version_info >= (3, 11): # 3.11 adds "process_group" argument def call( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> int: ... elif sys.version_info >= (3, 10): # 3.10 adds "pipesize" argument def call( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> int: ... 
elif sys.version_info >= (3, 9): # 3.9 adds arguments "user", "group", "extra_groups" and "umask" def call( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> int: ... else: def call( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, - text: bool | None = ..., + text: bool | None = None, ) -> int: ... 
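The ``call()`` overloads get the same treatment: the written-out defaults (``shell=False``, ``close_fds=True``, ``umask=-1``, ...) mirror what ``Popen`` uses at runtime, and the function simply returns the child's exit status. A small illustrative usage, again not taken from the patch:

.. code-block:: python

    import subprocess
    import sys

    # call() waits for the command and returns its exit status as an int.
    status = subprocess.call([sys.executable, "-c", "raise SystemExit(3)"])
    assert status == 3

    # shell defaults to False, so the first argument is an argv list,
    # exactly as the stub default now documents.
    assert subprocess.call([sys.executable, "-c", "pass"]) == 0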
# Same args as Popen.__init__ @@ -993,114 +993,114 @@ if sys.version_info >= (3, 11): # 3.11 adds "process_group" argument def check_call( args: _CMD, - bufsize: int = ..., + bufsize: int = -1, executable: StrOrBytesPath | None = None, - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., timeout: float | None = ..., *, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> int: ... elif sys.version_info >= (3, 10): # 3.10 adds "pipesize" argument def check_call( args: _CMD, - bufsize: int = ..., + bufsize: int = -1, executable: StrOrBytesPath | None = None, - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., timeout: float | None = ..., *, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> int: ... 
elif sys.version_info >= (3, 9): # 3.9 adds arguments "user", "group", "extra_groups" and "umask" def check_call( args: _CMD, - bufsize: int = ..., + bufsize: int = -1, executable: StrOrBytesPath | None = None, - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., timeout: float | None = ..., *, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> int: ... else: def check_call( args: _CMD, - bufsize: int = ..., + bufsize: int = -1, executable: StrOrBytesPath | None = None, - stdin: _FILE = ..., - stdout: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + stdin: _FILE = None, + stdout: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., timeout: float | None = ..., *, - text: bool | None = ..., + text: bool | None = None, ) -> int: ... 
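``check_call()`` takes essentially the same arguments as ``call()`` but raises on failure instead of returning a non-zero status, which is why its return type stays a plain ``int`` (``0`` in practice). A brief sketch of the runtime behaviour these signatures describe; the commands are illustrative only:

.. code-block:: python

    import subprocess
    import sys

    # On success, check_call() returns 0 ...
    assert subprocess.check_call([sys.executable, "-c", "pass"]) == 0

    # ... and on a non-zero exit status it raises CalledProcessError
    # instead of returning the status.
    try:
        subprocess.check_call([sys.executable, "-c", "raise SystemExit(2)"])
    except subprocess.CalledProcessError as exc:
        assert exc.returncode == 2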
if sys.version_info >= (3, 11): @@ -1108,189 +1108,189 @@ if sys.version_info >= (3, 11): @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, text: Literal[True], - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: str, - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., + encoding: str | None = None, errors: str, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the real keyword only ones start timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: None = None, errors: None = None, - text: Literal[None, False] = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + text: Literal[None, False] = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> bytes: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., - process_group: int | None = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, + process_group: int | None = None, ) -> Any: ... 
# morally: -> str | bytes elif sys.version_info >= (3, 10): @@ -1298,183 +1298,183 @@ elif sys.version_info >= (3, 10): @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, text: Literal[True], - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: str, - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., + encoding: str | None = None, errors: str, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the real keyword only ones start timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: None = None, errors: None = None, - text: Literal[None, False] = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + text: Literal[None, False] = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> bytes: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., - pipesize: int = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, + pipesize: int = -1, ) -> Any: ... 
# morally: -> str | bytes elif sys.version_info >= (3, 9): @@ -1482,330 +1482,330 @@ elif sys.version_info >= (3, 9): @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, text: Literal[True], - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: str, - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., + encoding: str | None = None, errors: str, - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the real keyword only ones start timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: None = None, errors: None = None, - text: Literal[None, False] = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + text: Literal[None, False] = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> bytes: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., - user: str | int | None = ..., - group: str | int | None = ..., - extra_groups: Iterable[str | int] | None = ..., - umask: int = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, + user: str | int | None = None, + group: str | int | None = None, + extra_groups: Iterable[str | int] | None = None, + umask: int = -1, ) -> Any: ... 
# morally: -> str | bytes else: @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., + encoding: str | None = None, + errors: str | None = None, text: Literal[True], ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: str, - errors: str | None = ..., - text: bool | None = ..., + errors: str | None = None, + text: bool | None = None, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., + encoding: str | None = None, errors: str, - text: bool | None = ..., + text: bool | None = None, ) -> str: ... 
@overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, *, universal_newlines: Literal[True], - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., # where the real keyword only ones start timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, ) -> str: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: Literal[False, None] = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: Literal[False, None] = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., encoding: None = None, errors: None = None, - text: Literal[None, False] = ..., + text: Literal[None, False] = None, ) -> bytes: ... @overload def check_output( args: _CMD, - bufsize: int = ..., - executable: StrOrBytesPath | None = ..., - stdin: _FILE = ..., - stderr: _FILE = ..., - preexec_fn: Callable[[], Any] | None = ..., - close_fds: bool = ..., - shell: bool = ..., - cwd: StrOrBytesPath | None = ..., - env: _ENV | None = ..., - universal_newlines: bool | None = ..., - startupinfo: Any = ..., - creationflags: int = ..., - restore_signals: bool = ..., - start_new_session: bool = ..., + bufsize: int = -1, + executable: StrOrBytesPath | None = None, + stdin: _FILE = None, + stderr: _FILE = None, + preexec_fn: Callable[[], Any] | None = None, + close_fds: bool = True, + shell: bool = False, + cwd: StrOrBytesPath | None = None, + env: _ENV | None = None, + universal_newlines: bool | None = None, + startupinfo: Any = None, + creationflags: int = 0, + restore_signals: bool = True, + start_new_session: bool = False, pass_fds: Collection[int] = ..., *, timeout: float | None = None, input: _InputString | None = ..., - encoding: str | None = ..., - errors: str | None = ..., - text: bool | None = ..., + encoding: str | None = None, + errors: str | None = None, + text: bool | None = None, ) -> Any: ... 
# morally: -> str | bytes PIPE: int diff --git a/mypy/typeshed/stdlib/sys.pyi b/mypy/typeshed/stdlib/sys.pyi index e12881599b4a..6e97fbb328b2 100644 --- a/mypy/typeshed/stdlib/sys.pyi +++ b/mypy/typeshed/stdlib/sys.pyi @@ -221,7 +221,7 @@ def _clear_type_cache() -> None: ... def _current_frames() -> dict[int, FrameType]: ... def _getframe(__depth: int = 0) -> FrameType: ... def _debugmallocstats() -> None: ... -def __displayhook__(__value: object) -> None: ... +def __displayhook__(__object: object) -> None: ... def __excepthook__(__exctype: type[BaseException], __value: BaseException, __traceback: TracebackType | None) -> None: ... def exc_info() -> OptExcInfo: ... diff --git a/mypy/typeshed/stdlib/tempfile.pyi b/mypy/typeshed/stdlib/tempfile.pyi index dbff6d632d02..cd27e91fbc75 100644 --- a/mypy/typeshed/stdlib/tempfile.pyi +++ b/mypy/typeshed/stdlib/tempfile.pyi @@ -1,6 +1,6 @@ import io import sys -from _typeshed import BytesPath, GenericPath, StrPath, WriteableBuffer +from _typeshed import BytesPath, GenericPath, ReadableBuffer, StrPath, WriteableBuffer from collections.abc import Iterable, Iterator from types import TracebackType from typing import IO, Any, AnyStr, Generic, overload @@ -215,7 +215,17 @@ class _TemporaryFileWrapper(Generic[AnyStr], IO[AnyStr]): def tell(self) -> int: ... def truncate(self, size: int | None = ...) -> int: ... def writable(self) -> bool: ... + @overload + def write(self: _TemporaryFileWrapper[str], s: str) -> int: ... + @overload + def write(self: _TemporaryFileWrapper[bytes], s: ReadableBuffer) -> int: ... + @overload def write(self, s: AnyStr) -> int: ... + @overload + def writelines(self: _TemporaryFileWrapper[str], lines: Iterable[str]) -> None: ... + @overload + def writelines(self: _TemporaryFileWrapper[bytes], lines: Iterable[ReadableBuffer]) -> None: ... + @overload def writelines(self, lines: Iterable[AnyStr]) -> None: ... if sys.version_info >= (3, 11): @@ -392,8 +402,18 @@ class SpooledTemporaryFile(IO[AnyStr], _SpooledTemporaryFileBase): def seek(self, offset: int, whence: int = ...) -> int: ... def tell(self) -> int: ... def truncate(self, size: int | None = None) -> None: ... # type: ignore[override] + @overload + def write(self: SpooledTemporaryFile[str], s: str) -> int: ... + @overload + def write(self: SpooledTemporaryFile[bytes], s: ReadableBuffer) -> int: ... + @overload def write(self, s: AnyStr) -> int: ... - def writelines(self, iterable: Iterable[AnyStr]) -> None: ... # type: ignore[override] + @overload + def writelines(self: SpooledTemporaryFile[str], iterable: Iterable[str]) -> None: ... + @overload + def writelines(self: SpooledTemporaryFile[bytes], iterable: Iterable[ReadableBuffer]) -> None: ... + @overload + def writelines(self, iterable: Iterable[AnyStr]) -> None: ... def __iter__(self) -> Iterator[AnyStr]: ... # type: ignore[override] # These exist at runtime only on 3.11+. def readable(self) -> bool: ... 
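The new ``write``/``writelines`` overloads in ``tempfile`` let binary-mode temporary files accept any readable buffer (``bytearray``, ``memoryview``, ...) rather than only ``bytes``, matching what the underlying file object accepts at runtime, while text-mode files keep accepting only ``str``. A rough illustration of what now type-checks with these stubs; the written data is made up for the example:

.. code-block:: python

    import tempfile

    # NamedTemporaryFile defaults to binary mode ("w+b"); with the new
    # overloads its write()/writelines() accept any readable buffer.
    with tempfile.NamedTemporaryFile() as binary_tmp:
        binary_tmp.write(b"raw bytes")
        binary_tmp.write(bytearray(b" and a bytearray"))
        binary_tmp.writelines([memoryview(b" and a memoryview")])

    # Text-mode files still only take str; passing bytes here is a type error.
    with tempfile.NamedTemporaryFile(mode="w+") as text_tmp:
        text_tmp.write("plain text only")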
diff --git a/mypy/typeshed/stdlib/textwrap.pyi b/mypy/typeshed/stdlib/textwrap.pyi index e4a5b7899e8e..c00cce3c2d57 100644 --- a/mypy/typeshed/stdlib/textwrap.pyi +++ b/mypy/typeshed/stdlib/textwrap.pyi @@ -55,49 +55,49 @@ def wrap( text: str, width: int = 70, *, - initial_indent: str = ..., - subsequent_indent: str = ..., - expand_tabs: bool = ..., - tabsize: int = ..., - replace_whitespace: bool = ..., - fix_sentence_endings: bool = ..., - break_long_words: bool = ..., - break_on_hyphens: bool = ..., - drop_whitespace: bool = ..., - max_lines: int = ..., - placeholder: str = ..., + initial_indent: str = "", + subsequent_indent: str = "", + expand_tabs: bool = True, + tabsize: int = 8, + replace_whitespace: bool = True, + fix_sentence_endings: bool = False, + break_long_words: bool = True, + break_on_hyphens: bool = True, + drop_whitespace: bool = True, + max_lines: int | None = None, + placeholder: str = " [...]", ) -> list[str]: ... def fill( text: str, width: int = 70, *, - initial_indent: str = ..., - subsequent_indent: str = ..., - expand_tabs: bool = ..., - tabsize: int = ..., - replace_whitespace: bool = ..., - fix_sentence_endings: bool = ..., - break_long_words: bool = ..., - break_on_hyphens: bool = ..., - drop_whitespace: bool = ..., - max_lines: int = ..., - placeholder: str = ..., + initial_indent: str = "", + subsequent_indent: str = "", + expand_tabs: bool = True, + tabsize: int = 8, + replace_whitespace: bool = True, + fix_sentence_endings: bool = False, + break_long_words: bool = True, + break_on_hyphens: bool = True, + drop_whitespace: bool = True, + max_lines: int | None = None, + placeholder: str = " [...]", ) -> str: ... def shorten( text: str, width: int, *, - initial_indent: str = ..., - subsequent_indent: str = ..., - expand_tabs: bool = ..., - tabsize: int = ..., - replace_whitespace: bool = ..., - fix_sentence_endings: bool = ..., - break_long_words: bool = ..., - break_on_hyphens: bool = ..., - drop_whitespace: bool = ..., + initial_indent: str = "", + subsequent_indent: str = "", + expand_tabs: bool = True, + tabsize: int = 8, + replace_whitespace: bool = True, + fix_sentence_endings: bool = False, + break_long_words: bool = True, + break_on_hyphens: bool = True, + drop_whitespace: bool = True, # Omit `max_lines: int = None`, it is forced to 1 here. - placeholder: str = ..., + placeholder: str = " [...]", ) -> str: ... def dedent(text: str) -> str: ... def indent(text: str, prefix: str, predicate: Callable[[str], bool] | None = None) -> str: ... diff --git a/mypy/typeshed/stdlib/threading.pyi b/mypy/typeshed/stdlib/threading.pyi index c0b344fe757d..c017978808dd 100644 --- a/mypy/typeshed/stdlib/threading.pyi +++ b/mypy/typeshed/stdlib/threading.pyi @@ -3,6 +3,7 @@ from _typeshed import ProfileFunction, TraceFunction from collections.abc import Callable, Iterable, Mapping from types import TracebackType from typing import Any, TypeVar +from typing_extensions import final _T = TypeVar("_T") @@ -101,6 +102,7 @@ class Thread: class _DummyThread(Thread): def __init__(self) -> None: ... +@final class Lock: def __enter__(self) -> bool: ... def __exit__( @@ -110,6 +112,7 @@ class Lock: def release(self) -> None: ... def locked(self) -> bool: ... +@final class _RLock: def acquire(self, blocking: bool = True, timeout: float = -1) -> bool: ... def release(self) -> None: ... 
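In ``textwrap`` the keyword-only options now carry the documented ``TextWrapper`` defaults (``tabsize=8``, ``max_lines=None``, ``placeholder=" [...]"``, ...), so stub-aware tooling can surface them directly; the ``@final`` markers added to ``threading.Lock`` and ``_RLock`` likewise record that these lock types are not intended to be subclassed. A small check of the textwrap defaults, included only as an illustration:

.. code-block:: python

    import textwrap

    text = "The quick brown fox jumps over the lazy dog, again and again."

    # fill() wraps using the defaults now spelled out in the stub
    # (expand_tabs=True, break_long_words=True, drop_whitespace=True, ...).
    print(textwrap.fill(text, width=30))

    # shorten() truncates with the default " [...]" placeholder.
    assert textwrap.shorten(text, width=30).endswith("[...]")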
diff --git a/mypy/typeshed/stdlib/tkinter/__init__.pyi b/mypy/typeshed/stdlib/tkinter/__init__.pyi index 1d30e4b73c23..7b4b06be4ecb 100644 --- a/mypy/typeshed/stdlib/tkinter/__init__.pyi +++ b/mypy/typeshed/stdlib/tkinter/__init__.pyi @@ -6,7 +6,7 @@ from enum import Enum from tkinter.constants import * from tkinter.font import _FontDescription from types import TracebackType -from typing import Any, Generic, NamedTuple, Protocol, TypeVar, overload +from typing import Any, Generic, NamedTuple, Protocol, TypeVar, overload, type_check_only from typing_extensions import Literal, TypeAlias, TypedDict if sys.version_info >= (3, 9): @@ -666,7 +666,7 @@ class Wm: iconmask = wm_iconmask def wm_iconname(self, newName: Incomplete | None = None) -> str: ... iconname = wm_iconname - def wm_iconphoto(self, default: bool, __image1: Image, *args: Image) -> None: ... + def wm_iconphoto(self, default: bool, __image1: _PhotoImageLike | str, *args: _PhotoImageLike | str) -> None: ... iconphoto = wm_iconphoto def wm_iconposition(self, x: int | None = None, y: int | None = None) -> tuple[int, int] | None: ... iconposition = wm_iconposition @@ -3206,12 +3206,19 @@ class OptionMenu(Menubutton): # configure, config, cget are inherited from Menubutton # destroy and __getitem__ are overridden, signature does not change -class _Image(Protocol): - tk: _tkinter.TkappType - def height(self) -> int: ... - def width(self) -> int: ... +# Marker to indicate that it is a valid bitmap/photo image. PIL implements compatible versions +# which don't share a class hierarchy. The actual API is a __str__() which returns a valid name, +# not something that type checkers can detect. +@type_check_only +class _Image: ... + +@type_check_only +class _BitmapImageLike(_Image): ... + +@type_check_only +class _PhotoImageLike(_Image): ... -class Image: +class Image(_Image): name: Incomplete tk: _tkinter.TkappType def __init__( @@ -3226,7 +3233,8 @@ class Image: def type(self): ... def width(self) -> int: ... -class PhotoImage(Image): +class PhotoImage(Image, _PhotoImageLike): + # This should be kept in sync with PIL.ImageTK.PhotoImage.__init__() def __init__( self, name: str | None = None, @@ -3278,7 +3286,8 @@ class PhotoImage(Image): def transparency_get(self, x: int, y: int) -> bool: ... def transparency_set(self, x: int, y: int, boolean: bool) -> None: ... -class BitmapImage(Image): +class BitmapImage(Image, _BitmapImageLike): + # This should be kept in sync with PIL.ImageTK.BitmapImage.__init__() def __init__( self, name: Incomplete | None = None, diff --git a/mypy/typeshed/stdlib/traceback.pyi b/mypy/typeshed/stdlib/traceback.pyi index 4483a8c2a1b0..a6d6d3e168b3 100644 --- a/mypy/typeshed/stdlib/traceback.pyi +++ b/mypy/typeshed/stdlib/traceback.pyi @@ -132,12 +132,12 @@ class TracebackException: cls, exc: BaseException, *, - limit: int | None = ..., - lookup_lines: bool = ..., - capture_locals: bool = ..., - compact: bool = ..., - max_group_width: int = ..., - max_group_depth: int = ..., + limit: int | None = None, + lookup_lines: bool = True, + capture_locals: bool = False, + compact: bool = False, + max_group_width: int = 15, + max_group_depth: int = 10, ) -> Self: ... elif sys.version_info >= (3, 10): def __init__( @@ -157,10 +157,10 @@ class TracebackException: cls, exc: BaseException, *, - limit: int | None = ..., - lookup_lines: bool = ..., - capture_locals: bool = ..., - compact: bool = ..., + limit: int | None = None, + lookup_lines: bool = True, + capture_locals: bool = False, + compact: bool = False, ) -> Self: ... 
else: def __init__( @@ -176,7 +176,7 @@ class TracebackException: ) -> None: ... @classmethod def from_exception( - cls, exc: BaseException, *, limit: int | None = ..., lookup_lines: bool = ..., capture_locals: bool = ... + cls, exc: BaseException, *, limit: int | None = None, lookup_lines: bool = True, capture_locals: bool = False ) -> Self: ... def __eq__(self, other: object) -> bool: ... diff --git a/mypy/typeshed/stdlib/types.pyi b/mypy/typeshed/stdlib/types.pyi index d529b3d9ad1a..2b3e58b8a7f6 100644 --- a/mypy/typeshed/stdlib/types.pyi +++ b/mypy/typeshed/stdlib/types.pyi @@ -103,9 +103,9 @@ class FunctionType: ) -> None: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... @overload - def __get__(self, obj: None, type: type) -> FunctionType: ... + def __get__(self, __instance: None, __owner: type) -> FunctionType: ... @overload - def __get__(self, obj: object, type: type | None = ...) -> MethodType: ... + def __get__(self, __instance: object, __owner: type | None = None) -> MethodType: ... LambdaType = FunctionType @@ -454,7 +454,7 @@ class WrapperDescriptorType: @property def __objclass__(self) -> type: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - def __get__(self, __obj: Any, __type: type = ...) -> Any: ... + def __get__(self, __instance: Any, __owner: type | None = None) -> Any: ... @final class MethodWrapperType: @@ -467,8 +467,8 @@ class MethodWrapperType: @property def __objclass__(self) -> type: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - def __eq__(self, __other: object) -> bool: ... - def __ne__(self, __other: object) -> bool: ... + def __eq__(self, __value: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... @final class MethodDescriptorType: @@ -479,7 +479,7 @@ class MethodDescriptorType: @property def __objclass__(self) -> type: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - def __get__(self, obj: Any, type: type = ...) -> Any: ... + def __get__(self, __instance: Any, __owner: type | None = None) -> Any: ... @final class ClassMethodDescriptorType: @@ -490,7 +490,7 @@ class ClassMethodDescriptorType: @property def __objclass__(self) -> type: ... def __call__(self, *args: Any, **kwargs: Any) -> Any: ... - def __get__(self, obj: Any, type: type = ...) -> Any: ... + def __get__(self, __instance: Any, __owner: type | None = None) -> Any: ... @final class TracebackType: @@ -536,9 +536,9 @@ class GetSetDescriptorType: def __qualname__(self) -> str: ... @property def __objclass__(self) -> type: ... - def __get__(self, __obj: Any, __type: type = ...) -> Any: ... + def __get__(self, __instance: Any, __owner: type | None = None) -> Any: ... def __set__(self, __instance: Any, __value: Any) -> None: ... - def __delete__(self, __obj: Any) -> None: ... + def __delete__(self, __instance: Any) -> None: ... @final class MemberDescriptorType: @@ -548,9 +548,9 @@ class MemberDescriptorType: def __qualname__(self) -> str: ... @property def __objclass__(self) -> type: ... - def __get__(self, __obj: Any, __type: type = ...) -> Any: ... + def __get__(self, __instance: Any, __owner: type | None = None) -> Any: ... def __set__(self, __instance: Any, __value: Any) -> None: ... - def __delete__(self, __obj: Any) -> None: ... + def __delete__(self, __instance: Any) -> None: ... def new_class( name: str, @@ -611,5 +611,5 @@ if sys.version_info >= (3, 10): class UnionType: @property def __args__(self) -> tuple[Any, ...]: ... - def __or__(self, __obj: Any) -> UnionType: ... 
- def __ror__(self, __obj: Any) -> UnionType: ... + def __or__(self, __value: Any) -> UnionType: ... + def __ror__(self, __value: Any) -> UnionType: ... diff --git a/mypy/typeshed/stdlib/typing.pyi b/mypy/typeshed/stdlib/typing.pyi index d06b081d3ddc..0a8de1a7b538 100644 --- a/mypy/typeshed/stdlib/typing.pyi +++ b/mypy/typeshed/stdlib/typing.pyi @@ -2,7 +2,7 @@ import collections # Needed by aliases like DefaultDict, see mypy issue 2986 import sys import typing_extensions from _collections_abc import dict_items, dict_keys, dict_values -from _typeshed import IdentityFunction, Incomplete, SupportsKeysAndGetItem +from _typeshed import IdentityFunction, Incomplete, ReadableBuffer, SupportsKeysAndGetItem from abc import ABCMeta, abstractmethod from contextlib import AbstractAsyncContextManager, AbstractContextManager from re import Match as Match, Pattern as Pattern @@ -20,6 +20,11 @@ from types import ( ) from typing_extensions import Never as _Never, ParamSpec as _ParamSpec, final as _final +if sys.version_info >= (3, 10): + from types import UnionType +if sys.version_info >= (3, 9): + from types import GenericAlias + __all__ = [ "AbstractSet", "Any", @@ -254,7 +259,7 @@ _T_contra = TypeVar("_T_contra", contravariant=True) # Ditto contravariant. _TC = TypeVar("_TC", bound=Type[object]) def no_type_check(arg: _F) -> _F: ... -def no_type_check_decorator(decorator: Callable[_P, _T]) -> Callable[_P, _T]: ... # type: ignore[misc] +def no_type_check_decorator(decorator: Callable[_P, _T]) -> Callable[_P, _T]: ... # Type aliases and type constructors @@ -588,7 +593,7 @@ class Mapping(Collection[_KT], Generic[_KT, _VT_co]): def items(self) -> ItemsView[_KT, _VT_co]: ... def keys(self) -> KeysView[_KT]: ... def values(self) -> ValuesView[_VT_co]: ... - def __contains__(self, __o: object) -> bool: ... + def __contains__(self, __key: object) -> bool: ... class MutableMapping(Mapping[_KT, _VT], Generic[_KT, _VT]): @abstractmethod @@ -682,8 +687,22 @@ class IO(Iterator[AnyStr], Generic[AnyStr]): @abstractmethod def writable(self) -> bool: ... @abstractmethod + @overload + def write(self: IO[str], __s: str) -> int: ... + @abstractmethod + @overload + def write(self: IO[bytes], __s: ReadableBuffer) -> int: ... + @abstractmethod + @overload def write(self, __s: AnyStr) -> int: ... @abstractmethod + @overload + def writelines(self: IO[str], __lines: Iterable[str]) -> None: ... + @abstractmethod + @overload + def writelines(self: IO[bytes], __lines: Iterable[ReadableBuffer]) -> None: ... + @abstractmethod + @overload def writelines(self, __lines: Iterable[AnyStr]) -> None: ... @abstractmethod def __next__(self) -> AnyStr: ... @@ -693,7 +712,7 @@ class IO(Iterator[AnyStr], Generic[AnyStr]): def __enter__(self) -> IO[AnyStr]: ... @abstractmethod def __exit__( - self, __t: Type[BaseException] | None, __value: BaseException | None, __traceback: TracebackType | None + self, __type: Type[BaseException] | None, __value: BaseException | None, __traceback: TracebackType | None ) -> None: ... class BinaryIO(IO[bytes]): @@ -745,9 +764,21 @@ else: ) -> dict[str, Any]: ... if sys.version_info >= (3, 8): - def get_origin(tp: Any) -> Any | None: ... def get_args(tp: Any) -> tuple[Any, ...]: ... + if sys.version_info >= (3, 10): + @overload + def get_origin(tp: ParamSpecArgs | ParamSpecKwargs) -> ParamSpec: ... + @overload + def get_origin(tp: UnionType) -> type[UnionType]: ... + if sys.version_info >= (3, 9): + @overload + def get_origin(tp: GenericAlias) -> type: ... + @overload + def get_origin(tp: Any) -> Any | None: ... 
+ else: + def get_origin(tp: Any) -> Any | None: ... + @overload def cast(typ: Type[_T], val: Any) -> _T: ... @overload @@ -766,6 +797,7 @@ if sys.version_info >= (3, 11): eq_default: bool = True, order_default: bool = False, kw_only_default: bool = False, + frozen_default: bool = False, # on 3.11, runtime accepts it as part of kwargs field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = ..., **kwargs: Any, ) -> IdentityFunction: ... diff --git a/mypy/typeshed/stdlib/typing_extensions.pyi b/mypy/typeshed/stdlib/typing_extensions.pyi index bf3892d5709e..100f91632f29 100644 --- a/mypy/typeshed/stdlib/typing_extensions.pyi +++ b/mypy/typeshed/stdlib/typing_extensions.pyi @@ -32,6 +32,11 @@ from typing import ( # noqa: Y022,Y039 type_check_only, ) +if sys.version_info >= (3, 10): + from types import UnionType +if sys.version_info >= (3, 9): + from types import GenericAlias + __all__ = [ "Any", "ClassVar", @@ -65,6 +70,7 @@ __all__ = [ "assert_never", "assert_type", "dataclass_transform", + "deprecated", "final", "IntVar", "is_typeddict", @@ -155,6 +161,18 @@ def get_type_hints( include_extras: bool = False, ) -> dict[str, Any]: ... def get_args(tp: Any) -> tuple[Any, ...]: ... + +if sys.version_info >= (3, 10): + @overload + def get_origin(tp: UnionType) -> type[UnionType]: ... + +if sys.version_info >= (3, 9): + @overload + def get_origin(tp: GenericAlias) -> type: ... + +@overload +def get_origin(tp: ParamSpecArgs | ParamSpecKwargs) -> ParamSpec: ... +@overload def get_origin(tp: Any) -> Any | None: ... Annotated: _SpecialForm @@ -226,6 +244,7 @@ else: eq_default: bool = True, order_default: bool = False, kw_only_default: bool = False, + frozen_default: bool = False, field_specifiers: tuple[type[Any] | Callable[..., Any], ...] = ..., **kwargs: object, ) -> IdentityFunction: ... @@ -308,3 +327,4 @@ class TypeVarTuple: def __iter__(self) -> Any: ... # Unpack[Self] def override(__arg: _F) -> _F: ... +def deprecated(__msg: str, *, category: type[Warning] | None = ..., stacklevel: int = 1) -> Callable[[_T], _T]: ... diff --git a/mypy/typeshed/stdlib/unittest/case.pyi b/mypy/typeshed/stdlib/unittest/case.pyi index 8f8cf43385a8..45c39e3f3010 100644 --- a/mypy/typeshed/stdlib/unittest/case.pyi +++ b/mypy/typeshed/stdlib/unittest/case.pyi @@ -48,7 +48,7 @@ else: def __init__(self, test_case: TestCase, logger_name: str, level: int) -> None: ... def __enter__(self) -> _LoggingWatcher: ... def __exit__( - self, exc_type: type[BaseException] | None, exc_val: BaseException | None, exc_tb: TracebackType | None + self, exc_type: type[BaseException] | None, exc_value: BaseException | None, tb: TracebackType | None ) -> bool | None: ... if sys.version_info >= (3, 8): @@ -68,10 +68,13 @@ class SkipTest(Exception): class _SupportsAbsAndDunderGE(SupportsDunderGE[Any], SupportsAbs[Any], Protocol): ... +# Keep this alias in sync with builtins._ClassInfo +# We can't import it from builtins or pytype crashes, +# due to the fact that pytype uses a custom builtins stub rather than typeshed's builtins stub if sys.version_info >= (3, 10): - _IsInstanceClassInfo: TypeAlias = type | UnionType | tuple[type | UnionType | tuple[Any, ...], ...] + _ClassInfo: TypeAlias = type | UnionType | tuple[_ClassInfo, ...] else: - _IsInstanceClassInfo: TypeAlias = type | tuple[type | tuple[Any, ...], ...] + _ClassInfo: TypeAlias = type | tuple[_ClassInfo, ...] class TestCase: failureException: type[BaseException] @@ -107,8 +110,8 @@ class TestCase: def assertIsNotNone(self, obj: object, msg: Any = None) -> None: ... 
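For reference, the overloaded ``get_origin`` signatures above track its runtime behavior on the different kinds of type forms; a minimal sketch (assumes Python 3.10+ for the ``X | Y`` and ``ParamSpec`` cases)::

    import types
    from typing import ParamSpec, get_args, get_origin

    P = ParamSpec("P")

    # Subscripted builtins (GenericAlias) report the bare class as origin.
    assert get_origin(list[int]) is list
    assert get_args(list[int]) == (int,)

    # PEP 604 unions report types.UnionType as origin.
    assert get_origin(int | str) is types.UnionType
    assert get_args(int | str) == (int, str)

    # ParamSpec components resolve back to the ParamSpec itself.
    assert get_origin(P.args) is P

    # Plain, unsubscripted types have no origin.
    assert get_origin(int) is None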
def assertIn(self, member: Any, container: Iterable[Any] | Container[Any], msg: Any = None) -> None: ... def assertNotIn(self, member: Any, container: Iterable[Any] | Container[Any], msg: Any = None) -> None: ... - def assertIsInstance(self, obj: object, cls: _IsInstanceClassInfo, msg: Any = None) -> None: ... - def assertNotIsInstance(self, obj: object, cls: _IsInstanceClassInfo, msg: Any = None) -> None: ... + def assertIsInstance(self, obj: object, cls: _ClassInfo, msg: Any = None) -> None: ... + def assertNotIsInstance(self, obj: object, cls: _ClassInfo, msg: Any = None) -> None: ... @overload def assertGreater(self, a: SupportsDunderGT[_T], b: _T, msg: Any = None) -> None: ... @overload @@ -129,7 +132,7 @@ class TestCase: # are not using `ParamSpec` intentionally, # because they might be used with explicitly wrong arg types to raise some error in tests. @overload - def assertRaises( # type: ignore[misc] + def assertRaises( self, expected_exception: type[BaseException] | tuple[type[BaseException], ...], callable: Callable[..., Any], @@ -141,7 +144,7 @@ class TestCase: self, expected_exception: type[_E] | tuple[type[_E], ...], *, msg: Any = ... ) -> _AssertRaisesContext[_E]: ... @overload - def assertRaisesRegex( # type: ignore[misc] + def assertRaisesRegex( self, expected_exception: type[BaseException] | tuple[type[BaseException], ...], expected_regex: str | Pattern[str], @@ -154,7 +157,7 @@ class TestCase: self, expected_exception: type[_E] | tuple[type[_E], ...], expected_regex: str | Pattern[str], *, msg: Any = ... ) -> _AssertRaisesContext[_E]: ... @overload - def assertWarns( # type: ignore[misc] + def assertWarns( self, expected_warning: type[Warning] | tuple[type[Warning], ...], callable: Callable[_P, Any], @@ -166,7 +169,7 @@ class TestCase: self, expected_warning: type[Warning] | tuple[type[Warning], ...], *, msg: Any = ... ) -> _AssertWarnsContext: ... @overload - def assertWarnsRegex( # type: ignore[misc] + def assertWarnsRegex( self, expected_warning: type[Warning] | tuple[type[Warning], ...], expected_regex: str | Pattern[str], diff --git a/mypy/typeshed/stdlib/unittest/mock.pyi b/mypy/typeshed/stdlib/unittest/mock.pyi index f0345c903a3b..953480549fb2 100644 --- a/mypy/typeshed/stdlib/unittest/mock.pyi +++ b/mypy/typeshed/stdlib/unittest/mock.pyi @@ -81,7 +81,7 @@ class _Call(tuple[Any, ...]): from_kall: bool = True, ) -> None: ... def __eq__(self, other: object) -> bool: ... - def __ne__(self, __other: object) -> bool: ... + def __ne__(self, __value: object) -> bool: ... def __call__(self, *args: Any, **kwargs: Any) -> _Call: ... def __getattr__(self, attr: str) -> Any: ... def __getattribute__(self, attr: str) -> Any: ... @@ -437,6 +437,6 @@ class PropertyMock(Mock): else: def __get__(self, obj: _T, obj_type: type[_T] | None) -> Self: ... - def __set__(self, obj: Any, value: Any) -> None: ... + def __set__(self, obj: Any, val: Any) -> None: ... def seal(mock: Any) -> None: ... diff --git a/mypy/typeshed/stdlib/urllib/parse.pyi b/mypy/typeshed/stdlib/urllib/parse.pyi index 50c5d44cdd80..8e179ca765b1 100644 --- a/mypy/typeshed/stdlib/urllib/parse.pyi +++ b/mypy/typeshed/stdlib/urllib/parse.pyi @@ -40,13 +40,10 @@ scheme_chars: str if sys.version_info < (3, 11): MAX_CACHE_SIZE: int -class _ResultMixinBase(Generic[AnyStr]): - def geturl(self) -> AnyStr: ... - -class _ResultMixinStr(_ResultMixinBase[str]): +class _ResultMixinStr: def encode(self, encoding: str = "ascii", errors: str = "strict") -> _ResultMixinBytes: ... 
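The ``assertRaises``/``assertWarns`` overloads above (which no longer need a ``type: ignore``) cover two distinct call styles; a small, self-contained illustration::

    import unittest


    class ExampleTest(unittest.TestCase):
        def test_callable_form(self) -> None:
            # First overload: pass the callable and its arguments directly.
            self.assertRaises(ZeroDivisionError, divmod, 1, 0)

        def test_context_manager_form(self) -> None:
            # Second overload: call with only the exception type and use the
            # returned context manager.
            with self.assertRaises(ZeroDivisionError) as ctx:
                divmod(1, 0)
            self.assertIsInstance(ctx.exception, ZeroDivisionError)


    if __name__ == "__main__":
        unittest.main()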
-class _ResultMixinBytes(_ResultMixinBase[bytes]): +class _ResultMixinBytes: def decode(self, encoding: str = "ascii", errors: str = "strict") -> _ResultMixinStr: ... class _NetlocResultMixinBase(Generic[AnyStr]): @@ -64,55 +61,44 @@ class _NetlocResultMixinBase(Generic[AnyStr]): class _NetlocResultMixinStr(_NetlocResultMixinBase[str], _ResultMixinStr): ... class _NetlocResultMixinBytes(_NetlocResultMixinBase[bytes], _ResultMixinBytes): ... -# Ideally this would be a generic fixed-length tuple, -# but mypy doesn't support that yet: https://github.com/python/mypy/issues/685#issuecomment-992014179 -class _DefragResultBase(tuple[AnyStr, ...], Generic[AnyStr]): - if sys.version_info >= (3, 10): - __match_args__ = ("url", "fragment") - @property - def url(self) -> AnyStr: ... - @property - def fragment(self) -> AnyStr: ... - -class _SplitResultBase(NamedTuple): - scheme: str - netloc: str - path: str - query: str - fragment: str - -class _SplitResultBytesBase(NamedTuple): - scheme: bytes - netloc: bytes - path: bytes - query: bytes - fragment: bytes - -class _ParseResultBase(NamedTuple): - scheme: str - netloc: str - path: str - params: str - query: str - fragment: str - -class _ParseResultBytesBase(NamedTuple): - scheme: bytes - netloc: bytes - path: bytes - params: bytes - query: bytes - fragment: bytes +class _DefragResultBase(NamedTuple, Generic[AnyStr]): + url: AnyStr + fragment: AnyStr + +class _SplitResultBase(NamedTuple, Generic[AnyStr]): + scheme: AnyStr + netloc: AnyStr + path: AnyStr + query: AnyStr + fragment: AnyStr + +class _ParseResultBase(NamedTuple, Generic[AnyStr]): + scheme: AnyStr + netloc: AnyStr + path: AnyStr + params: AnyStr + query: AnyStr + fragment: AnyStr # Structured result objects for string data -class DefragResult(_DefragResultBase[str], _ResultMixinStr): ... -class SplitResult(_SplitResultBase, _NetlocResultMixinStr): ... -class ParseResult(_ParseResultBase, _NetlocResultMixinStr): ... +class DefragResult(_DefragResultBase[str], _ResultMixinStr): + def geturl(self) -> str: ... + +class SplitResult(_SplitResultBase[str], _NetlocResultMixinStr): + def geturl(self) -> str: ... + +class ParseResult(_ParseResultBase[str], _NetlocResultMixinStr): + def geturl(self) -> str: ... # Structured result objects for bytes data -class DefragResultBytes(_DefragResultBase[bytes], _ResultMixinBytes): ... -class SplitResultBytes(_SplitResultBytesBase, _NetlocResultMixinBytes): ... -class ParseResultBytes(_ParseResultBytesBase, _NetlocResultMixinBytes): ... +class DefragResultBytes(_DefragResultBase[bytes], _ResultMixinBytes): + def geturl(self) -> bytes: ... + +class SplitResultBytes(_SplitResultBase[bytes], _NetlocResultMixinBytes): + def geturl(self) -> bytes: ... + +class ParseResultBytes(_ParseResultBase[bytes], _NetlocResultMixinBytes): + def geturl(self) -> bytes: ... def parse_qs( qs: AnyStr | None, diff --git a/mypy/typeshed/stdlib/winreg.pyi b/mypy/typeshed/stdlib/winreg.pyi index 5b2d09a3bebc..70ea6a1ced11 100644 --- a/mypy/typeshed/stdlib/winreg.pyi +++ b/mypy/typeshed/stdlib/winreg.pyi @@ -14,7 +14,7 @@ if sys.platform == "win32": def DeleteValue(__key: _KeyType, __value: str) -> None: ... def EnumKey(__key: _KeyType, __index: int) -> str: ... def EnumValue(__key: _KeyType, __index: int) -> tuple[str, Any, int]: ... - def ExpandEnvironmentStrings(__str: str) -> str: ... + def ExpandEnvironmentStrings(__string: str) -> str: ... def FlushKey(__key: _KeyType) -> None: ... def LoadKey(__key: _KeyType, __sub_key: str, __file_name: str) -> None: ... 
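The ``urllib.parse`` result classes reworked above keep their named-tuple behavior and gain per-class ``geturl()`` methods; for reference, this is how they behave at runtime::

    from urllib.parse import urldefrag, urlsplit

    split = urlsplit("https://example.com/path?q=1#top")
    # SplitResult is a named tuple: attribute access plus a geturl() round-trip.
    assert split.scheme == "https"
    assert split.netloc == "example.com"
    assert split.geturl() == "https://example.com/path?q=1#top"

    # DefragResult exposes url/fragment and also supports geturl().
    defrag = urldefrag("https://example.com/path#top")
    assert defrag.url == "https://example.com/path"
    assert defrag.fragment == "top"
    assert defrag.geturl() == "https://example.com/path#top"

    # encode() converts a str-based result to its bytes counterpart.
    assert split.encode().geturl() == b"https://example.com/path?q=1#top"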
def OpenKey(key: _KeyType, sub_key: str, reserved: int = 0, access: int = 131097) -> HKEYType: ... diff --git a/mypy/typeshed/stdlib/xml/dom/minidom.pyi b/mypy/typeshed/stdlib/xml/dom/minidom.pyi index 7bbffb88c8f7..ecc7bb6bcdf7 100644 --- a/mypy/typeshed/stdlib/xml/dom/minidom.pyi +++ b/mypy/typeshed/stdlib/xml/dom/minidom.pyi @@ -1,10 +1,14 @@ import sys import xml.dom from _typeshed import Incomplete, ReadableBuffer, SupportsRead, SupportsWrite +from typing import NoReturn, TypeVar from typing_extensions import Literal, Self +from xml.dom.minicompat import NodeList from xml.dom.xmlbuilder import DocumentLS, DOMImplementationLS from xml.sax.xmlreader import XMLReader +_N = TypeVar("_N", bound=Node) + def parse(file: str | SupportsRead[ReadableBuffer | str], parser: XMLReader | None = None, bufsize: int | None = None): ... def parseString(string: str | ReadableBuffer, parser: XMLReader | None = None): ... def getDOMImplementation(features=None) -> DOMImplementation | None: ... @@ -34,7 +38,7 @@ class Node(xml.dom.Node): def hasChildNodes(self) -> bool: ... def insertBefore(self, newChild, refChild): ... - def appendChild(self, node): ... + def appendChild(self, node: _N) -> _N: ... def replaceChild(self, newChild, oldChild): ... def removeChild(self, oldChild): ... def normalize(self) -> None: ... @@ -143,7 +147,7 @@ class Element(Node): removeAttributeNodeNS: Incomplete def hasAttribute(self, name: str) -> bool: ... def hasAttributeNS(self, namespaceURI: str, localName) -> bool: ... - def getElementsByTagName(self, name: str): ... + def getElementsByTagName(self, name: str) -> NodeList[Node]: ... def getElementsByTagNameNS(self, namespaceURI: str, localName): ... def writexml(self, writer: SupportsWrite[str], indent: str = "", addindent: str = "", newl: str = "") -> None: ... def hasAttributes(self) -> bool: ... @@ -158,12 +162,12 @@ class Childless: childNodes: Incomplete firstChild: Incomplete lastChild: Incomplete - def appendChild(self, node) -> None: ... + def appendChild(self, node) -> NoReturn: ... def hasChildNodes(self) -> bool: ... - def insertBefore(self, newChild, refChild) -> None: ... - def removeChild(self, oldChild) -> None: ... + def insertBefore(self, newChild, refChild) -> NoReturn: ... + def removeChild(self, oldChild) -> NoReturn: ... def normalize(self) -> None: ... - def replaceChild(self, newChild, oldChild) -> None: ... + def replaceChild(self, newChild, oldChild) -> NoReturn: ... class ProcessingInstruction(Childless, Node): nodeType: int @@ -254,10 +258,10 @@ class Entity(Identified, Node): notationName: Incomplete childNodes: Incomplete def __init__(self, name, publicId, systemId, notation) -> None: ... - def appendChild(self, newChild) -> None: ... - def insertBefore(self, newChild, refChild) -> None: ... - def removeChild(self, oldChild) -> None: ... - def replaceChild(self, newChild, oldChild) -> None: ... + def appendChild(self, newChild) -> NoReturn: ... + def insertBefore(self, newChild, refChild) -> NoReturn: ... + def removeChild(self, oldChild) -> NoReturn: ... + def replaceChild(self, newChild, oldChild) -> NoReturn: ... class Notation(Identified, Childless, Node): nodeType: int @@ -300,7 +304,7 @@ class Document(Node, DocumentLS): doctype: DocumentType | None childNodes: Incomplete def __init__(self) -> None: ... - def appendChild(self, node): ... + def appendChild(self, node: _N) -> _N: ... documentElement: Incomplete def removeChild(self, oldChild): ... def unlink(self) -> None: ... 
@@ -315,7 +319,7 @@ class Document(Node, DocumentLS): def createElementNS(self, namespaceURI: str, qualifiedName: str): ... def createAttributeNS(self, namespaceURI: str, qualifiedName: str) -> Attr: ... def getElementById(self, id): ... - def getElementsByTagName(self, name: str): ... + def getElementsByTagName(self, name: str) -> NodeList[Node]: ... def getElementsByTagNameNS(self, namespaceURI: str, localName): ... def isSupported(self, feature: str, version: str | None) -> bool: ... def importNode(self, node, deep): ... diff --git a/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi b/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi index db33b2d673d7..2cf8dbbe7025 100644 --- a/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi +++ b/mypy/typeshed/stdlib/xml/etree/ElementTree.pyi @@ -58,13 +58,13 @@ if sys.version_info >= (3, 8): *, out: None = None, from_file: _FileRead | None = None, - with_comments: bool = ..., - strip_text: bool = ..., - rewrite_prefixes: bool = ..., - qname_aware_tags: Iterable[str] | None = ..., - qname_aware_attrs: Iterable[str] | None = ..., - exclude_attrs: Iterable[str] | None = ..., - exclude_tags: Iterable[str] | None = ..., + with_comments: bool = False, + strip_text: bool = False, + rewrite_prefixes: bool = False, + qname_aware_tags: Iterable[str] | None = None, + qname_aware_attrs: Iterable[str] | None = None, + exclude_attrs: Iterable[str] | None = None, + exclude_tags: Iterable[str] | None = None, ) -> str: ... @overload def canonicalize( @@ -72,13 +72,13 @@ if sys.version_info >= (3, 8): *, out: SupportsWrite[str], from_file: _FileRead | None = None, - with_comments: bool = ..., - strip_text: bool = ..., - rewrite_prefixes: bool = ..., - qname_aware_tags: Iterable[str] | None = ..., - qname_aware_attrs: Iterable[str] | None = ..., - exclude_attrs: Iterable[str] | None = ..., - exclude_tags: Iterable[str] | None = ..., + with_comments: bool = False, + strip_text: bool = False, + rewrite_prefixes: bool = False, + qname_aware_tags: Iterable[str] | None = None, + qname_aware_attrs: Iterable[str] | None = None, + exclude_attrs: Iterable[str] | None = None, + exclude_tags: Iterable[str] | None = None, ) -> None: ... class Element: @@ -112,18 +112,18 @@ class Element: def set(self, __key: str, __value: str) -> None: ... def __copy__(self) -> Element: ... # returns the type of self in Python impl, but not in C impl def __deepcopy__(self, __memo: Any) -> Element: ... # Only exists in C impl - def __delitem__(self, __i: SupportsIndex | slice) -> None: ... + def __delitem__(self, __key: SupportsIndex | slice) -> None: ... @overload - def __getitem__(self, __i: SupportsIndex) -> Element: ... + def __getitem__(self, __key: SupportsIndex) -> Element: ... @overload - def __getitem__(self, __s: slice) -> list[Element]: ... + def __getitem__(self, __key: slice) -> list[Element]: ... def __len__(self) -> int: ... # Doesn't actually exist at runtime, but instance of the class are indeed iterable due to __getitem__. def __iter__(self) -> Iterator[Element]: ... @overload - def __setitem__(self, __i: SupportsIndex, __o: Element) -> None: ... + def __setitem__(self, __key: SupportsIndex, __value: Element) -> None: ... @overload - def __setitem__(self, __s: slice, __o: Iterable[Element]) -> None: ... + def __setitem__(self, __key: slice, __value: Iterable[Element]) -> None: ... if sys.version_info < (3, 9): def getchildren(self) -> list[Element]: ... def getiterator(self, tag: str | None = None) -> list[Element]: ... 
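The ``minidom`` annotations above (a ``TypeVar`` return for ``appendChild`` and ``NodeList[Node]`` for ``getElementsByTagName``) match the runtime behavior shown in this short sketch::

    from xml.dom.minidom import Document

    doc = Document()
    root = doc.appendChild(doc.createElement("root"))
    # appendChild returns the node it was given, so "child" is the new Element.
    child = root.appendChild(doc.createElement("item"))
    child.setAttribute("name", "first")

    # getElementsByTagName returns a NodeList, which behaves like a sequence.
    items = doc.getElementsByTagName("item")
    assert len(items) == 1
    assert items[0] is child
    print(doc.toxml())  # <?xml version="1.0" ?><root><item name="first"/></root>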
diff --git a/mypy/version.py b/mypy/version.py index 258a0e4f8bcb..5e8f0f646764 100644 --- a/mypy/version.py +++ b/mypy/version.py @@ -8,7 +8,7 @@ # - Release versions have the form "1.2.3". # - Dev versions have the form "1.2.3+dev" (PLUS sign to conform to PEP 440). # - Before 1.0 we had the form "0.NNN". -__version__ = "1.1.0+dev" +__version__ = "1.2.0" base_version = __version__ mypy_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__))) diff --git a/mypyc/analysis/dataflow.py b/mypyc/analysis/dataflow.py index 21c4da8981d1..877fdaf77884 100644 --- a/mypyc/analysis/dataflow.py +++ b/mypyc/analysis/dataflow.py @@ -18,6 +18,10 @@ ComparisonOp, ControlOp, Extend, + Float, + FloatComparisonOp, + FloatNeg, + FloatOp, GetAttr, GetElementPtr, Goto, @@ -245,9 +249,18 @@ def visit_load_global(self, op: LoadGlobal) -> GenAndKill[T]: def visit_int_op(self, op: IntOp) -> GenAndKill[T]: return self.visit_register_op(op) + def visit_float_op(self, op: FloatOp) -> GenAndKill[T]: + return self.visit_register_op(op) + + def visit_float_neg(self, op: FloatNeg) -> GenAndKill[T]: + return self.visit_register_op(op) + def visit_comparison_op(self, op: ComparisonOp) -> GenAndKill[T]: return self.visit_register_op(op) + def visit_float_comparison_op(self, op: FloatComparisonOp) -> GenAndKill[T]: + return self.visit_register_op(op) + def visit_load_mem(self, op: LoadMem) -> GenAndKill[T]: return self.visit_register_op(op) @@ -444,7 +457,7 @@ def analyze_undefined_regs( def non_trivial_sources(op: Op) -> set[Value]: result = set() for source in op.sources(): - if not isinstance(source, Integer): + if not isinstance(source, (Integer, Float)): result.add(source) return result @@ -454,7 +467,7 @@ def visit_branch(self, op: Branch) -> GenAndKill[Value]: return non_trivial_sources(op), set() def visit_return(self, op: Return) -> GenAndKill[Value]: - if not isinstance(op.value, Integer): + if not isinstance(op.value, (Integer, Float)): return {op.value}, set() else: return set(), set() diff --git a/mypyc/analysis/ircheck.py b/mypyc/analysis/ircheck.py index 719faebfcee8..2e6b7320e898 100644 --- a/mypyc/analysis/ircheck.py +++ b/mypyc/analysis/ircheck.py @@ -16,6 +16,9 @@ ControlOp, DecRef, Extend, + FloatComparisonOp, + FloatNeg, + FloatOp, GetAttr, GetElementPtr, Goto, @@ -43,6 +46,7 @@ TupleSet, Unbox, Unreachable, + Value, ) from mypyc.ir.pprint import format_func from mypyc.ir.rtypes import ( @@ -54,6 +58,7 @@ bytes_rprimitive, dict_rprimitive, int_rprimitive, + is_float_rprimitive, is_object_rprimitive, list_rprimitive, range_rprimitive, @@ -221,6 +226,14 @@ def check_compatibility(self, op: Op, t: RType, s: RType) -> None: if not can_coerce_to(t, s) or not can_coerce_to(s, t): self.fail(source=op, desc=f"{t.name} and {s.name} are not compatible") + def expect_float(self, op: Op, v: Value) -> None: + if not is_float_rprimitive(v.type): + self.fail(op, f"Float expected (actual type is {v.type})") + + def expect_non_float(self, op: Op, v: Value) -> None: + if is_float_rprimitive(v.type): + self.fail(op, "Float not expected") + def visit_goto(self, op: Goto) -> None: self.check_control_op_targets(op) @@ -376,10 +389,24 @@ def visit_load_global(self, op: LoadGlobal) -> None: pass def visit_int_op(self, op: IntOp) -> None: - pass + self.expect_non_float(op, op.lhs) + self.expect_non_float(op, op.rhs) def visit_comparison_op(self, op: ComparisonOp) -> None: self.check_compatibility(op, op.lhs.type, op.rhs.type) + self.expect_non_float(op, op.lhs) + self.expect_non_float(op, op.rhs) + + def 
visit_float_op(self, op: FloatOp) -> None: + self.expect_float(op, op.lhs) + self.expect_float(op, op.rhs) + + def visit_float_neg(self, op: FloatNeg) -> None: + self.expect_float(op, op.src) + + def visit_float_comparison_op(self, op: FloatComparisonOp) -> None: + self.expect_float(op, op.lhs) + self.expect_float(op, op.rhs) def visit_load_mem(self, op: LoadMem) -> None: pass diff --git a/mypyc/analysis/selfleaks.py b/mypyc/analysis/selfleaks.py index 16c1050acf91..288c366e50e5 100644 --- a/mypyc/analysis/selfleaks.py +++ b/mypyc/analysis/selfleaks.py @@ -14,6 +14,9 @@ Cast, ComparisonOp, Extend, + FloatComparisonOp, + FloatNeg, + FloatOp, GetAttr, GetElementPtr, Goto, @@ -160,6 +163,15 @@ def visit_int_op(self, op: IntOp) -> GenAndKill: def visit_comparison_op(self, op: ComparisonOp) -> GenAndKill: return CLEAN + def visit_float_op(self, op: FloatOp) -> GenAndKill: + return CLEAN + + def visit_float_neg(self, op: FloatNeg) -> GenAndKill: + return CLEAN + + def visit_float_comparison_op(self, op: FloatComparisonOp) -> GenAndKill: + return CLEAN + def visit_load_mem(self, op: LoadMem) -> GenAndKill: return CLEAN diff --git a/mypyc/build.py b/mypyc/build.py index cc03eba95b4e..8e1ee8078c11 100644 --- a/mypyc/build.py +++ b/mypyc/build.py @@ -25,7 +25,7 @@ import re import sys import time -from typing import TYPE_CHECKING, Any, Dict, Iterable, NoReturn, cast +from typing import TYPE_CHECKING, Any, Dict, Iterable, NoReturn, Union, cast from mypy.build import BuildSource from mypy.errors import CompileError @@ -41,11 +41,17 @@ from mypyc.options import CompilerOptions if TYPE_CHECKING: - from distutils.core import Extension + from distutils.core import Extension as _distutils_Extension + from typing_extensions import TypeAlias + + from setuptools import Extension as _setuptools_Extension + + Extension: TypeAlias = Union[_setuptools_Extension, _distutils_Extension] + try: # Import setuptools so that it monkey-patch overrides distutils - import setuptools # noqa: F401 + import setuptools except ImportError: if sys.version_info >= (3, 12): # Raise on Python 3.12, since distutils will go away forever @@ -57,13 +63,16 @@ def get_extension() -> type[Extension]: # We can work with either setuptools or distutils, and pick setuptools # if it has been imported. 
use_setuptools = "setuptools" in sys.modules + extension_class: type[Extension] if not use_setuptools: - from distutils.core import Extension + import distutils.core + + extension_class = distutils.core.Extension else: - from setuptools import Extension + extension_class = setuptools.Extension - return Extension + return extension_class def setup_mypycify_vars() -> None: diff --git a/mypyc/codegen/emit.py b/mypyc/codegen/emit.py index 6e0c89dd0ecf..0f1f5ad071ad 100644 --- a/mypyc/codegen/emit.py +++ b/mypyc/codegen/emit.py @@ -895,6 +895,16 @@ def emit_unbox( self.emit_line(f"{dest} = CPyLong_AsInt32({src});") # TODO: Handle 'optional' # TODO: Handle 'failure' + elif is_float_rprimitive(typ): + if declare_dest: + self.emit_line("double {};".format(dest)) + # TODO: Don't use __float__ and __index__ + self.emit_line(f"{dest} = PyFloat_AsDouble({src});") + self.emit_lines( + f"if ({dest} == -1.0 && PyErr_Occurred()) {{", f"{dest} = -113.0;", "}" + ) + # TODO: Handle 'optional' + # TODO: Handle 'failure' elif isinstance(typ, RTuple): self.declare_tuple_struct(typ) if declare_dest: @@ -983,6 +993,8 @@ def emit_box( self.emit_line(f"{declaration}{dest} = PyLong_FromLong({src});") elif is_int64_rprimitive(typ): self.emit_line(f"{declaration}{dest} = PyLong_FromLongLong({src});") + elif is_float_rprimitive(typ): + self.emit_line(f"{declaration}{dest} = PyFloat_FromDouble({src});") elif isinstance(typ, RTuple): self.declare_tuple_struct(typ) self.emit_line(f"{declaration}{dest} = PyTuple_New({len(typ.types)});") diff --git a/mypyc/codegen/emitclass.py b/mypyc/codegen/emitclass.py index a9b51b8ff1a4..bf1f152f1bb1 100644 --- a/mypyc/codegen/emitclass.py +++ b/mypyc/codegen/emitclass.py @@ -1004,6 +1004,7 @@ def generate_readonly_getter( emitter.ctype_spaced(rtype), NATIVE_PREFIX, func_ir.cname(emitter.names) ) ) + emitter.emit_error_check("retval", rtype, "return NULL;") emitter.emit_box("retval", "retbox", rtype, declare_dest=True) emitter.emit_line("return retbox;") else: diff --git a/mypyc/codegen/emitfunc.py b/mypyc/codegen/emitfunc.py index e7fb7db80413..c6af1309550b 100644 --- a/mypyc/codegen/emitfunc.py +++ b/mypyc/codegen/emitfunc.py @@ -25,6 +25,10 @@ ComparisonOp, DecRef, Extend, + Float, + FloatComparisonOp, + FloatNeg, + FloatOp, GetAttr, GetElementPtr, Goto, @@ -671,6 +675,27 @@ def visit_comparison_op(self, op: ComparisonOp) -> None: lhs_cast = self.emit_signed_int_cast(op.lhs.type) self.emit_line(f"{dest} = {lhs_cast}{lhs} {op.op_str[op.op]} {rhs_cast}{rhs};") + def visit_float_op(self, op: FloatOp) -> None: + dest = self.reg(op) + lhs = self.reg(op.lhs) + rhs = self.reg(op.rhs) + if op.op != FloatOp.MOD: + self.emit_line("%s = %s %s %s;" % (dest, lhs, op.op_str[op.op], rhs)) + else: + # TODO: This may set errno as a side effect, that is a little sketchy. 
+ self.emit_line("%s = fmod(%s, %s);" % (dest, lhs, rhs)) + + def visit_float_neg(self, op: FloatNeg) -> None: + dest = self.reg(op) + src = self.reg(op.src) + self.emit_line(f"{dest} = -{src};") + + def visit_float_comparison_op(self, op: FloatComparisonOp) -> None: + dest = self.reg(op) + lhs = self.reg(op.lhs) + rhs = self.reg(op.rhs) + self.emit_line("%s = %s %s %s;" % (dest, lhs, op.op_str[op.op], rhs)) + def visit_load_mem(self, op: LoadMem) -> None: dest = self.reg(op) src = self.reg(op.src) @@ -732,6 +757,13 @@ def reg(self, reg: Value) -> str: elif val <= -(1 << 31): s += "LL" return s + elif isinstance(reg, Float): + r = repr(reg.value) + if r == "inf": + return "INFINITY" + elif r == "-inf": + return "-INFINITY" + return r else: return self.emitter.reg(reg) diff --git a/mypyc/codegen/emitmodule.py b/mypyc/codegen/emitmodule.py index 9f65aa77c47f..a8226314039d 100644 --- a/mypyc/codegen/emitmodule.py +++ b/mypyc/codegen/emitmodule.py @@ -369,7 +369,7 @@ def write_cache( "src_hashes": hashes[group_map[id]], } - result.manager.metastore.write(newpath, json.dumps(ir_data)) + result.manager.metastore.write(newpath, json.dumps(ir_data, separators=(",", ":"))) result.manager.metastore.commit() diff --git a/mypyc/codegen/literals.py b/mypyc/codegen/literals.py index 05884b754452..d8fd115be99e 100644 --- a/mypyc/codegen/literals.py +++ b/mypyc/codegen/literals.py @@ -1,7 +1,7 @@ from __future__ import annotations -from typing import Any, FrozenSet, List, Tuple, Union, cast -from typing_extensions import Final +from typing import FrozenSet, List, Tuple, Union +from typing_extensions import Final, TypeGuard # Supported Python literal types. All tuple / frozenset items must have supported # literal types as well, but we can't represent the type precisely. 
@@ -9,6 +9,11 @@ str, bytes, int, bool, float, complex, Tuple[object, ...], FrozenSet[object], None ] + +def _is_literal_value(obj: object) -> TypeGuard[LiteralValue]: + return isinstance(obj, (str, bytes, int, float, complex, tuple, frozenset, type(None))) + + # Some literals are singletons and handled specially (None, False and True) NUM_SINGLETONS: Final = 3 @@ -55,13 +60,15 @@ def record_literal(self, value: LiteralValue) -> None: tuple_literals = self.tuple_literals if value not in tuple_literals: for item in value: - self.record_literal(cast(Any, item)) + assert _is_literal_value(item) + self.record_literal(item) tuple_literals[value] = len(tuple_literals) elif isinstance(value, frozenset): frozenset_literals = self.frozenset_literals if value not in frozenset_literals: for item in value: - self.record_literal(cast(Any, item)) + assert _is_literal_value(item) + self.record_literal(item) frozenset_literals[value] = len(frozenset_literals) else: assert False, "invalid literal: %r" % value @@ -159,7 +166,8 @@ def _encode_collection_values( value = value_by_index[i] result.append(str(len(value))) for item in value: - index = self.literal_index(cast(Any, item)) + assert _is_literal_value(item) + index = self.literal_index(item) result.append(str(index)) return result diff --git a/mypyc/common.py b/mypyc/common.py index c8da5ff63bab..05e13370cb98 100644 --- a/mypyc/common.py +++ b/mypyc/common.py @@ -69,6 +69,7 @@ "getargs.c", "getargsfast.c", "int_ops.c", + "float_ops.c", "str_ops.c", "bytes_ops.c", "list_ops.c", diff --git a/mypyc/doc/float_operations.rst b/mypyc/doc/float_operations.rst index c1e4d284c4ba..915c184ae8e7 100644 --- a/mypyc/doc/float_operations.rst +++ b/mypyc/doc/float_operations.rst @@ -7,18 +7,40 @@ These ``float`` operations have fast, optimized implementations. Other floating point operations use generic implementations that are often slower. -.. note:: - - At the moment, only a few float operations are optimized. This will - improve in future mypyc releases. - Construction ------------ * Float literal -* ``float(string)`` +* ``float(x: int)`` +* ``float(x: i64)`` +* ``float(x: i32)`` +* ``float(x: str)`` +* ``float(x: float)`` (no-op) + +Operators +--------- + +* Arithmetic (``+``, ``-``, ``*``, ``/``, ``//``, ``%``) +* Comparisons (``==``, ``!=``, ``<``, etc.) +* Augmented assignment (``x += y``, etc.) Functions --------- +* ``int(f)`` +* ``i32(f)`` (convert to ``i32``) +* ``i64(f)`` (convert to ``i64``) * ``abs(f)`` +* ``math.sin(f)`` +* ``math.cos(f)`` +* ``math.tan(f)`` +* ``math.sqrt(f)`` +* ``math.exp(f)`` +* ``math.log(f)`` +* ``math.floor(f)`` +* ``math.ceil(f)`` +* ``math.fabs(f)`` +* ``math.pow(x, y)`` +* ``math.copysign(x, y)`` +* ``math.isinf(f)`` +* ``math.isnan(f)`` diff --git a/mypyc/doc/int_operations.rst b/mypyc/doc/int_operations.rst index 038b6e5dbc63..058fdbd511dd 100644 --- a/mypyc/doc/int_operations.rst +++ b/mypyc/doc/int_operations.rst @@ -3,32 +3,133 @@ Native integer operations ========================= -Operations on ``int`` values that are listed here have fast, optimized +Mypyc supports these integer types: + +* ``int`` (arbitrary-precision integer) +* ``i64`` (64-bit signed integer) +* ``i32`` (32-bit signed integer) + +``i64`` and ``i32`` are *native integer types* and must be imported +from the ``mypy_extensions`` module. ``int`` corresponds to the Python +``int`` type, but uses a more efficient runtime representation (tagged +pointer). Native integer types are value types. 
All integer types have +optimized primitive operations, but the native integer types are more +efficient than ``int``, since they don't require range or bounds +checks. + +Operations on integers that are listed here have fast, optimized implementations. Other integer operations use generic implementations -that are often slower. Some operations involving integers and other -types are documented elsewhere, such as list indexing. +that are generally slower. Some operations involving integers and other +types, such as list indexing, are documented elsewhere. Construction ------------ +``int`` type: + * Integer literal * ``int(x: float)`` +* ``int(x: i64)`` +* ``int(x: i32)`` * ``int(x: str)`` * ``int(x: str, base: int)`` +* ``int(x: int)`` (no-op) + +``i64`` type: + +* ``i64(x: int)`` +* ``i64(x: float)`` +* ``i64(x: i32)`` +* ``i64(x: str)`` +* ``i64(x: str, base: int)`` +* ``i64(x: i64)`` (no-op) + +``i32`` type: + +* ``i32(x: int)`` +* ``i32(x: float)`` +* ``i32(x: i64)`` (truncate) +* ``i32(x: str)`` +* ``i32(x: str, base: int)`` +* ``i32(x: i32)`` (no-op) + +Conversions from ``int`` to a native integer type raise +``OverflowError`` if the value is too large or small. Conversions from +a wider native integer type to a narrower one truncate the value and never +fail. More generally, operations between native integer types don't +check for overflow. + +Implicit conversions +-------------------- + +``int`` values can be implicitly converted to a native integer type, +for convenience. This means that these are equivalent:: + + def implicit() -> None: + # Implicit conversion of 0 (int) to i64 + x: i64 = 0 + + def explicit() -> None: + # Explicit conversion of 0 (int) to i64 + x = i64(0) + +Similarly, a native integer value can be implicitly converted to an +arbitrary-precision integer. These two functions are equivalent:: + + def implicit(x: i64) -> int: + # Implicit conversion from i64 to int + return x + + def explicit(x: i64) -> int: + # Explicit conversion from i64 to int + return int(x) Operators --------- -* Arithmetic (``+``, ``-``, ``*``, ``//``, ``%``) +* Arithmetic (``+``, ``-``, ``*``, ``//``, ``/``, ``%``) * Bitwise operations (``&``, ``|``, ``^``, ``<<``, ``>>``, ``~``) * Comparisons (``==``, ``!=``, ``<``, etc.) * Augmented assignment (``x += y``, etc.) +If one of the above native integer operations overflows or underflows, +the behavior is undefined. Native integer types should only be used if +all possible values are small enough for the type. For this reason, +the arbitrary-precision ``int`` type is recommended unless the +performance of integer operations is critical. + +It's a compile-time error to mix different native integer types in a +binary operation such as addition. An explicit conversion is required:: + + def add(x: i64, y: i32) -> None: + a = x + y # Error (i64 + i32) + b = x + i64(y) # OK + +You can freely mix a native integer value and an arbitrary-precision +``int`` value in an operation. The native integer type is "sticky" +and the ``int`` operand is coerced to the native integer type:: + + def example(x: i64, y: int) -> None: + a = x * y + # Type of "a" is "i64" + ... 
+ b = 1 - x + # Similarly, type of "b" is "i64" + Statements ---------- -For loop over range: +For loop over a range is compiled efficiently, if the ``range(...)`` object +is constructed in the for statement (after ``in``): * ``for x in range(end)`` * ``for x in range(start, end)`` * ``for x in range(start, end, step)`` + +If one of the arguments to ``range`` in a for loop is a native integer +type, the type of the loop variable is inferred to have this native +integer type, instead of ``int``:: + + for x in range(i64(n)): + # Type of "x" is "i64" + ... diff --git a/mypyc/doc/performance_tips_and_tricks.rst b/mypyc/doc/performance_tips_and_tricks.rst index 668d32827402..ae0b2950814c 100644 --- a/mypyc/doc/performance_tips_and_tricks.rst +++ b/mypyc/doc/performance_tips_and_tricks.rst @@ -103,8 +103,6 @@ These things also tend to be relatively slow: * Using generator functions -* Using floating point numbers (they are relatively unoptimized) - * Using callable values (i.e. not leveraging early binding to call functions or methods) @@ -160,6 +158,8 @@ Here are examples of features that are fast, in no particular order * Many integer operations +* Many ``float`` operations + * Booleans * :ref:`Native list operations `, such as indexing, diff --git a/mypyc/doc/using_type_annotations.rst b/mypyc/doc/using_type_annotations.rst index a01246ab0914..6c9277786751 100644 --- a/mypyc/doc/using_type_annotations.rst +++ b/mypyc/doc/using_type_annotations.rst @@ -30,6 +30,8 @@ mypyc, and many operations on these types have efficient implementations: * ``int`` (:ref:`native operations `) +* ``i64`` (:ref:`documentation `, :ref:`native operations `) +* ``i32`` (:ref:`documentation `, :ref:`native operations `) * ``float`` (:ref:`native operations `) * ``bool`` (:ref:`native operations `) * ``str`` (:ref:`native operations `) @@ -271,7 +273,8 @@ Value and heap types In CPython, memory for all objects is dynamically allocated on the heap. All Python types are thus *heap types*. In compiled code, some types are *value types* -- no object is (necessarily) allocated on the -heap. ``bool``, ``None`` and fixed-length tuples are value types. +heap. ``bool``, ``float``, ``None``, :ref:`native integer types ` +and fixed-length tuples are value types. ``int`` is a hybrid. For typical integer values, it is a value type. Large enough integer values, those that require more than 63 @@ -287,9 +290,9 @@ Value types have a few differences from heap types: * Similarly, mypyc transparently changes from a heap-based representation to a value representation (unboxing). -* Object identity of integers and tuples is not preserved. You should - use ``==`` instead of ``is`` if you are comparing two integers or - fixed-length tuples. +* Object identity of integers, floating point values and tuples is not + preserved. You should use ``==`` instead of ``is`` if you are comparing + two integers, floats or fixed-length tuples. * When an instance of a subclass of a value type is converted to the base type, it is implicitly converted to an instance of the target @@ -312,3 +315,81 @@ Example:: x = a[0] # True is converted to 1 on assignment x = True + +Since integers and floating point values have a different runtime +representations and neither can represent all the values of the other +type, type narrowing of floating point values through assignment is +disallowed in compiled code. For consistency, mypyc rejects assigning +an integer value to a float variable even in variable initialization. +An explicit conversion is required. 
+ +Examples:: + + def narrowing(n: int) -> None: + # Error: Incompatible value representations in assignment + # (expression has type "int", variable has type "float") + x: float = 0 + + y: float = 0.0 # Ok + + if f(): + y = n # Error + if f(): + y = float(n) # Ok + +.. _native-ints: + +Native integer types +-------------------- + +You can use the native integer types ``i64`` (64-bit signed integer) +and ``i32`` (32-bit signed integer) if you know that integer values +will always fit within fixed bounds. These types are faster than the +arbitrary-precision ``int`` type, since they don't require overflow +checks on operations. ``i32`` may also use less memory than ``int`` +values. The types are imported from the ``mypy_extensions`` module +(installed via ``pip install mypy_extensions``). + +Example:: + + from mypy_extensions import i64 + + def sum_list(l: list[i64]) -> i64: + s: i64 = 0 + for n in l: + s += n + return s + + # Implicit conversions from int to i64 + print(sum_list([1, 3, 5])) + +.. note:: + + Since there are no overflow checks when performing native integer + arithmetic, the above function could result in an overflow or other + undefined behavior if the sum might not fit within 64 bits. + + The behavior when running as interpreted Python program will be + different if there are overflows. Declaring native integer types + have no effect unless code is compiled. Native integer types are + effectively equivalent to ``int`` when interpreted. + +Native integer types have these additional properties: + +* Values can be implicitly converted between ``int`` and a native + integer type (both ways). + +* Conversions between different native integer types must be explicit. + A conversion to a narrower native integer type truncates the value + without a runtime overflow check. + +* If a binary operation (such as ``+``) or an augmented assignment + (such as ``+=``) mixes native integer and ``int`` values, the + ``int`` operand is implicitly coerced to the native integer type + (native integer types are "sticky"). + +* You can't mix different native integer types in binary + operations. Instead, convert between types explicitly. + +For more information about native integer types, refer to +:ref:`native integer operations `. diff --git a/mypyc/ir/class_ir.py b/mypyc/ir/class_ir.py index a1534780b79b..a5ac2133ce13 100644 --- a/mypyc/ir/class_ir.py +++ b/mypyc/ir/class_ir.py @@ -169,7 +169,9 @@ def __init__( self.base_mro: list[ClassIR] = [self] # Direct subclasses of this class (use subclasses() to also include non-direct ones) - # None if separate compilation prevents this from working + # None if separate compilation prevents this from working. + # + # Often it's better to use has_no_subclasses() or subclasses() instead. self.children: list[ClassIR] | None = [] # Instance attributes that are initialized in the class body. @@ -301,6 +303,9 @@ def get_method(self, name: str, *, prefer_method: bool = False) -> FuncIR | None def has_method_decl(self, name: str) -> bool: return any(name in ir.method_decls for ir in self.mro) + def has_no_subclasses(self) -> bool: + return self.children == [] and not self.allow_interpreted_subclasses + def subclasses(self) -> set[ClassIR] | None: """Return all subclasses of this class, both direct and indirect. 
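The new ``has_no_subclasses()`` helper is what later lets the IR builder bind classmethod calls made through ``cls`` statically: if compilation saw every subclass, there are none, and interpreted subclasses are disallowed, then ``cls`` can only ever be this exact class. At the source level, the pattern that benefits looks roughly like this (illustrative example, not taken from the diff)::

    class Point:
        def __init__(self, x: float, y: float) -> None:
            self.x = x
            self.y = y

        @classmethod
        def origin(cls) -> "Point":
            return cls(0.0, 0.0)

        @classmethod
        def from_x(cls, x: float) -> "Point":
            # If the compiler can prove Point has no subclasses, this call
            # through "cls" has exactly one possible target (Point.origin)
            # and can be bound statically instead of dispatched generically.
            p = cls.origin()
            p.x = x
            return p


    print(Point.from_x(3.0).x)  # 3.0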
diff --git a/mypyc/ir/ops.py b/mypyc/ir/ops.py index 51a0bffcf3f1..adf24de235ff 100644 --- a/mypyc/ir/ops.py +++ b/mypyc/ir/ops.py @@ -25,6 +25,7 @@ RVoid, bit_rprimitive, bool_rprimitive, + float_rprimitive, int_rprimitive, is_bit_rprimitive, is_bool_rprimitive, @@ -190,6 +191,25 @@ def __init__(self, value: int, rtype: RType = short_int_rprimitive, line: int = self.type = rtype self.line = line + def numeric_value(self) -> int: + if is_short_int_rprimitive(self.type) or is_int_rprimitive(self.type): + return self.value // 2 + return self.value + + +class Float(Value): + """Float literal. + + Floating point literals are treated as constant values and are generally + not included in data flow analyses and such, unlike Register and + Op subclasses. + """ + + def __init__(self, value: float, line: int = -1) -> None: + self.value = value + self.type = float_rprimitive + self.line = line + class Op(Value): """Abstract base class for all IR operations. @@ -895,6 +915,7 @@ class RaiseStandardError(RegisterOp): UNBOUND_LOCAL_ERROR: Final = "UnboundLocalError" RUNTIME_ERROR: Final = "RuntimeError" NAME_ERROR: Final = "NameError" + ZERO_DIVISION_ERROR: Final = "ZeroDivisionError" def __init__(self, class_name: str, value: str | Value | None, line: int) -> None: super().__init__(line) @@ -1042,7 +1063,7 @@ class IntOp(RegisterOp): """Binary arithmetic or bitwise op on integer operands (e.g., r1 = r2 + r3). These ops are low-level and are similar to the corresponding C - operations (and unlike Python operations). + operations. The left and right values must have low-level integer types with compatible representations. Fixed-width integers, short_int_rprimitive, @@ -1156,6 +1177,94 @@ def accept(self, visitor: OpVisitor[T]) -> T: return visitor.visit_comparison_op(self) +class FloatOp(RegisterOp): + """Binary float arithmetic op (e.g., r1 = r2 + r3). + + These ops are low-level and are similar to the corresponding C + operations (and somewhat different from Python operations). + + The left and right values must be floats. + """ + + error_kind = ERR_NEVER + + ADD: Final = 0 + SUB: Final = 1 + MUL: Final = 2 + DIV: Final = 3 + MOD: Final = 4 + + op_str: Final = {ADD: "+", SUB: "-", MUL: "*", DIV: "/", MOD: "%"} + + def __init__(self, lhs: Value, rhs: Value, op: int, line: int = -1) -> None: + super().__init__(line) + self.type = float_rprimitive + self.lhs = lhs + self.rhs = rhs + self.op = op + + def sources(self) -> List[Value]: + return [self.lhs, self.rhs] + + def accept(self, visitor: "OpVisitor[T]") -> T: + return visitor.visit_float_op(self) + + +# We can't have this in the FloatOp class body, because of +# https://github.com/mypyc/mypyc/issues/932. 
+float_op_to_id: Final = {op: op_id for op_id, op in FloatOp.op_str.items()} + + +class FloatNeg(RegisterOp): + """Float negation op (r1 = -r2).""" + + error_kind = ERR_NEVER + + def __init__(self, src: Value, line: int = -1) -> None: + super().__init__(line) + self.type = float_rprimitive + self.src = src + + def sources(self) -> List[Value]: + return [self.src] + + def accept(self, visitor: "OpVisitor[T]") -> T: + return visitor.visit_float_neg(self) + + +class FloatComparisonOp(RegisterOp): + """Low-level comparison op for floats.""" + + error_kind = ERR_NEVER + + EQ: Final = 200 + NEQ: Final = 201 + LT: Final = 202 + GT: Final = 203 + LE: Final = 204 + GE: Final = 205 + + op_str: Final = {EQ: "==", NEQ: "!=", LT: "<", GT: ">", LE: "<=", GE: ">="} + + def __init__(self, lhs: Value, rhs: Value, op: int, line: int = -1) -> None: + super().__init__(line) + self.type = bit_rprimitive + self.lhs = lhs + self.rhs = rhs + self.op = op + + def sources(self) -> List[Value]: + return [self.lhs, self.rhs] + + def accept(self, visitor: "OpVisitor[T]") -> T: + return visitor.visit_float_comparison_op(self) + + +# We can't have this in the FloatOp class body, because of +# https://github.com/mypyc/mypyc/issues/932. +float_comparison_op_to_id: Final = {op: op_id for op_id, op in FloatComparisonOp.op_str.items()} + + class LoadMem(RegisterOp): """Read a memory location: result = *(type *)src. @@ -1405,6 +1514,18 @@ def visit_int_op(self, op: IntOp) -> T: def visit_comparison_op(self, op: ComparisonOp) -> T: raise NotImplementedError + @abstractmethod + def visit_float_op(self, op: FloatOp) -> T: + raise NotImplementedError + + @abstractmethod + def visit_float_neg(self, op: FloatNeg) -> T: + raise NotImplementedError + + @abstractmethod + def visit_float_comparison_op(self, op: FloatComparisonOp) -> T: + raise NotImplementedError + @abstractmethod def visit_load_mem(self, op: LoadMem) -> T: raise NotImplementedError diff --git a/mypyc/ir/pprint.py b/mypyc/ir/pprint.py index cb9e4a2d2541..82e82913c9a6 100644 --- a/mypyc/ir/pprint.py +++ b/mypyc/ir/pprint.py @@ -23,6 +23,10 @@ ControlOp, DecRef, Extend, + Float, + FloatComparisonOp, + FloatNeg, + FloatOp, GetAttr, GetElementPtr, Goto, @@ -241,6 +245,15 @@ def visit_comparison_op(self, op: ComparisonOp) -> str: "%r = %r %s %r%s", op, op.lhs, ComparisonOp.op_str[op.op], op.rhs, sign_format ) + def visit_float_op(self, op: FloatOp) -> str: + return self.format("%r = %r %s %r", op, op.lhs, FloatOp.op_str[op.op], op.rhs) + + def visit_float_neg(self, op: FloatNeg) -> str: + return self.format("%r = -%r", op, op.src) + + def visit_float_comparison_op(self, op: FloatComparisonOp) -> str: + return self.format("%r = %r %s %r", op, op.lhs, op.op_str[op.op], op.rhs) + def visit_load_mem(self, op: LoadMem) -> str: return self.format("%r = load_mem %r :: %t*", op, op.src, op.type) @@ -289,6 +302,8 @@ def format(self, fmt: str, *args: Any) -> str: assert isinstance(arg, Value) if isinstance(arg, Integer): result.append(str(arg.value)) + elif isinstance(arg, Float): + result.append(repr(arg.value)) else: result.append(self.names[arg]) elif typespec == "d": @@ -445,7 +460,7 @@ def generate_names_for_ir(args: list[Register], blocks: list[BasicBlock]) -> dic continue if isinstance(value, Register) and value.name: name = value.name - elif isinstance(value, Integer): + elif isinstance(value, (Integer, Float)): continue else: name = "r%d" % temp_index diff --git a/mypyc/ir/rtypes.py b/mypyc/ir/rtypes.py index babfe0770f35..4ccab56ef832 100644 --- a/mypyc/ir/rtypes.py +++ 
b/mypyc/ir/rtypes.py @@ -221,6 +221,8 @@ def __init__( self.c_undefined = "2" elif ctype in ("PyObject **", "void *"): self.c_undefined = "NULL" + elif ctype == "double": + self.c_undefined = "-113.0" else: assert False, "Unrecognized ctype: %r" % ctype @@ -366,7 +368,14 @@ def __hash__(self) -> int: # Floats are represent as 'float' PyObject * values. (In the future # we'll likely switch to a more efficient, unboxed representation.) -float_rprimitive: Final = RPrimitive("builtins.float", is_unboxed=False, is_refcounted=True) +float_rprimitive: Final = RPrimitive( + "builtins.float", + is_unboxed=True, + is_refcounted=False, + ctype="double", + size=8, + error_overlap=True, +) # An unboxed Python bool value. This actually has three possible values # (0 -> False, 1 -> True, 2 -> error). If you only need True/False, use @@ -527,6 +536,8 @@ def visit_rprimitive(self, t: RPrimitive) -> str: return "8" # "8 byte integer" elif t._ctype == "int32_t": return "4" # "4 byte integer" + elif t._ctype == "double": + return "F" assert not t.is_unboxed, f"{t} unexpected unboxed type" return "O" diff --git a/mypyc/irbuild/builder.py b/mypyc/irbuild/builder.py index f37fae608083..14f614f57dd6 100644 --- a/mypyc/irbuild/builder.py +++ b/mypyc/irbuild/builder.py @@ -41,6 +41,7 @@ Statement, SymbolNode, TupleExpr, + TypeAlias, TypeInfo, UnaryExpr, Var, @@ -51,6 +52,7 @@ ProperType, TupleType, Type, + TypedDictType, TypeOfAny, UninhabitedType, UnionType, @@ -92,9 +94,11 @@ c_pyssize_t_rprimitive, dict_rprimitive, int_rprimitive, + is_float_rprimitive, is_list_rprimitive, is_none_rprimitive, is_object_rprimitive, + is_tagged, is_tuple_rprimitive, none_rprimitive, object_rprimitive, @@ -567,7 +571,11 @@ def load_final_literal_value(self, val: int | str | bytes | float | bool, line: else: assert False, "Unsupported final literal value" - def get_assignment_target(self, lvalue: Lvalue, line: int = -1) -> AssignmentTarget: + def get_assignment_target( + self, lvalue: Lvalue, line: int = -1, *, for_read: bool = False + ) -> AssignmentTarget: + if line == -1: + line = lvalue.line if isinstance(lvalue, NameExpr): # If we are visiting a decorator, then the SymbolNode we really want to be looking at # is the function that is decorated, not the entire Decorator node itself. @@ -578,6 +586,8 @@ def get_assignment_target(self, lvalue: Lvalue, line: int = -1) -> AssignmentTar # New semantic analyzer doesn't create ad-hoc Vars for special forms. 
assert lvalue.is_special_form symbol = Var(lvalue.name) + if not for_read and isinstance(symbol, Var) and symbol.is_cls: + self.error("Cannot assign to the first argument of classmethod", line) if lvalue.kind == LDEF: if symbol not in self.symtables[-1]: # If the function is a generator function, then first define a new variable @@ -658,13 +668,13 @@ def read( def assign(self, target: Register | AssignmentTarget, rvalue_reg: Value, line: int) -> None: if isinstance(target, Register): - self.add(Assign(target, self.coerce(rvalue_reg, target.type, line))) + self.add(Assign(target, self.coerce_rvalue(rvalue_reg, target.type, line))) elif isinstance(target, AssignmentTargetRegister): - rvalue_reg = self.coerce(rvalue_reg, target.type, line) + rvalue_reg = self.coerce_rvalue(rvalue_reg, target.type, line) self.add(Assign(target.register, rvalue_reg)) elif isinstance(target, AssignmentTargetAttr): if isinstance(target.obj_type, RInstance): - rvalue_reg = self.coerce(rvalue_reg, target.type, line) + rvalue_reg = self.coerce_rvalue(rvalue_reg, target.type, line) self.add(SetAttr(target.obj, target.attr, rvalue_reg, line)) else: key = self.load_str(target.attr) @@ -691,6 +701,18 @@ def assign(self, target: Register | AssignmentTarget, rvalue_reg: Value, line: i else: assert False, "Unsupported assignment target" + def coerce_rvalue(self, rvalue: Value, rtype: RType, line: int) -> Value: + if is_float_rprimitive(rtype) and is_tagged(rvalue.type): + typename = rvalue.type.short_name() + if typename == "short_int": + typename = "int" + self.error( + "Incompatible value representations in assignment " + + f'(expression has type "{typename}", variable has type "float")', + line, + ) + return self.coerce(rvalue, rtype, line) + def process_sequence_assignment( self, target: AssignmentTargetTuple, rvalue: Value, line: int ) -> None: @@ -892,8 +914,12 @@ def get_dict_base_type(self, expr: Expression) -> list[Instance]: dict_types = [] for t in types: - assert isinstance(t, Instance), t - dict_base = next(base for base in t.type.mro if base.fullname == "builtins.dict") + if isinstance(t, TypedDictType): + t = t.fallback + dict_base = next(base for base in t.type.mro if base.fullname == "typing.Mapping") + else: + assert isinstance(t, Instance), t + dict_base = next(base for base in t.type.mro if base.fullname == "builtins.dict") dict_types.append(map_instance_to_supertype(t, dict_base)) return dict_types @@ -1018,7 +1044,8 @@ def call_refexpr_with_args( # Handle data-driven special-cased primitive call ops. if callee.fullname and expr.arg_kinds == [ARG_POS] * len(arg_values): - call_c_ops_candidates = function_ops.get(callee.fullname, []) + fullname = get_call_target_fullname(callee) + call_c_ops_candidates = function_ops.get(fullname, []) target = self.builder.matching_call_c( call_c_ops_candidates, arg_values, expr.line, self.node_type(expr) ) @@ -1349,3 +1376,12 @@ def remangle_redefinition_name(name: str) -> str: lookups. """ return name.replace("'", "__redef__") + + +def get_call_target_fullname(ref: RefExpr) -> str: + if isinstance(ref.node, TypeAlias): + # Resolve simple type aliases. In calls they evaluate to the type they point to. 
+ target = get_proper_type(ref.node.target) + if isinstance(target, Instance): + return target.type.fullname + return ref.fullname diff --git a/mypyc/irbuild/constant_fold.py b/mypyc/irbuild/constant_fold.py index 4e9eb53b9222..bc71052f5418 100644 --- a/mypyc/irbuild/constant_fold.py +++ b/mypyc/irbuild/constant_fold.py @@ -16,14 +16,25 @@ from mypy.constant_fold import ( constant_fold_binary_int_op, constant_fold_binary_str_op, + constant_fold_unary_float_op, constant_fold_unary_int_op, ) -from mypy.nodes import Expression, IntExpr, MemberExpr, NameExpr, OpExpr, StrExpr, UnaryExpr, Var +from mypy.nodes import ( + Expression, + FloatExpr, + IntExpr, + MemberExpr, + NameExpr, + OpExpr, + StrExpr, + UnaryExpr, + Var, +) from mypyc.irbuild.builder import IRBuilder # All possible result types of constant folding -ConstantValue = Union[int, str] -CONST_TYPES: Final = (int, str) +ConstantValue = Union[int, str, float] +CONST_TYPES: Final = (int, str, float) def constant_fold_expr(builder: IRBuilder, expr: Expression) -> ConstantValue | None: @@ -35,6 +46,8 @@ def constant_fold_expr(builder: IRBuilder, expr: Expression) -> ConstantValue | return expr.value if isinstance(expr, StrExpr): return expr.value + if isinstance(expr, FloatExpr): + return expr.value elif isinstance(expr, NameExpr): node = expr.node if isinstance(node, Var) and node.is_final: @@ -60,4 +73,6 @@ def constant_fold_expr(builder: IRBuilder, expr: Expression) -> ConstantValue | value = constant_fold_expr(builder, expr.expr) if isinstance(value, int): return constant_fold_unary_int_op(expr.op, value) + if isinstance(value, float): + return constant_fold_unary_float_op(expr.op, value) return None diff --git a/mypyc/irbuild/expression.py b/mypyc/irbuild/expression.py index 3f5b795a1436..d94bd228e948 100644 --- a/mypyc/irbuild/expression.py +++ b/mypyc/irbuild/expression.py @@ -6,7 +6,7 @@ from __future__ import annotations -from typing import Callable, Sequence, cast +from typing import Callable, Sequence from mypy.nodes import ( ARG_POS, @@ -48,11 +48,13 @@ ) from mypy.types import Instance, ProperType, TupleType, TypeType, get_proper_type from mypyc.common import MAX_SHORT_INT +from mypyc.ir.class_ir import ClassIR from mypyc.ir.func_ir import FUNC_CLASSMETHOD, FUNC_STATICMETHOD from mypyc.ir.ops import ( Assign, BasicBlock, ComparisonOp, + Float, Integer, LoadAddress, LoadLiteral, @@ -174,7 +176,7 @@ def transform_name_expr(builder: IRBuilder, expr: NameExpr) -> Value: ) return obj else: - return builder.read(builder.get_assignment_target(expr), expr.line) + return builder.read(builder.get_assignment_target(expr, for_read=True), expr.line) return builder.load_global(expr) @@ -288,6 +290,9 @@ def transform_call_expr(builder: IRBuilder, expr: CallExpr) -> Value: callee = callee.analyzed.expr # Unwrap type application if isinstance(callee, MemberExpr): + if isinstance(callee.expr, RefExpr) and isinstance(callee.expr.node, MypyFile): + # Call a module-level function, not a method. 
+ return translate_call(builder, expr, callee) return apply_method_specialization(builder, expr, callee) or translate_method_call( builder, expr, callee ) @@ -336,30 +341,7 @@ def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr # Call a method via the *class* assert isinstance(callee.expr.node, TypeInfo) ir = builder.mapper.type_to_ir[callee.expr.node] - decl = ir.method_decl(callee.name) - args = [] - arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:] - # Add the class argument for class methods in extension classes - if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class: - args.append(builder.load_native_type_object(callee.expr.node.fullname)) - arg_kinds.insert(0, ARG_POS) - arg_names.insert(0, None) - args += [builder.accept(arg) for arg in expr.args] - - if ir.is_ext_class: - return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line) - else: - obj = builder.accept(callee.expr) - return builder.gen_method_call( - obj, - callee.name, - args, - builder.node_type(expr), - expr.line, - expr.arg_kinds, - expr.arg_names, - ) - + return call_classmethod(builder, ir, expr, callee) elif builder.is_module_member_expr(callee): # Fall back to a PyCall for non-native module calls function = builder.accept(callee) @@ -368,6 +350,17 @@ def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr function, args, expr.line, arg_kinds=expr.arg_kinds, arg_names=expr.arg_names ) else: + if isinstance(callee.expr, RefExpr): + node = callee.expr.node + if isinstance(node, Var) and node.is_cls: + typ = get_proper_type(node.type) + if isinstance(typ, TypeType) and isinstance(typ.item, Instance): + class_ir = builder.mapper.type_to_ir.get(typ.item.type) + if class_ir and class_ir.is_ext_class and class_ir.has_no_subclasses(): + # Call a native classmethod via cls that can be statically bound, + # since the class has no subclasses. + return call_classmethod(builder, class_ir, expr, callee) + receiver_typ = builder.node_type(callee.expr) # If there is a specializer for this method name/type, try calling it. 
@@ -389,6 +382,32 @@ def translate_method_call(builder: IRBuilder, expr: CallExpr, callee: MemberExpr ) +def call_classmethod(builder: IRBuilder, ir: ClassIR, expr: CallExpr, callee: MemberExpr) -> Value: + decl = ir.method_decl(callee.name) + args = [] + arg_kinds, arg_names = expr.arg_kinds[:], expr.arg_names[:] + # Add the class argument for class methods in extension classes + if decl.kind == FUNC_CLASSMETHOD and ir.is_ext_class: + args.append(builder.load_native_type_object(ir.fullname)) + arg_kinds.insert(0, ARG_POS) + arg_names.insert(0, None) + args += [builder.accept(arg) for arg in expr.args] + + if ir.is_ext_class: + return builder.builder.call(decl, args, arg_kinds, arg_names, expr.line) + else: + obj = builder.accept(callee.expr) + return builder.gen_method_call( + obj, + callee.name, + args, + builder.node_type(expr), + expr.line, + expr.arg_kinds, + expr.arg_names, + ) + + def translate_super_method_call(builder: IRBuilder, expr: CallExpr, callee: SuperExpr) -> Value: if callee.info is None or (len(callee.call.args) != 0 and len(callee.call.args) != 2): return translate_call(builder, expr, callee) @@ -551,6 +570,8 @@ def try_constant_fold(builder: IRBuilder, expr: Expression) -> Value | None: return builder.load_int(value) elif isinstance(value, str): return builder.load_str(value) + elif isinstance(value, float): + return Float(value) return None @@ -689,7 +710,9 @@ def transform_comparison_expr(builder: IRBuilder, e: ComparisonExpr) -> Value: lhs = e.operands[0] mypy_file = builder.graph["builtins"].tree assert mypy_file is not None - bool_type = Instance(cast(TypeInfo, mypy_file.names["bool"].node), []) + info = mypy_file.names["bool"].node + assert isinstance(info, TypeInfo) + bool_type = Instance(info, []) exprs = [] for item in items: expr = ComparisonExpr([cmp_op], [lhs, item]) diff --git a/mypyc/irbuild/function.py b/mypyc/irbuild/function.py index 02155d70e928..ba2e4d2ba10b 100644 --- a/mypyc/irbuild/function.py +++ b/mypyc/irbuild/function.py @@ -643,7 +643,7 @@ def f(builder: IRBuilder, x: object) -> int: ... 
args = args[: -base_sig.num_bitmap_args] arg_kinds = arg_kinds[: -base_sig.num_bitmap_args] arg_names = arg_names[: -base_sig.num_bitmap_args] - bitmap_args = builder.builder.args[-base_sig.num_bitmap_args :] + bitmap_args = list(builder.builder.args[-base_sig.num_bitmap_args :]) # We can do a passthrough *args/**kwargs with a native call, but if the # args need to get distributed out to arguments, we just let python handle it diff --git a/mypyc/irbuild/ll_builder.py b/mypyc/irbuild/ll_builder.py index 2391ccc4d0ed..6a4599a324d4 100644 --- a/mypyc/irbuild/ll_builder.py +++ b/mypyc/irbuild/ll_builder.py @@ -46,6 +46,10 @@ Cast, ComparisonOp, Extend, + Float, + FloatComparisonOp, + FloatNeg, + FloatOp, GetAttr, GetElementPtr, Goto, @@ -64,9 +68,12 @@ SetMem, Truncate, TupleGet, + TupleSet, Unbox, Unreachable, Value, + float_comparison_op_to_id, + float_op_to_id, int_op_to_id, ) from mypyc.ir.rtypes import ( @@ -96,6 +103,7 @@ is_bytes_rprimitive, is_dict_rprimitive, is_fixed_width_rtype, + is_float_rprimitive, is_int32_rprimitive, is_int64_rprimitive, is_int_rprimitive, @@ -126,6 +134,7 @@ dict_update_in_display_op, ) from mypyc.primitives.exc_ops import err_occurred_op, keep_propagating_op +from mypyc.primitives.float_ops import copysign_op, int_to_float_op from mypyc.primitives.generic_ops import ( generic_len_op, generic_ssize_t_len_op, @@ -340,11 +349,39 @@ def coerce( is_bool_rprimitive(src_type) or is_bit_rprimitive(src_type) ) and is_fixed_width_rtype(target_type): return self.add(Extend(src, target_type, signed=False)) - else: - # To go from one unboxed type to another, we go through a boxed - # in-between value, for simplicity. - tmp = self.box(src) - return self.unbox_or_cast(tmp, target_type, line) + elif isinstance(src, Integer) and is_float_rprimitive(target_type): + if is_tagged(src_type): + return Float(float(src.value // 2)) + return Float(float(src.value)) + elif is_tagged(src_type) and is_float_rprimitive(target_type): + return self.int_to_float(src, line) + elif ( + isinstance(src_type, RTuple) + and isinstance(target_type, RTuple) + and len(src_type.types) == len(target_type.types) + ): + # Coerce between two tuple types by coercing each item separately + values = [] + for i in range(len(src_type.types)): + v = None + if isinstance(src, TupleSet): + item = src.items[i] + # We can't reuse register values, since they can be modified. + if not isinstance(item, Register): + v = item + if v is None: + v = TupleGet(src, i) + self.add(v) + values.append(v) + return self.add( + TupleSet( + [self.coerce(v, t, line) for v, t in zip(values, target_type.types)], line + ) + ) + # To go between any other unboxed types, we go through a boxed + # in-between value, for simplicity. + tmp = self.box(src) + return self.unbox_or_cast(tmp, target_type, line) if (not src_type.is_unboxed and target_type.is_unboxed) or not is_subtype( src_type, target_type ): @@ -1027,6 +1064,8 @@ def native_args_to_positional( elif not lst: if is_fixed_width_rtype(arg.type): output_arg = Integer(0, arg.type) + elif is_float_rprimitive(arg.type): + output_arg = Float(0.0) else: output_arg = self.add(LoadErrorValue(arg.type, is_borrowed=True)) else: @@ -1166,7 +1205,7 @@ def load_int(self, value: int) -> Value: def load_float(self, value: float) -> Value: """Load a float literal value.""" - return self.add(LoadLiteral(value, float_rprimitive)) + return Float(value) def load_str(self, value: str) -> Value: """Load a str literal value. 
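# Illustrative sketch (not part of the patch), relevant to the binary_op /
# float_op / float_mod code in the hunks below: the low-level FloatOp.MOD
# follows C fmod semantics (result takes the sign of the dividend), while
# Python's % takes the sign of the divisor and gives an exactly-zero remainder
# the divisor's sign, hence the adjust/copysign branches in the generated IR.

import math

assert math.fmod(-7.5, 2.0) == -1.5   # C-style remainder, sign of the dividend
assert (-7.5) % 2.0 == 0.5            # Python adjusts by adding the divisor

# A remainder of exactly zero takes the divisor's sign under Python semantics.
assert math.copysign(1.0, (-4.0) % 2.0) == 1.0
assert math.copysign(1.0, 4.0 % -2.0) == -1.0

# Floor division pairs with %: x == (x // y) * y + (x % y) holds exactly here,
# since all the values involved are exactly representable as doubles.
x, y = -7.5, 2.0
assert x // y == -4.0
assert (x // y) * y + (x % y) == x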
@@ -1328,6 +1367,24 @@ def binary_op(self, lreg: Value, rreg: Value, op: str, line: int) -> Value: if is_tagged(rtype) and is_subtype(ltype, rtype): lreg = self.coerce(lreg, short_int_rprimitive, line) return self.compare_tagged(lreg, rreg, op, line) + if is_float_rprimitive(ltype) or is_float_rprimitive(rtype): + if isinstance(lreg, Integer): + lreg = Float(float(lreg.numeric_value())) + elif isinstance(rreg, Integer): + rreg = Float(float(rreg.numeric_value())) + elif is_int_rprimitive(lreg.type): + lreg = self.int_to_float(lreg, line) + elif is_int_rprimitive(rreg.type): + rreg = self.int_to_float(rreg, line) + if is_float_rprimitive(lreg.type) and is_float_rprimitive(rreg.type): + if op in float_comparison_op_to_id: + return self.compare_floats(lreg, rreg, float_comparison_op_to_id[op], line) + if op.endswith("="): + base_op = op[:-1] + else: + base_op = op + if base_op in float_op_to_id: + return self.float_op(lreg, rreg, base_op, line) call_c_ops_candidates = binary_ops.get(op, []) target = self.matching_call_c(call_c_ops_candidates, [lreg, rreg], line) @@ -1556,6 +1613,12 @@ def unary_op(self, value: Value, expr_op: str, line: int) -> Value: return self.int_op(typ, value, Integer(-1, typ), IntOp.XOR, line) elif expr_op == "+": return value + if is_float_rprimitive(typ): + if expr_op == "-": + return self.add(FloatNeg(value, line)) + elif expr_op == "+": + return value + if isinstance(value, Integer): # TODO: Overflow? Unsigned? num = value.value @@ -1564,6 +1627,8 @@ def unary_op(self, value: Value, expr_op: str, line: int) -> Value: return Integer(-num, typ, value.line) if is_tagged(typ) and expr_op == "+": return value + if isinstance(value, Float): + return Float(-value.value, value.line) if isinstance(typ, RInstance): if expr_op == "-": method = "__neg__" @@ -1713,6 +1778,8 @@ def bool_value(self, value: Value) -> Value: ): # Directly call the __bool__ method on classes that have it. result = self.gen_method_call(value, "__bool__", [], bool_rprimitive, value.line) + elif is_float_rprimitive(value.type): + result = self.compare_floats(value, Float(0.0), FloatComparisonOp.NEQ, value.line) else: value_type = optional_value_type(value.type) if value_type is not None: @@ -1890,6 +1957,62 @@ def int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: int = -1) - """ return self.add(IntOp(type, lhs, rhs, op, line)) + def float_op(self, lhs: Value, rhs: Value, op: str, line: int) -> Value: + """Generate a native float binary arithmetic operation. + + This follows Python semantics (e.g. raise exception on division by zero). + Add a FloatOp directly if you want low-level semantics. + + Args: + op: Binary operator (e.g. '+' or '*') + """ + op_id = float_op_to_id[op] + if op_id in (FloatOp.DIV, FloatOp.MOD): + if not (isinstance(rhs, Float) and rhs.value != 0.0): + c = self.compare_floats(rhs, Float(0.0), FloatComparisonOp.EQ, line) + err, ok = BasicBlock(), BasicBlock() + self.add(Branch(c, err, ok, Branch.BOOL, rare=True)) + self.activate_block(err) + if op_id == FloatOp.DIV: + msg = "float division by zero" + else: + msg = "float modulo" + self.add(RaiseStandardError(RaiseStandardError.ZERO_DIVISION_ERROR, msg, line)) + self.add(Unreachable()) + self.activate_block(ok) + if op_id == FloatOp.MOD: + # Adjust the result to match Python semantics (FloatOp follows C semantics). 
+ return self.float_mod(lhs, rhs, line) + else: + return self.add(FloatOp(lhs, rhs, op_id, line)) + + def float_mod(self, lhs: Value, rhs: Value, line: int) -> Value: + """Perform x % y on floats using Python semantics.""" + mod = self.add(FloatOp(lhs, rhs, FloatOp.MOD, line)) + res = Register(float_rprimitive) + self.add(Assign(res, mod)) + tricky, adjust, copysign, done = BasicBlock(), BasicBlock(), BasicBlock(), BasicBlock() + is_zero = self.add(FloatComparisonOp(res, Float(0.0), FloatComparisonOp.EQ, line)) + self.add(Branch(is_zero, copysign, tricky, Branch.BOOL)) + self.activate_block(tricky) + same_signs = self.is_same_float_signs(lhs, rhs, line) + self.add(Branch(same_signs, done, adjust, Branch.BOOL)) + self.activate_block(adjust) + adj = self.float_op(res, rhs, "+", line) + self.add(Assign(res, adj)) + self.add(Goto(done)) + self.activate_block(copysign) + # If the remainder is zero, CPython ensures the result has the + # same sign as the denominator. + adj = self.call_c(copysign_op, [Float(0.0), rhs], line) + self.add(Assign(res, adj)) + self.add(Goto(done)) + self.activate_block(done) + return res + + def compare_floats(self, lhs: Value, rhs: Value, op: int, line: int) -> Value: + return self.add(FloatComparisonOp(lhs, rhs, op, line)) + def fixed_width_int_op(self, type: RType, lhs: Value, rhs: Value, op: int, line: int) -> Value: """Generate a binary op using Python fixed-width integer semantics. @@ -1932,13 +2055,12 @@ def inline_fixed_width_divide(self, type: RType, lhs: Value, rhs: Value, line: i res = Register(type) div = self.int_op(type, lhs, rhs, IntOp.DIV, line) self.add(Assign(res, div)) - diff_signs = self.is_different_native_int_signs(type, lhs, rhs, line) + same_signs = self.is_same_native_int_signs(type, lhs, rhs, line) tricky, adjust, done = BasicBlock(), BasicBlock(), BasicBlock() - self.add(Branch(diff_signs, done, tricky, Branch.BOOL)) + self.add(Branch(same_signs, done, tricky, Branch.BOOL)) self.activate_block(tricky) mul = self.int_op(type, res, rhs, IntOp.MUL, line) mul_eq = self.add(ComparisonOp(mul, lhs, ComparisonOp.EQ, line)) - adjust = BasicBlock() self.add(Branch(mul_eq, done, adjust, Branch.BOOL)) self.activate_block(adjust) adj = self.int_op(type, res, Integer(1, type), IntOp.SUB, line) @@ -1952,12 +2074,11 @@ def inline_fixed_width_mod(self, type: RType, lhs: Value, rhs: Value, line: int) res = Register(type) mod = self.int_op(type, lhs, rhs, IntOp.MOD, line) self.add(Assign(res, mod)) - diff_signs = self.is_different_native_int_signs(type, lhs, rhs, line) + same_signs = self.is_same_native_int_signs(type, lhs, rhs, line) tricky, adjust, done = BasicBlock(), BasicBlock(), BasicBlock() - self.add(Branch(diff_signs, done, tricky, Branch.BOOL)) + self.add(Branch(same_signs, done, tricky, Branch.BOOL)) self.activate_block(tricky) is_zero = self.add(ComparisonOp(res, Integer(0, type), ComparisonOp.EQ, line)) - adjust = BasicBlock() self.add(Branch(is_zero, done, adjust, Branch.BOOL)) self.activate_block(adjust) adj = self.int_op(type, res, rhs, IntOp.ADD, line) @@ -1966,11 +2087,16 @@ def inline_fixed_width_mod(self, type: RType, lhs: Value, rhs: Value, line: int) self.activate_block(done) return res - def is_different_native_int_signs(self, type: RType, a: Value, b: Value, line: int) -> Value: + def is_same_native_int_signs(self, type: RType, a: Value, b: Value, line: int) -> Value: neg1 = self.add(ComparisonOp(a, Integer(0, type), ComparisonOp.SLT, line)) neg2 = self.add(ComparisonOp(b, Integer(0, type), ComparisonOp.SLT, line)) return 
self.add(ComparisonOp(neg1, neg2, ComparisonOp.EQ, line)) + def is_same_float_signs(self, a: Value, b: Value, line: int) -> Value: + neg1 = self.add(FloatComparisonOp(a, Float(0.0), FloatComparisonOp.LT, line)) + neg2 = self.add(FloatComparisonOp(b, Float(0.0), FloatComparisonOp.LT, line)) + return self.add(ComparisonOp(neg1, neg2, ComparisonOp.EQ, line)) + def comparison_op(self, lhs: Value, rhs: Value, op: int, line: int) -> Value: return self.add(ComparisonOp(lhs, rhs, op, line)) @@ -2042,6 +2168,9 @@ def new_tuple_with_length(self, length: Value, line: int) -> Value: """ return self.call_c(new_tuple_with_length_op, [length], line) + def int_to_float(self, n: Value, line: int) -> Value: + return self.call_c(int_to_float_op, [n], line) + # Internal helpers def decompose_union_helper( diff --git a/mypyc/irbuild/specialize.py b/mypyc/irbuild/specialize.py index 8cb24c5b47da..ff9df0cd597b 100644 --- a/mypyc/irbuild/specialize.py +++ b/mypyc/irbuild/specialize.py @@ -55,6 +55,7 @@ is_bool_rprimitive, is_dict_rprimitive, is_fixed_width_rtype, + is_float_rprimitive, is_int32_rprimitive, is_int64_rprimitive, is_int_rprimitive, @@ -728,3 +729,15 @@ def translate_bool(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Value arg = expr.args[0] src = builder.accept(arg) return builder.builder.bool_value(src) + + +@specialize_function("builtins.float") +def translate_float(builder: IRBuilder, expr: CallExpr, callee: RefExpr) -> Optional[Value]: + if len(expr.args) != 1 or expr.arg_kinds[0] != ARG_POS: + return None + arg = expr.args[0] + arg_type = builder.node_type(arg) + if is_float_rprimitive(arg_type): + # No-op float conversion. + return builder.accept(arg) + return None diff --git a/mypyc/lib-rt/CPy.h b/mypyc/lib-rt/CPy.h index 016a6d3ea9e0..e01acec4d2f0 100644 --- a/mypyc/lib-rt/CPy.h +++ b/mypyc/lib-rt/CPy.h @@ -147,9 +147,9 @@ CPyTagged CPyTagged_Lshift(CPyTagged left, CPyTagged right); bool CPyTagged_IsEq_(CPyTagged left, CPyTagged right); bool CPyTagged_IsLt_(CPyTagged left, CPyTagged right); PyObject *CPyTagged_Str(CPyTagged n); +CPyTagged CPyTagged_FromFloat(double f); PyObject *CPyLong_FromStrWithBase(PyObject *o, CPyTagged base); PyObject *CPyLong_FromStr(PyObject *o); -PyObject *CPyLong_FromFloat(PyObject *o); PyObject *CPyBool_Str(bool b); int64_t CPyLong_AsInt64(PyObject *o); int64_t CPyInt64_Divide(int64_t x, int64_t y); @@ -158,6 +158,7 @@ int32_t CPyLong_AsInt32(PyObject *o); int32_t CPyInt32_Divide(int32_t x, int32_t y); int32_t CPyInt32_Remainder(int32_t x, int32_t y); void CPyInt32_Overflow(void); +double CPyTagged_TrueDivide(CPyTagged x, CPyTagged y); static inline int CPyTagged_CheckLong(CPyTagged x) { return x & CPY_INT_TAG; @@ -283,6 +284,24 @@ static inline bool CPyTagged_IsLe(CPyTagged left, CPyTagged right) { } +// Float operations + + +double CPyFloat_FloorDivide(double x, double y); +double CPyFloat_Pow(double x, double y); +double CPyFloat_Sin(double x); +double CPyFloat_Cos(double x); +double CPyFloat_Tan(double x); +double CPyFloat_Sqrt(double x); +double CPyFloat_Exp(double x); +double CPyFloat_Log(double x); +CPyTagged CPyFloat_Floor(double x); +CPyTagged CPyFloat_Ceil(double x); +double CPyFloat_FromTagged(CPyTagged x); +bool CPyFloat_IsInf(double x); +bool CPyFloat_IsNaN(double x); + + // Generic operations (that work with arbitrary types) @@ -452,7 +471,6 @@ PyObject *CPyBytes_Join(PyObject *sep, PyObject *iter); int CPyBytes_Compare(PyObject *left, PyObject *right); - // Set operations diff --git a/mypyc/lib-rt/float_ops.c b/mypyc/lib-rt/float_ops.c new 
file mode 100644 index 000000000000..d8c6f25955fa --- /dev/null +++ b/mypyc/lib-rt/float_ops.c @@ -0,0 +1,192 @@ +// Float primitive operations +// +// These are registered in mypyc.primitives.float_ops. + +#include +#include "CPy.h" + + +static double CPy_DomainError(void) { + PyErr_SetString(PyExc_ValueError, "math domain error"); + return CPY_FLOAT_ERROR; +} + +static double CPy_MathRangeError(void) { + PyErr_SetString(PyExc_OverflowError, "math range error"); + return CPY_FLOAT_ERROR; +} + +double CPyFloat_FromTagged(CPyTagged x) { + if (CPyTagged_CheckShort(x)) { + return CPyTagged_ShortAsSsize_t(x); + } + double result = PyFloat_AsDouble(CPyTagged_LongAsObject(x)); + if (unlikely(result == -1.0) && PyErr_Occurred()) { + return CPY_FLOAT_ERROR; + } + return result; +} + +double CPyFloat_Sin(double x) { + double v = sin(x); + if (unlikely(isnan(v)) && !isnan(x)) { + return CPy_DomainError(); + } + return v; +} + +double CPyFloat_Cos(double x) { + double v = cos(x); + if (unlikely(isnan(v)) && !isnan(x)) { + return CPy_DomainError(); + } + return v; +} + +double CPyFloat_Tan(double x) { + if (unlikely(isinf(x))) { + return CPy_DomainError(); + } + return tan(x); +} + +double CPyFloat_Sqrt(double x) { + if (x < 0.0) { + return CPy_DomainError(); + } + return sqrt(x); +} + +double CPyFloat_Exp(double x) { + double v = exp(x); + if (unlikely(v == INFINITY) && x != INFINITY) { + return CPy_MathRangeError(); + } + return v; +} + +double CPyFloat_Log(double x) { + if (x <= 0.0) { + return CPy_DomainError(); + } + return log(x); +} + +CPyTagged CPyFloat_Floor(double x) { + double v = floor(x); + return CPyTagged_FromFloat(v); +} + +CPyTagged CPyFloat_Ceil(double x) { + double v = ceil(x); + return CPyTagged_FromFloat(v); +} + +bool CPyFloat_IsInf(double x) { + return isinf(x) != 0; +} + +bool CPyFloat_IsNaN(double x) { + return isnan(x) != 0; +} + +// From CPython 3.10.0, Objects/floatobject.c +static void +_float_div_mod(double vx, double wx, double *floordiv, double *mod) +{ + double div; + *mod = fmod(vx, wx); + /* fmod is typically exact, so vx-mod is *mathematically* an + exact multiple of wx. But this is fp arithmetic, and fp + vx - mod is an approximation; the result is that div may + not be an exact integral value after the division, although + it will always be very close to one. + */ + div = (vx - *mod) / wx; + if (*mod) { + /* ensure the remainder has the same sign as the denominator */ + if ((wx < 0) != (*mod < 0)) { + *mod += wx; + div -= 1.0; + } + } + else { + /* the remainder is zero, and in the presence of signed zeroes + fmod returns different results across platforms; ensure + it has the same sign as the denominator. */ + *mod = copysign(0.0, wx); + } + /* snap quotient to nearest integral value */ + if (div) { + *floordiv = floor(div); + if (div - *floordiv > 0.5) { + *floordiv += 1.0; + } + } + else { + /* div is zero - get the same sign as the true quotient */ + *floordiv = copysign(0.0, vx / wx); /* zero w/ sign of vx/wx */ + } +} + +double CPyFloat_FloorDivide(double x, double y) { + double mod, floordiv; + if (y == 0) { + PyErr_SetString(PyExc_ZeroDivisionError, "float floor division by zero"); + return CPY_FLOAT_ERROR; + } + _float_div_mod(x, y, &floordiv, &mod); + return floordiv; +} + +// Adapted from CPython 3.10.7 +double CPyFloat_Pow(double x, double y) { + if (!isfinite(x) || !isfinite(y)) { + if (isnan(x)) + return y == 0.0 ? 1.0 : x; /* NaN**0 = 1 */ + else if (isnan(y)) + return x == 1.0 ? 
1.0 : y; /* 1**NaN = 1 */ + else if (isinf(x)) { + int odd_y = isfinite(y) && fmod(fabs(y), 2.0) == 1.0; + if (y > 0.0) + return odd_y ? x : fabs(x); + else if (y == 0.0) + return 1.0; + else /* y < 0. */ + return odd_y ? copysign(0.0, x) : 0.0; + } + else if (isinf(y)) { + if (fabs(x) == 1.0) + return 1.0; + else if (y > 0.0 && fabs(x) > 1.0) + return y; + else if (y < 0.0 && fabs(x) < 1.0) { + #if PY_VERSION_HEX < 0x030B0000 + if (x == 0.0) { /* 0**-inf: divide-by-zero */ + return CPy_DomainError(); + } + #endif + return -y; /* result is +inf */ + } else + return 0.0; + } + } + double r = pow(x, y); + if (!isfinite(r)) { + if (isnan(r)) { + return CPy_DomainError(); + } + /* + an infinite result here arises either from: + (A) (+/-0.)**negative (-> divide-by-zero) + (B) overflow of x**y with x and y finite + */ + else if (isinf(r)) { + if (x == 0.0) + return CPy_DomainError(); + else + return CPy_MathRangeError(); + } + } + return r; +} diff --git a/mypyc/lib-rt/int_ops.c b/mypyc/lib-rt/int_ops.c index 5ea2f65d5776..843d9b0d2230 100644 --- a/mypyc/lib-rt/int_ops.c +++ b/mypyc/lib-rt/int_ops.c @@ -293,13 +293,14 @@ PyObject *CPyLong_FromStr(PyObject *o) { return CPyLong_FromStrWithBase(o, base); } -PyObject *CPyLong_FromFloat(PyObject *o) { - if (PyLong_Check(o)) { - CPy_INCREF(o); - return o; - } else { - return PyLong_FromDouble(PyFloat_AS_DOUBLE(o)); +CPyTagged CPyTagged_FromFloat(double f) { + if (f < ((double)CPY_TAGGED_MAX + 1.0) && f > (CPY_TAGGED_MIN - 1.0)) { + return (Py_ssize_t)f << 1; } + PyObject *o = PyLong_FromDouble(f); + if (o == NULL) + return CPY_INT_TAG; + return CPyTagged_StealFromObject(o); } PyObject *CPyBool_Str(bool b) { @@ -639,3 +640,22 @@ int32_t CPyInt32_Remainder(int32_t x, int32_t y) { void CPyInt32_Overflow() { PyErr_SetString(PyExc_OverflowError, "int too large to convert to i32"); } + +double CPyTagged_TrueDivide(CPyTagged x, CPyTagged y) { + if (unlikely(y == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "division by zero"); + return CPY_FLOAT_ERROR; + } + if (likely(!CPyTagged_CheckLong(x) && !CPyTagged_CheckLong(y))) { + return (double)((Py_ssize_t)x >> 1) / (double)((Py_ssize_t)y >> 1); + } else { + PyObject *xo = CPyTagged_AsObject(x); + PyObject *yo = CPyTagged_AsObject(y); + PyObject *result = PyNumber_TrueDivide(xo, yo); + if (result == NULL) { + return CPY_FLOAT_ERROR; + } + return PyFloat_AsDouble(result); + } + return 1.0; +} diff --git a/mypyc/lib-rt/mypyc_util.h b/mypyc/lib-rt/mypyc_util.h index 0fae239cbb9e..13672087fbbc 100644 --- a/mypyc/lib-rt/mypyc_util.h +++ b/mypyc/lib-rt/mypyc_util.h @@ -56,6 +56,9 @@ typedef PyObject CPyModule; // Error value for fixed-width (low-level) integers #define CPY_LL_INT_ERROR -113 +// Error value for floats +#define CPY_FLOAT_ERROR -113.0 + typedef void (*CPyVTableItem)(void); static inline CPyTagged CPyTagged_ShortFromInt(int x) { diff --git a/mypyc/lib-rt/setup.py b/mypyc/lib-rt/setup.py index e04d7041ad72..a31b705cd723 100644 --- a/mypyc/lib-rt/setup.py +++ b/mypyc/lib-rt/setup.py @@ -7,12 +7,14 @@ import sys from distutils.core import Extension, setup +from typing import Any +kwargs: dict[str, Any] if sys.platform == "darwin": kwargs = {"language": "c++"} compile_args = [] else: - kwargs = {} # type: ignore + kwargs = {} compile_args = ["--std=c++11"] setup( @@ -21,7 +23,15 @@ ext_modules=[ Extension( "test_capi", - ["test_capi.cc", "init.c", "int_ops.c", "list_ops.c", "exc_ops.c", "generic_ops.c"], + [ + "test_capi.cc", + "init.c", + "int_ops.c", + "float_ops.c", + "list_ops.c", + "exc_ops.c", + 
"generic_ops.c", + ], depends=["CPy.h", "mypyc_util.h", "pythonsupport.h"], extra_compile_args=["-Wno-unused-function", "-Wno-sign-compare"] + compile_args, library_dirs=["../external/googletest/make"], diff --git a/mypyc/primitives/float_ops.py b/mypyc/primitives/float_ops.py index 535606df6176..14e8d4caf09c 100644 --- a/mypyc/primitives/float_ops.py +++ b/mypyc/primitives/float_ops.py @@ -2,18 +2,41 @@ from __future__ import annotations -from mypyc.ir.ops import ERR_MAGIC -from mypyc.ir.rtypes import float_rprimitive, object_rprimitive, str_rprimitive -from mypyc.primitives.registry import function_op, load_address_op +from mypyc.ir.ops import ERR_MAGIC, ERR_MAGIC_OVERLAPPING, ERR_NEVER +from mypyc.ir.rtypes import ( + bool_rprimitive, + float_rprimitive, + int_rprimitive, + object_rprimitive, + str_rprimitive, +) +from mypyc.primitives.registry import binary_op, function_op, load_address_op # Get the 'builtins.float' type object. load_address_op(name="builtins.float", type=object_rprimitive, src="PyFloat_Type") +binary_op( + name="//", + arg_types=[float_rprimitive, float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_FloorDivide", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# float(int) +int_to_float_op = function_op( + name="builtins.float", + arg_types=[int_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_FromTagged", + error_kind=ERR_MAGIC_OVERLAPPING, +) + # float(str) function_op( name="builtins.float", arg_types=[str_rprimitive], - return_type=float_rprimitive, + return_type=object_rprimitive, c_function_name="PyFloat_FromString", error_kind=ERR_MAGIC, ) @@ -23,6 +46,123 @@ name="builtins.abs", arg_types=[float_rprimitive], return_type=float_rprimitive, - c_function_name="PyNumber_Absolute", + c_function_name="fabs", + error_kind=ERR_NEVER, +) + +# math.sin(float) +function_op( + name="math.sin", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Sin", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.cos(float) +function_op( + name="math.cos", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Cos", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.tan(float) +function_op( + name="math.tan", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Tan", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.sqrt(float) +function_op( + name="math.sqrt", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Sqrt", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.exp(float) +function_op( + name="math.exp", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Exp", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.log(float) +function_op( + name="math.log", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Log", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.floor(float) +function_op( + name="math.floor", + arg_types=[float_rprimitive], + return_type=int_rprimitive, + c_function_name="CPyFloat_Floor", error_kind=ERR_MAGIC, ) + +# math.ceil(float) +function_op( + name="math.ceil", + arg_types=[float_rprimitive], + return_type=int_rprimitive, + c_function_name="CPyFloat_Ceil", + error_kind=ERR_MAGIC, +) + +# math.fabs(float) +function_op( + name="math.fabs", + arg_types=[float_rprimitive], + return_type=float_rprimitive, + c_function_name="fabs", + error_kind=ERR_NEVER, +) + +# math.pow(float, float) +pow_op = 
function_op( + name="math.pow", + arg_types=[float_rprimitive, float_rprimitive], + return_type=float_rprimitive, + c_function_name="CPyFloat_Pow", + error_kind=ERR_MAGIC_OVERLAPPING, +) + +# math.copysign(float, float) +copysign_op = function_op( + name="math.copysign", + arg_types=[float_rprimitive, float_rprimitive], + return_type=float_rprimitive, + c_function_name="copysign", + error_kind=ERR_NEVER, +) + +# math.isinf(float) +function_op( + name="math.isinf", + arg_types=[float_rprimitive], + return_type=bool_rprimitive, + c_function_name="CPyFloat_IsInf", + error_kind=ERR_NEVER, +) + +# math.isnan(float) +function_op( + name="math.isnan", + arg_types=[float_rprimitive], + return_type=bool_rprimitive, + c_function_name="CPyFloat_IsNaN", + error_kind=ERR_NEVER, +) diff --git a/mypyc/primitives/int_ops.py b/mypyc/primitives/int_ops.py index 7eda9bab7e3c..13dca720eba2 100644 --- a/mypyc/primitives/int_ops.py +++ b/mypyc/primitives/int_ops.py @@ -50,8 +50,8 @@ function_op( name=int_name, arg_types=[float_rprimitive], - return_type=object_rprimitive, - c_function_name="CPyLong_FromFloat", + return_type=int_rprimitive, + c_function_name="CPyTagged_FromFloat", error_kind=ERR_MAGIC, ) @@ -126,6 +126,10 @@ def int_binary_op( int_binary_op(">>", "CPyTagged_Rshift", error_kind=ERR_MAGIC) int_binary_op("<<", "CPyTagged_Lshift", error_kind=ERR_MAGIC) +int_binary_op( + "/", "CPyTagged_TrueDivide", return_type=float_rprimitive, error_kind=ERR_MAGIC_OVERLAPPING +) + # This should work because assignment operators are parsed differently # and the code in irbuild that handles it does the assignment # regardless of whether or not the operator works in place anyway. diff --git a/mypyc/test-data/exceptions.test b/mypyc/test-data/exceptions.test index 187551249676..16bf8ba1eb89 100644 --- a/mypyc/test-data/exceptions.test +++ b/mypyc/test-data/exceptions.test @@ -570,6 +570,34 @@ L0: c.x = r1 return 1 +[case testExceptionWithOverlappingFloatErrorValue] +def f() -> float: + return 0.0 + +def g() -> float: + return f() +[out] +def f(): +L0: + return 0.0 +def g(): + r0 :: float + r1 :: bit + r2 :: object + r3 :: float +L0: + r0 = f() + r1 = r0 == -113.0 + if r1 goto L2 else goto L1 :: bool +L1: + return r0 +L2: + r2 = PyErr_Occurred() + if not is_error(r2) goto L3 (error at g:5) else goto L1 +L3: + r3 = :: float + return r3 + [case testExceptionWithLowLevelIntAttribute] from mypy_extensions import i32, i64 @@ -639,3 +667,47 @@ L5: L6: r6 = :: int64 return r6 + +[case testExceptionWithFloatAttribute] +class C: + def __init__(self, x: float, y: float) -> None: + self.x = x + if x: + self.y = y + +def f(c: C) -> float: + return c.x + c.y +[out] +def C.__init__(self, x, y): + self :: __main__.C + x, y :: float + r0 :: bit +L0: + self.x = x + r0 = x != 0.0 + if r0 goto L1 else goto L2 :: bool +L1: + self.y = y +L2: + return 1 +def f(c): + c :: __main__.C + r0, r1 :: float + r2 :: bit + r3 :: float + r4 :: object + r5 :: float +L0: + r0 = c.x + r1 = c.y + r2 = r1 == -113.0 + if r2 goto L2 else goto L1 :: bool +L1: + r3 = r0 + r1 + return r3 +L2: + r4 = PyErr_Occurred() + if not is_error(r4) goto L3 (error at f:8) else goto L1 +L3: + r5 = :: float + return r5 diff --git a/mypyc/test-data/fixtures/ir.py b/mypyc/test-data/fixtures/ir.py index 27e225f273bc..f6e934ac90bb 100644 --- a/mypyc/test-data/fixtures/ir.py +++ b/mypyc/test-data/fixtures/ir.py @@ -111,14 +111,24 @@ def encode(self, x: str=..., y: str=...) -> bytes: ... 
class float: def __init__(self, x: object) -> None: pass def __add__(self, n: float) -> float: pass + def __radd__(self, n: float) -> float: pass def __sub__(self, n: float) -> float: pass + def __rsub__(self, n: float) -> float: pass def __mul__(self, n: float) -> float: pass def __truediv__(self, n: float) -> float: pass + def __floordiv__(self, n: float) -> float: pass + def __mod__(self, n: float) -> float: pass def __pow__(self, n: float) -> float: pass def __neg__(self) -> float: pass def __pos__(self) -> float: pass def __abs__(self) -> float: pass def __invert__(self) -> float: pass + def __eq__(self, x: object) -> bool: pass + def __ne__(self, x: object) -> bool: pass + def __lt__(self, x: float) -> bool: ... + def __le__(self, x: float) -> bool: ... + def __gt__(self, x: float) -> bool: ... + def __ge__(self, x: float) -> bool: ... class complex: def __init__(self, x: object, y: object = None) -> None: pass @@ -288,6 +298,7 @@ class ValueError(Exception): pass class AttributeError(Exception): pass class ImportError(Exception): pass class NameError(Exception): pass +class UnboundLocalError(NameError): pass class LookupError(Exception): pass class KeyError(LookupError): pass class IndexError(LookupError): pass diff --git a/mypyc/test-data/fixtures/testutil.py b/mypyc/test-data/fixtures/testutil.py index 7b4fcc9fc1ca..5a4b1d0f549e 100644 --- a/mypyc/test-data/fixtures/testutil.py +++ b/mypyc/test-data/fixtures/testutil.py @@ -2,10 +2,43 @@ from contextlib import contextmanager from collections.abc import Iterator +import math from typing import ( Any, Iterator, TypeVar, Generator, Optional, List, Tuple, Sequence, Union, Callable, Awaitable, ) +from typing_extensions import Final + +FLOAT_MAGIC: Final = -113.0 + +# Various different float values +float_vals = [ + float(n) * 0.25 for n in range(-10, 10) +] + [ + -0.0, + 1.0/3.0, + math.sqrt(2.0), + 1.23e200, + -2.34e200, + 5.43e-100, + -6.532e-200, + float('inf'), + -float('inf'), + float('nan'), + FLOAT_MAGIC, + math.pi, + 2.0 * math.pi, + math.pi / 2.0, + -math.pi / 2.0, + -1.7976931348623158e+308, # Smallest finite value + -2.2250738585072014e-308, # Closest to zero negative normal value + -7.5491e-312, # Arbitrary negative subnormal value + -5e-324, # Closest to zero negative subnormal value + 1.7976931348623158e+308, # Largest finite value + 2.2250738585072014e-308, # Closest to zero positive normal value + -6.3492e-312, # Arbitrary positive subnormal value + 5e-324, # Closest to zero positive subnormal value +] @contextmanager def assertRaises(typ: type, msg: str = '') -> Iterator[None]: @@ -17,6 +50,12 @@ def assertRaises(typ: type, msg: str = '') -> Iterator[None]: else: assert False, f"Expected {typ.__name__} but got no exception" +def assertDomainError() -> Any: + return assertRaises(ValueError, "math domain error") + +def assertMathRangeError() -> Any: + return assertRaises(OverflowError, "math range error") + T = TypeVar('T') U = TypeVar('U') V = TypeVar('V') diff --git a/mypyc/test-data/irbuild-any.test b/mypyc/test-data/irbuild-any.test index 8d4e085179ae..8274e3d5c619 100644 --- a/mypyc/test-data/irbuild-any.test +++ b/mypyc/test-data/irbuild-any.test @@ -187,15 +187,14 @@ def f() -> None: def f(): r0, r1 :: object r2, a :: int - r3, r4, b :: float + r3, b :: float L0: r0 = object 1 r1 = PyNumber_Absolute(r0) r2 = unbox(int, r1) a = r2 - r3 = 1.1 - r4 = PyNumber_Absolute(r3) - b = r4 + r3 = fabs(1.1) + b = r3 return 1 [case testFunctionBasedOps] @@ -228,13 +227,12 @@ L0: def f3(): r0, r1, r2, r3 :: object r4 :: int - r5 
:: object + r5 :: float L0: r0 = object 2 r1 = object 5 r2 = object 3 r3 = PyNumber_Power(r0, r1, r2) r4 = unbox(int, r3) - r5 = box(int, r4) + r5 = CPyFloat_FromTagged(r4) return r5 - diff --git a/mypyc/test-data/irbuild-basic.test b/mypyc/test-data/irbuild-basic.test index a06977d037b2..e6426cdeea53 100644 --- a/mypyc/test-data/irbuild-basic.test +++ b/mypyc/test-data/irbuild-basic.test @@ -1016,35 +1016,24 @@ def assign_and_return_float_sum() -> float: return f1 * f2 + f3 [out] def assign_and_return_float_sum(): - r0, f1, r1, f2, r2, f3 :: float - r3 :: object - r4 :: float - r5 :: object - r6 :: float -L0: - r0 = 1.0 - f1 = r0 - r1 = 2.0 - f2 = r1 - r2 = 3.0 - f3 = r2 - r3 = PyNumber_Multiply(f1, f2) - r4 = cast(float, r3) - r5 = PyNumber_Add(r4, f3) - r6 = cast(float, r5) - return r6 + f1, f2, f3, r0, r1 :: float +L0: + f1 = 1.0 + f2 = 2.0 + f3 = 3.0 + r0 = f1 * f2 + r1 = r0 + f3 + return r1 [case testLoadComplex] def load() -> complex: return 5j+1.0 [out] def load(): - r0 :: object - r1 :: float - r2 :: object + r0, r1, r2 :: object L0: r0 = 5j - r1 = 1.0 + r1 = box(float, 1.0) r2 = PyNumber_Add(r0, r1) return r2 @@ -1176,10 +1165,8 @@ L0: r5 = unbox(int, r4) return r5 def return_float(): - r0 :: float L0: - r0 = 5.0 - return r0 + return 5.0 def return_callable_type(): r0 :: dict r1 :: str @@ -1196,7 +1183,7 @@ L0: r0 = return_callable_type() f = r0 r1 = PyObject_CallFunctionObjArgs(f, 0) - r2 = cast(float, r1) + r2 = unbox(float, r1) return r2 [case testCallableTypesWithKeywordArgs] @@ -3573,7 +3560,7 @@ def f() -> None: def f(): i, r0 :: int r1, i__redef__, r2 :: str - r3, i__redef____redef__ :: float + i__redef____redef__ :: float L0: i = 0 r0 = CPyTagged_Add(i, 2) @@ -3582,8 +3569,7 @@ L0: i__redef__ = r1 r2 = CPyStr_Append(i__redef__, i__redef__) i__redef__ = r2 - r3 = 0.0 - i__redef____redef__ = r3 + i__redef____redef__ = 0.0 return 1 [case testNewType] diff --git a/mypyc/test-data/irbuild-classes.test b/mypyc/test-data/irbuild-classes.test index b9501c32180d..0f98fc69e5f3 100644 --- a/mypyc/test-data/irbuild-classes.test +++ b/mypyc/test-data/irbuild-classes.test @@ -656,6 +656,75 @@ L0: r3 = CPyTagged_Add(r0, r2) return r3 +[case testCallClassMethodViaCls] +class C: + @classmethod + def f(cls, x: int) -> int: + return cls.g(x) + + @classmethod + def g(cls, x: int) -> int: + return x + +class D: + @classmethod + def f(cls, x: int) -> int: + # TODO: This could aso be optimized, since g is not ever overridden + return cls.g(x) + + @classmethod + def g(cls, x: int) -> int: + return x + +class DD(D): + pass +[out] +def C.f(cls, x): + cls :: object + x :: int + r0 :: object + r1 :: int +L0: + r0 = __main__.C :: type + r1 = C.g(r0, x) + return r1 +def C.g(cls, x): + cls :: object + x :: int +L0: + return x +def D.f(cls, x): + cls :: object + x :: int + r0 :: str + r1, r2 :: object + r3 :: int +L0: + r0 = 'g' + r1 = box(int, x) + r2 = CPyObject_CallMethodObjArgs(cls, r0, r1, 0) + r3 = unbox(int, r2) + return r3 +def D.g(cls, x): + cls :: object + x :: int +L0: + return x + +[case testCannotAssignToClsArgument] +from typing import Any, cast + +class C: + @classmethod + def m(cls) -> None: + cls = cast(Any, D) # E: Cannot assign to the first argument of classmethod + cls, x = cast(Any, D), 1 # E: Cannot assign to the first argument of classmethod + cls, x = cast(Any, [1, 2]) # E: Cannot assign to the first argument of classmethod + cls.m() + +class D: + pass + [case testSuper1] class A: def __init__(self, x: int) -> None: diff --git a/mypyc/test-data/irbuild-constant-fold.test 
b/mypyc/test-data/irbuild-constant-fold.test index 7d9127887aa6..866953f0c09a 100644 --- a/mypyc/test-data/irbuild-constant-fold.test +++ b/mypyc/test-data/irbuild-constant-fold.test @@ -140,21 +140,12 @@ L0: rshift_neg = r3 return 1 def unsupported_div(): - r0, r1, r2 :: object - r3, x :: float - r4, r5, r6 :: object - r7, y :: float + r0, x, r1, y :: float L0: - r0 = object 4 - r1 = object 6 - r2 = PyNumber_TrueDivide(r0, r1) - r3 = cast(float, r2) - x = r3 - r4 = object 10 - r5 = object 5 - r6 = PyNumber_TrueDivide(r4, r5) - r7 = cast(float, r6) - y = r7 + r0 = CPyTagged_TrueDivide(8, 12) + x = r0 + r1 = CPyTagged_TrueDivide(20, 10) + y = r1 return 1 def unsupported_pow(): r0, r1, r2 :: object @@ -163,7 +154,7 @@ L0: r0 = object 3 r1 = object -1 r2 = CPyNumber_Power(r0, r1) - r3 = cast(float, r2) + r3 = unbox(float, r2) p = r3 return 1 diff --git a/mypyc/test-data/irbuild-dict.test b/mypyc/test-data/irbuild-dict.test index 99643b9451f0..d1fc4f956ce7 100644 --- a/mypyc/test-data/irbuild-dict.test +++ b/mypyc/test-data/irbuild-dict.test @@ -219,6 +219,12 @@ L0: [case testDictIterationMethods] from typing import Dict, Union +from typing_extensions import TypedDict + +class Person(TypedDict): + name: str + age: int + def print_dict_methods(d1: Dict[int, int], d2: Dict[int, int]) -> None: for v in d1.values(): if v in d2: @@ -229,6 +235,10 @@ def union_of_dicts(d: Union[Dict[str, int], Dict[str, str]]) -> None: new = {} for k, v in d.items(): new[k] = int(v) +def typeddict(d: Person) -> None: + for k, v in d.items(): + if k == "name": + name = v [out] def print_dict_methods(d1, d2): d1, d2 :: dict @@ -370,6 +380,65 @@ L4: r19 = CPy_NoErrOccured() L5: return 1 +def typeddict(d): + d :: dict + r0 :: short_int + r1 :: native_int + r2 :: short_int + r3 :: object + r4 :: tuple[bool, short_int, object, object] + r5 :: short_int + r6 :: bool + r7, r8 :: object + r9, k :: str + v :: object + r10 :: str + r11 :: int32 + r12 :: bit + r13 :: object + r14, r15, r16 :: bit + name :: object + r17, r18 :: bit +L0: + r0 = 0 + r1 = PyDict_Size(d) + r2 = r1 << 1 + r3 = CPyDict_GetItemsIter(d) +L1: + r4 = CPyDict_NextItem(r3, r0) + r5 = r4[1] + r0 = r5 + r6 = r4[0] + if r6 goto L2 else goto L9 :: bool +L2: + r7 = r4[2] + r8 = r4[3] + r9 = cast(str, r7) + k = r9 + v = r8 + r10 = 'name' + r11 = PyUnicode_Compare(k, r10) + r12 = r11 == -1 + if r12 goto L3 else goto L5 :: bool +L3: + r13 = PyErr_Occurred() + r14 = r13 != 0 + if r14 goto L4 else goto L5 :: bool +L4: + r15 = CPy_KeepPropagating() +L5: + r16 = r11 == 0 + if r16 goto L6 else goto L7 :: bool +L6: + name = v +L7: +L8: + r17 = CPyDict_CheckSize(d, r2) + goto L1 +L9: + r18 = CPy_NoErrOccured() +L10: + return 1 [case testDictLoadAddress] def f() -> None: diff --git a/mypyc/test-data/irbuild-dunders.test b/mypyc/test-data/irbuild-dunders.test index 82f04dcdf687..3c140d927c0f 100644 --- a/mypyc/test-data/irbuild-dunders.test +++ b/mypyc/test-data/irbuild-dunders.test @@ -184,10 +184,8 @@ L0: return 6 def C.__float__(self): self :: __main__.C - r0 :: float L0: - r0 = 4.0 - return r0 + return 4.0 def C.__pos__(self): self :: __main__.C L0: @@ -223,4 +221,3 @@ L0: r6 = c.__bool__() r7 = c.__complex__() return 1 - diff --git a/mypyc/test-data/irbuild-float.test b/mypyc/test-data/irbuild-float.test new file mode 100644 index 000000000000..e3a60852574b --- /dev/null +++ b/mypyc/test-data/irbuild-float.test @@ -0,0 +1,497 @@ +[case testFloatAdd] +def f(x: float, y: float) -> float: + return x + y +def g(x: float) -> float: + z = x - 1.5 + return 2.5 * z +[out] +def 
f(x, y): + x, y, r0 :: float +L0: + r0 = x + y + return r0 +def g(x): + x, r0, z, r1 :: float +L0: + r0 = x - 1.5 + z = r0 + r1 = 2.5 * z + return r1 + +[case testFloatBoxAndUnbox] +from typing import Any +def f(x: float) -> object: + return x +def g(x: Any) -> float: + return x +[out] +def f(x): + x :: float + r0 :: object +L0: + r0 = box(float, x) + return r0 +def g(x): + x :: object + r0 :: float +L0: + r0 = unbox(float, x) + return r0 + +[case testFloatNegAndPos] +def f(x: float) -> float: + y = +x * -0.5 + return -y +[out] +def f(x): + x, r0, y, r1 :: float +L0: + r0 = x * -0.5 + y = r0 + r1 = -y + return r1 + +[case testFloatCoerceFromInt] +def from_int(x: int) -> float: + return x + +def from_literal() -> float: + return 5 + +def from_literal_neg() -> float: + return -2 +[out] +def from_int(x): + x :: int + r0 :: float +L0: + r0 = CPyFloat_FromTagged(x) + return r0 +def from_literal(): +L0: + return 5.0 +def from_literal_neg(): +L0: + return -2.0 + +[case testConvertBetweenFloatAndInt] +def to_int(x: float) -> int: + return int(x) +def from_int(x: int) -> float: + return float(x) +[out] +def to_int(x): + x :: float + r0 :: int +L0: + r0 = CPyTagged_FromFloat(x) + return r0 +def from_int(x): + x :: int + r0 :: float +L0: + r0 = CPyFloat_FromTagged(x) + return r0 + +[case testFloatOperatorAssignment] +def f(x: float, y: float) -> float: + x += y + x -= 5.0 + return x +[out] +def f(x, y): + x, y, r0, r1 :: float +L0: + r0 = x + y + x = r0 + r1 = x - 5.0 + x = r1 + return x + +[case testFloatOperatorAssignmentWithInt] +def f(x: float, y: int) -> None: + x += y + x -= 5 +[out] +def f(x, y): + x :: float + y :: int + r0, r1, r2 :: float +L0: + r0 = CPyFloat_FromTagged(y) + r1 = x + r0 + x = r1 + r2 = x - 5.0 + x = r2 + return 1 + +[case testFloatComparison] +def lt(x: float, y: float) -> bool: + return x < y +def eq(x: float, y: float) -> bool: + return x == y +[out] +def lt(x, y): + x, y :: float + r0 :: bit +L0: + r0 = x < y + return r0 +def eq(x, y): + x, y :: float + r0 :: bit +L0: + r0 = x == y + return r0 + +[case testFloatOpWithLiteralInt] +def f(x: float) -> None: + y = x * 2 + z = 1 - y + b = z < 3 + c = 0 == z +[out] +def f(x): + x, r0, y, r1, z :: float + r2 :: bit + b :: bool + r3 :: bit + c :: bool +L0: + r0 = x * 2.0 + y = r0 + r1 = 1.0 - y + z = r1 + r2 = z < 3.0 + b = r2 + r3 = 0.0 == z + c = r3 + return 1 + +[case testFloatCallFunctionWithLiteralInt] +def f(x: float) -> None: pass + +def g() -> None: + f(3) + f(-2) +[out] +def f(x): + x :: float +L0: + return 1 +def g(): + r0, r1 :: None +L0: + r0 = f(3.0) + r1 = f(-2.0) + return 1 + +[case testFloatAsBool] +def f(x: float) -> int: + if x: + return 2 + else: + return 5 +[out] +def f(x): + x :: float + r0 :: bit +L0: + r0 = x != 0.0 + if r0 goto L1 else goto L2 :: bool +L1: + return 4 +L2: + return 10 +L3: + unreachable + +[case testCallSqrtViaMathModule] +import math + +def f(x: float) -> float: + return math.sqrt(x) +[out] +def f(x): + x, r0 :: float +L0: + r0 = CPyFloat_Sqrt(x) + return r0 + +[case testFloatFinalConstant] +from typing_extensions import Final + +X: Final = 123.0 +Y: Final = -1.0 + +def f() -> float: + a = X + return a + Y +[out] +def f(): + a, r0 :: float +L0: + a = 123.0 + r0 = a + -1.0 + return r0 + +[case testFloatDefaultArg] +def f(x: float = 1.5) -> float: + return x +[out] +def f(x, __bitmap): + x :: float + __bitmap, r0 :: uint32 + r1 :: bit +L0: + r0 = __bitmap & 1 + r1 = r0 == 0 + if r1 goto L1 else goto L2 :: bool +L1: + x = 1.5 +L2: + return x + +[case testFloatMixedOperations] +def f(x: 
float, y: int) -> None: + if x < y: + z = x + y + x -= y + z = y + z + if y == x: + x -= 1 +[out] +def f(x, y): + x :: float + y :: int + r0 :: float + r1 :: bit + r2, r3, z, r4, r5, r6, r7, r8 :: float + r9 :: bit + r10 :: float +L0: + r0 = CPyFloat_FromTagged(y) + r1 = x < r0 + if r1 goto L1 else goto L2 :: bool +L1: + r2 = CPyFloat_FromTagged(y) + r3 = x + r2 + z = r3 + r4 = CPyFloat_FromTagged(y) + r5 = x - r4 + x = r5 + r6 = CPyFloat_FromTagged(y) + r7 = r6 + z + z = r7 +L2: + r8 = CPyFloat_FromTagged(y) + r9 = r8 == x + if r9 goto L3 else goto L4 :: bool +L3: + r10 = x - 1.0 + x = r10 +L4: + return 1 + +[case testFloatDivideSimple] +def f(x: float, y: float) -> float: + z = x / y + z = z / 2.0 + return z / 3 +[out] +def f(x, y): + x, y :: float + r0 :: bit + r1 :: bool + r2, z, r3, r4 :: float +L0: + r0 = y == 0.0 + if r0 goto L1 else goto L2 :: bool +L1: + r1 = raise ZeroDivisionError('float division by zero') + unreachable +L2: + r2 = x / y + z = r2 + r3 = z / 2.0 + z = r3 + r4 = z / 3.0 + return r4 + +[case testFloatDivideIntOperand] +def f(n: int, m: int) -> float: + return n / m +[out] +def f(n, m): + n, m :: int + r0 :: float +L0: + r0 = CPyTagged_TrueDivide(n, m) + return r0 + +[case testFloatResultOfIntDivide] +def f(f: float, n: int) -> float: + x = f / n + return n / x +[out] +def f(f, n): + f :: float + n :: int + r0 :: float + r1 :: bit + r2 :: bool + r3, x, r4 :: float + r5 :: bit + r6 :: bool + r7 :: float +L0: + r0 = CPyFloat_FromTagged(n) + r1 = r0 == 0.0 + if r1 goto L1 else goto L2 :: bool +L1: + r2 = raise ZeroDivisionError('float division by zero') + unreachable +L2: + r3 = f / r0 + x = r3 + r4 = CPyFloat_FromTagged(n) + r5 = x == 0.0 + if r5 goto L3 else goto L4 :: bool +L3: + r6 = raise ZeroDivisionError('float division by zero') + unreachable +L4: + r7 = r4 / x + return r7 + +[case testFloatExplicitConversions] +def f(f: float, n: int) -> int: + x = float(n) + y = float(x) # no-op + return int(y) +[out] +def f(f, n): + f :: float + n :: int + r0, x, y :: float + r1 :: int +L0: + r0 = CPyFloat_FromTagged(n) + x = r0 + y = x + r1 = CPyTagged_FromFloat(y) + return r1 + +[case testFloatModulo] +def f(x: float, y: float) -> float: + return x % y +[out] +def f(x, y): + x, y :: float + r0 :: bit + r1 :: bool + r2, r3 :: float + r4, r5, r6, r7 :: bit + r8, r9 :: float +L0: + r0 = y == 0.0 + if r0 goto L1 else goto L2 :: bool +L1: + r1 = raise ZeroDivisionError('float modulo') + unreachable +L2: + r2 = x % y + r3 = r2 + r4 = r3 == 0.0 + if r4 goto L5 else goto L3 :: bool +L3: + r5 = x < 0.0 + r6 = y < 0.0 + r7 = r5 == r6 + if r7 goto L6 else goto L4 :: bool +L4: + r8 = r3 + y + r3 = r8 + goto L6 +L5: + r9 = copysign(0.0, y) + r3 = r9 +L6: + return r3 + +[case testFloatFloorDivide] +def f(x: float, y: float) -> float: + return x // y +def g(x: float, y: int) -> float: + return x // y +[out] +def f(x, y): + x, y, r0 :: float +L0: + r0 = CPyFloat_FloorDivide(x, y) + return r0 +def g(x, y): + x :: float + y :: int + r0, r1 :: float +L0: + r0 = CPyFloat_FromTagged(y) + r1 = CPyFloat_FloorDivide(x, r0) + return r1 + +[case testFloatNarrowToIntDisallowed] +class C: + x: float + +def narrow_local(x: float, n: int) -> int: + x = n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + return x + +def narrow_tuple_lvalue(x: float, y: float, n: int) -> int: + x, y = 1.0, n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + return y + +def narrow_multiple_lvalues(x: 
float, y: float, n: int) -> int: + x = a = n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + a = y = n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + return x + y + +def narrow_attribute(c: C, n: int) -> int: + c.x = n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + return c.x + +def narrow_using_int_literal(x: float) -> int: + x = 1 # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + return x + +def narrow_using_declaration(n: int) -> int: + x: float + x = n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + return x + +[case testFloatInitializeFromInt] +def init(n: int) -> None: + # These are strictly speaking safe, since these don't narrow, but for consistency with + # narrowing assignments, generate errors here + x: float = n # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + y: float = 5 # E: Incompatible value representations in assignment (expression has type "int", variable has type "float") + +[case testFloatCoerceTupleFromIntValues] +from __future__ import annotations + +def f(x: int) -> None: + t: tuple[float, float, float] = (x, 2.5, -7) +[out] +def f(x): + x :: int + r0 :: tuple[int, float, int] + r1 :: int + r2 :: float + r3, t :: tuple[float, float, float] +L0: + r0 = (x, 2.5, -14) + r1 = r0[0] + r2 = CPyFloat_FromTagged(r1) + r3 = (r2, 2.5, -7.0) + t = r3 + return 1 diff --git a/mypyc/test-data/irbuild-i32.test b/mypyc/test-data/irbuild-i32.test index 7ea3c0864728..725e183657b1 100644 --- a/mypyc/test-data/irbuild-i32.test +++ b/mypyc/test-data/irbuild-i32.test @@ -481,7 +481,7 @@ L0: z = -3 return 1 -[case testI32ExplicitConversionFromVariousTypes] +[case testI32ExplicitConversionFromVariousTypes_64bit] from mypy_extensions import i32 def bool_to_i32(b: bool) -> i32: @@ -526,9 +526,62 @@ L0: return r0 def float_to_i32(x): x :: float - r0 :: object - r1 :: int32 + r0 :: int + r1 :: native_int + r2, r3, r4 :: bit + r5 :: native_int + r6, r7 :: int32 L0: - r0 = CPyLong_FromFloat(x) - r1 = unbox(int32, r0) - return r1 + r0 = CPyTagged_FromFloat(x) + r1 = r0 & 1 + r2 = r1 == 0 + if r2 goto L1 else goto L4 :: bool +L1: + r3 = r0 < 4294967296 :: signed + if r3 goto L2 else goto L4 :: bool +L2: + r4 = r0 >= -4294967296 :: signed + if r4 goto L3 else goto L4 :: bool +L3: + r5 = r0 >> 1 + r6 = truncate r5: native_int to int32 + r7 = r6 + goto L5 +L4: + CPyInt32_Overflow() + unreachable +L5: + return r7 + +[case testI32ExplicitConversionFromFloat_32bit] +from mypy_extensions import i32 + +def float_to_i32(x: float) -> i32: + return i32(x) +[out] +def float_to_i32(x): + x :: float + r0 :: int + r1 :: native_int + r2 :: bit + r3, r4 :: int32 + r5 :: ptr + r6 :: c_ptr + r7 :: int32 +L0: + r0 = CPyTagged_FromFloat(x) + r1 = r0 & 1 + r2 = r1 == 0 + if r2 goto L1 else goto L2 :: bool +L1: + r3 = r0 >> 1 + r4 = r3 + goto L3 +L2: + r5 = r0 ^ 1 + r6 = r5 + r7 = CPyLong_AsInt32(r6) + r4 = r7 + keep_alive r0 +L3: + return r4 diff --git a/mypyc/test-data/irbuild-i64.test b/mypyc/test-data/irbuild-i64.test index f616893d8fe5..a18171c41d57 100644 --- a/mypyc/test-data/irbuild-i64.test +++ b/mypyc/test-data/irbuild-i64.test @@ -650,7 +650,6 @@ def f(x: i64, y: i64) -> Tuple[i64, i64]: return x, y def g() -> Tuple[i64, i64]: - # TODO: 
Avoid boxing and unboxing return 1, 2 def h() -> i64: @@ -666,13 +665,11 @@ L0: return r0 def g(): r0 :: tuple[int, int] - r1 :: object - r2 :: tuple[int64, int64] + r1 :: tuple[int64, int64] L0: r0 = (2, 4) - r1 = box(tuple[int, int], r0) - r2 = unbox(tuple[int64, int64], r1) - return r2 + r1 = (1, 2) + return r1 def h(): r0 :: tuple[int64, int64] r1, x, r2, y :: int64 @@ -1844,7 +1841,7 @@ L2: L3: return r4 -[case testI64ExplicitConversionFromVariousTypes] +[case testI64ExplicitConversionFromVariousTypes_64bit] from mypy_extensions import i64 def bool_to_i64(b: bool) -> i64: @@ -1900,12 +1897,123 @@ L0: return r0 def float_to_i64(x): x :: float - r0 :: object - r1 :: int64 + r0 :: int + r1 :: native_int + r2 :: bit + r3, r4 :: int64 + r5 :: ptr + r6 :: c_ptr + r7 :: int64 L0: - r0 = CPyLong_FromFloat(x) - r1 = unbox(int64, r0) - return r1 + r0 = CPyTagged_FromFloat(x) + r1 = r0 & 1 + r2 = r1 == 0 + if r2 goto L1 else goto L2 :: bool +L1: + r3 = r0 >> 1 + r4 = r3 + goto L3 +L2: + r5 = r0 ^ 1 + r6 = r5 + r7 = CPyLong_AsInt64(r6) + r4 = r7 + keep_alive r0 +L3: + return r4 + +[case testI64ExplicitConversionFromFloat_32bit] +from mypy_extensions import i64 + +def float_to_i64(x: float) -> i64: + return i64(x) +[out] +def float_to_i64(x): + x :: float + r0 :: int + r1 :: native_int + r2 :: bit + r3, r4, r5 :: int64 + r6 :: ptr + r7 :: c_ptr + r8 :: int64 +L0: + r0 = CPyTagged_FromFloat(x) + r1 = r0 & 1 + r2 = r1 == 0 + if r2 goto L1 else goto L2 :: bool +L1: + r3 = extend signed r0: builtins.int to int64 + r4 = r3 >> 1 + r5 = r4 + goto L3 +L2: + r6 = r0 ^ 1 + r7 = r6 + r8 = CPyLong_AsInt64(r7) + r5 = r8 + keep_alive r0 +L3: + return r5 + +[case testI64ConvertToFloat_64bit] +from mypy_extensions import i64 + +def i64_to_float(x: i64) -> float: + return float(x) +[out] +def i64_to_float(x): + x :: int64 + r0, r1 :: bit + r2, r3, r4 :: int + r5 :: float +L0: + r0 = x <= 4611686018427387903 :: signed + if r0 goto L1 else goto L2 :: bool +L1: + r1 = x >= -4611686018427387904 :: signed + if r1 goto L3 else goto L2 :: bool +L2: + r2 = CPyTagged_FromInt64(x) + r3 = r2 + goto L4 +L3: + r4 = x << 1 + r3 = r4 +L4: + r5 = CPyFloat_FromTagged(r3) + return r5 + +[case testI64ConvertToFloat_32bit] +from mypy_extensions import i64 + +def i64_to_float(x: i64) -> float: + return float(x) +[out] +def i64_to_float(x): + x :: int64 + r0, r1 :: bit + r2, r3 :: int + r4 :: native_int + r5 :: int + r6 :: float +L0: + r0 = x <= 1073741823 :: signed + if r0 goto L1 else goto L2 :: bool +L1: + r1 = x >= -1073741824 :: signed + if r1 goto L3 else goto L2 :: bool +L2: + r2 = CPyTagged_FromInt64(x) + r3 = r2 + goto L4 +L3: + r4 = truncate x: int64 to native_int + r5 = r4 << 1 + r3 = r5 +L4: + r6 = CPyFloat_FromTagged(r3) + return r6 [case testI64IsinstanceNarrowing] from typing import Union @@ -1970,3 +2078,78 @@ L2: r6 = r5.a keep_alive x return r6 + +[case testI64ConvertBetweenTuples_64bit] +from __future__ import annotations +from mypy_extensions import i64 + +def f(t: tuple[int, i64, int]) -> None: + tt: tuple[int, i64, i64] = t + +def g(n: int) -> None: + t: tuple[i64, i64] = (1, n) +[out] +def f(t): + t :: tuple[int, int64, int] + r0 :: int + r1 :: int64 + r2 :: int + r3 :: native_int + r4 :: bit + r5, r6 :: int64 + r7 :: ptr + r8 :: c_ptr + r9 :: int64 + r10, tt :: tuple[int, int64, int64] +L0: + r0 = t[0] + r1 = t[1] + r2 = t[2] + r3 = r2 & 1 + r4 = r3 == 0 + if r4 goto L1 else goto L2 :: bool +L1: + r5 = r2 >> 1 + r6 = r5 + goto L3 +L2: + r7 = r2 ^ 1 + r8 = r7 + r9 = CPyLong_AsInt64(r8) + r6 = r9 + keep_alive r2 
+L3: + r10 = (r0, r1, r6) + tt = r10 + return 1 +def g(n): + n :: int + r0 :: tuple[int, int] + r1 :: int + r2 :: native_int + r3 :: bit + r4, r5 :: int64 + r6 :: ptr + r7 :: c_ptr + r8 :: int64 + r9, t :: tuple[int64, int64] +L0: + r0 = (2, n) + r1 = r0[1] + r2 = r1 & 1 + r3 = r2 == 0 + if r3 goto L1 else goto L2 :: bool +L1: + r4 = r1 >> 1 + r5 = r4 + goto L3 +L2: + r6 = r1 ^ 1 + r7 = r6 + r8 = CPyLong_AsInt64(r7) + r5 = r8 + keep_alive r1 +L3: + r9 = (1, r5) + t = r9 + return 1 diff --git a/mypyc/test-data/irbuild-lists.test b/mypyc/test-data/irbuild-lists.test index cb9687a2f942..eaeff9432446 100644 --- a/mypyc/test-data/irbuild-lists.test +++ b/mypyc/test-data/irbuild-lists.test @@ -84,6 +84,22 @@ L0: x = r0 return 1 +[case testNewListEmptyViaAlias] +from typing import List + +ListAlias = list + +def f() -> None: + x: List[int] = ListAlias() + +[out] +def f(): + r0, x :: list +L0: + r0 = PyList_New(0) + x = r0 + return 1 + [case testNewListTwoItems] from typing import List def f() -> None: diff --git a/mypyc/test-data/run-classes.test b/mypyc/test-data/run-classes.test index 92ec3873bf38..268e07f6bde4 100644 --- a/mypyc/test-data/run-classes.test +++ b/mypyc/test-data/run-classes.test @@ -662,42 +662,107 @@ Traceback (most recent call last): AttributeError: attribute 'x' of 'X' undefined [case testClassMethods] -MYPY = False -if MYPY: - from typing import ClassVar +from typing import ClassVar, Any +from typing_extensions import final +from mypy_extensions import mypyc_attr + +from interp import make_interpreted_subclass + class C: - lurr: 'ClassVar[int]' = 9 + lurr: ClassVar[int] = 9 @staticmethod - def foo(x: int) -> int: return 10 + x + def foo(x: int) -> int: + return 10 + x @classmethod - def bar(cls, x: int) -> int: return cls.lurr + x + def bar(cls, x: int) -> int: + return cls.lurr + x @staticmethod - def baz(x: int, y: int = 10) -> int: return y - x + def baz(x: int, y: int = 10) -> int: + return y - x @classmethod - def quux(cls, x: int, y: int = 10) -> int: return y - x + def quux(cls, x: int, y: int = 10) -> int: + return y - x + @classmethod + def call_other(cls, x: int) -> int: + return cls.quux(x, 3) class D(C): def f(self) -> int: return super().foo(1) + super().bar(2) + super().baz(10) + super().quux(10) -def test1() -> int: +def ctest1() -> int: return C.foo(1) + C.bar(2) + C.baz(10) + C.quux(10) + C.quux(y=10, x=9) -def test2() -> int: + +def ctest2() -> int: c = C() return c.foo(1) + c.bar(2) + c.baz(10) -[file driver.py] -from native import * -assert C.foo(10) == 20 -assert C.bar(10) == 19 -c = C() -assert c.foo(10) == 20 -assert c.bar(10) == 19 -assert test1() == 23 -assert test2() == 22 +CAny: Any = C + +def test_classmethod_using_any() -> None: + assert CAny.foo(10) == 20 + assert CAny.bar(10) == 19 + +def test_classmethod_on_instance() -> None: + c = C() + assert c.foo(10) == 20 + assert c.bar(10) == 19 + assert c.call_other(1) == 2 + +def test_classmethod_misc() -> None: + assert ctest1() == 23 + assert ctest2() == 22 + assert C.call_other(2) == 1 + +def test_classmethod_using_super() -> None: + d = D() + assert d.f() == 22 -d = D() -assert d.f() == 22 +@final +class F1: + @classmethod + def f(cls, x: int) -> int: + return cls.g(x) + + @classmethod + def g(cls, x: int) -> int: + return x + 1 + +class F2: # Implicitly final (no subclasses) + @classmethod + def f(cls, x: int) -> int: + return cls.g(x) + + @classmethod + def g(cls, x: int) -> int: + return x + 1 + +def test_classmethod_of_final_class() -> None: + assert F1.f(5) == 6 + assert F2.f(7) == 8 + 
+@mypyc_attr(allow_interpreted_subclasses=True) +class CI: + @classmethod + def f(cls, x: int) -> int: + return cls.g(x) + + @classmethod + def g(cls, x: int) -> int: + return x + 1 + +def test_classmethod_with_allow_interpreted() -> None: + assert CI.f(4) == 5 + sub = make_interpreted_subclass(CI) + assert sub.f(4) == 7 + +[file interp.py] +def make_interpreted_subclass(base): + class Sub(base): + @classmethod + def g(cls, x: int) -> int: + return x + 3 + return Sub [case testSuper] from mypy_extensions import trait diff --git a/mypyc/test-data/run-dicts.test b/mypyc/test-data/run-dicts.test index 41675e7fcc91..58b862e3f303 100644 --- a/mypyc/test-data/run-dicts.test +++ b/mypyc/test-data/run-dicts.test @@ -95,7 +95,13 @@ assert get_content_set(od) == ({1, 3}, {2, 4}, {(1, 2), (3, 4)}) [typing fixtures/typing-full.pyi] [case testDictIterationMethodsRun] -from typing import Dict +from typing import Dict, Union +from typing_extensions import TypedDict + +class ExtensionDict(TypedDict): + python: str + c: str + def print_dict_methods(d1: Dict[int, int], d2: Dict[int, int], d3: Dict[int, int]) -> None: @@ -107,13 +113,27 @@ def print_dict_methods(d1: Dict[int, int], for v in d3.values(): print(v) +def print_dict_methods_special(d1: Union[Dict[int, int], Dict[str, str]], + d2: ExtensionDict) -> None: + for k in d1.keys(): + print(k) + for k, v in d1.items(): + print(k) + print(v) + for v2 in d2.values(): + print(v2) + for k2, v2 in d2.items(): + print(k2) + print(v2) + + def clear_during_iter(d: Dict[int, int]) -> None: for k in d: d.clear() class Custom(Dict[int, int]): pass [file driver.py] -from native import print_dict_methods, Custom, clear_during_iter +from native import print_dict_methods, print_dict_methods_special, Custom, clear_during_iter from collections import OrderedDict print_dict_methods({}, {}, {}) print_dict_methods({1: 2}, {3: 4, 5: 6}, {7: 8}) @@ -124,6 +144,7 @@ print('==') d = OrderedDict([(1, 2), (3, 4)]) print_dict_methods(d, d, d) print('==') +print_dict_methods_special({1: 2}, {"python": ".py", "c": ".c"}) d.move_to_end(1) print_dict_methods(d, d, d) clear_during_iter({}) # OK @@ -185,6 +206,15 @@ else: 2 4 == +1 +1 +2 +.py +.c +python +.py +c +.c 3 1 3 diff --git a/mypyc/test-data/run-floats.test b/mypyc/test-data/run-floats.test index 1b67a1190cd8..2c101100549d 100644 --- a/mypyc/test-data/run-floats.test +++ b/mypyc/test-data/run-floats.test @@ -1,30 +1,516 @@ # Test cases for floats (compile and run) -[case testStrToFloat] +[case testFloatOps] +from __future__ import annotations +from typing import Any, cast +from typing_extensions import Final +from testutil import assertRaises, float_vals, FLOAT_MAGIC +import math + +def test_arithmetic() -> None: + zero = float(0.0) + one = zero + 1.0 + x = one + one / 2.0 + assert x == 1.5 + assert x - one == 0.5 + assert x * x == 2.25 + assert x / 2.0 == 0.75 + assert x * (-0.5) == -0.75 + assert -x == -1.5 + for x in float_vals: + assert repr(-x) == repr(getattr(x, "__neg__")()) + + for y in float_vals: + assert repr(x + y) == repr(getattr(x, "__add__")(y)) + assert repr(x - y) == repr(getattr(x, "__sub__")(y)) + assert repr(x * y) == repr(getattr(x, "__mul__")(y)) + if y != 0: + assert repr(x / y) == repr(getattr(x, "__truediv__")(y)) + +def test_mod() -> None: + zero = float(0.0) + one = zero + 1.0 + x = one + one / 2.0 + assert x % 0.4 == 0.29999999999999993 + assert (-x) % 0.4 == 0.10000000000000009 + assert x % -0.4 == -0.10000000000000009 + assert (-x) % -0.4 == -0.29999999999999993 + for x in float_vals: + for y in 
float_vals: + if y != 0: + assert repr(x % y) == repr(getattr(x, "__mod__")(y)) + +def test_floor_div() -> None: + for x in float_vals: + for y in float_vals: + if y != 0: + assert repr(x // y) == repr(getattr(x, "__floordiv__")(y)) + else: + with assertRaises(ZeroDivisionError, "float floor division by zero"): + x // y + +def test_mixed_arithmetic() -> None: + zf = float(0.0) + zn = int() + assert (zf + 5.5) + (zn + 1) == 6.5 + assert (zn - 2) - (zf - 5.5) == 3.5 + x = zf + 3.4 + x += zn + 2 + assert x == 5.4 + +def test_arithmetic_errors() -> None: + zero = float(0.0) + one = zero + 1.0 + with assertRaises(ZeroDivisionError, "float division by zero"): + print(one / zero) + with assertRaises(ZeroDivisionError, "float modulo"): + print(one % zero) + +def test_comparisons() -> None: + zero = float(0.0) + one = zero + 1.0 + x = one + one / 2.0 + assert x < (1.51 + zero) + assert not (x < (1.49 + zero)) + assert x > (1.49 + zero) + assert not (x > (1.51 + zero)) + assert x <= (1.5 + zero) + assert not (x <= (1.49 + zero)) + assert x >= (1.5 + zero) + assert not (x >= (1.51 + zero)) + for x in float_vals: + for y in float_vals: + assert (x <= y) == getattr(x, "__le__")(y) + assert (x < y) == getattr(x, "__lt__")(y) + assert (x >= y) == getattr(x, "__ge__")(y) + assert (x > y) == getattr(x, "__gt__")(y) + assert (x == y) == getattr(x, "__eq__")(y) + assert (x != y) == getattr(x, "__ne__")(y) + +def test_mixed_comparisons() -> None: + zf = float(0.0) + zn = int() + if (zf + 1.0) == (zn + 1): + assert True + else: + assert False + if (zf + 1.1) == (zn + 1): + assert False + else: + assert True + assert (zf + 1.1) != (zn + 1) + assert (zf + 1.1) > (zn + 1) + assert not (zf + 0.9) > (zn + 1) + assert (zn + 1) < (zf + 1.1) + +def test_boxing_and_unboxing() -> None: + x = 1.5 + boxed: Any = x + assert repr(boxed) == "1.5" + assert type(boxed) is float + y: float = boxed + assert y == x + boxed_int: Any = 5 + assert [type(boxed_int)] == [int] # Avoid mypy type narrowing + z: float = boxed_int + assert z == 5.0 + for xx in float_vals: + bb: Any = xx + yy: float = bb + assert repr(xx) == repr(bb) + assert repr(xx) == repr(yy) + for b in True, False: + boxed_bool: Any = b + assert type(boxed_bool) is bool + zz: float = boxed_bool + assert zz == int(b) + +def test_unboxing_failure() -> None: + boxed: Any = '1.5' + with assertRaises(TypeError): + x: float = boxed + +def identity(x: float) -> float: + return x + +def test_coerce_from_int_literal() -> None: + assert identity(34) == 34.0 + assert identity(-1) == -1.0 + +def test_coerce_from_short_tagged_int() -> None: + n = int() - 17 + assert identity(n) == -17.0 + for i in range(-300, 300): + assert identity(i) == float(i) + +def test_coerce_from_long_tagged_int() -> None: + n = int() + 2**100 + x = identity(n) + assert repr(x) == '1.2676506002282294e+30' + n = int() - 2**100 + y = identity(n) + assert repr(y) == '-1.2676506002282294e+30' + +def test_coerce_from_very_long_tagged_int() -> None: + n = int() + 10**1000 + with assertRaises(OverflowError, "int too large to convert to float"): + identity(n) + with assertRaises(OverflowError, "int too large to convert to float"): + identity(int(n)) + n = int() - 10**1000 + with assertRaises(OverflowError, "int too large to convert to float"): + identity(n) + with assertRaises(OverflowError, "int too large to convert to float"): + identity(int(n)) + +def test_explicit_conversion_from_int() -> None: + float_any: Any = float + a = [0, 1, 2, 3, -1, -2, 13257, -928745] + for n in range(1, 100): + for delta in -1, 0, 1, 
2342345: + a.append(2**n + delta) + a.append(-2**n + delta) + for x in a: + assert repr(float(x)) == repr(float_any(x)) + +def test_explicit_conversion_to_int() -> None: + int_any: Any = int + for x in float_vals: + if math.isinf(x): + with assertRaises(OverflowError, "cannot convert float infinity to integer"): + int(x) + elif math.isnan(x): + with assertRaises(ValueError, "cannot convert float NaN to integer"): + int(x) + else: + assert repr(int(x)) == repr(int_any(x)) + + # Test some edge cases + assert 2**30 == int(2.0**30 + int()) + assert 2**30 - 1 == int(1073741823.9999999 + int()) # math.nextafter(2.0**30, 0)) + assert -2**30 - 1 == int(-2.0**30 - 1 + int()) + assert -2**30 == int(-1073741824.9999998 + int()) # math.nextafter(-2.0**30 - 1, 0) + assert 2**62 == int(2.0**62 + int()) + assert 2**62 == int(2.0**62 - 1 + int()) + assert -2**62 == int(-2.0**62 + int()) + assert -2**62 == int(-2.0**62 - 1 + int()) + def str_to_float(x: str) -> float: return float(x) -[file driver.py] -from native import str_to_float +def test_str_to_float() -> None: + assert str_to_float("1") == 1.0 + assert str_to_float("1.234567") == 1.234567 + assert str_to_float("44324") == 44324.0 + assert str_to_float("23.4") == 23.4 + assert str_to_float("-43.44e-4") == -43.44e-4 + assert str_to_float("-43.44e-4") == -43.44e-4 + assert math.isinf(str_to_float("inf")) + assert math.isinf(str_to_float("-inf")) + assert str_to_float("inf") > 0.0 + assert str_to_float("-inf") < 0.0 + assert math.isnan(str_to_float("nan")) + assert math.isnan(str_to_float("NaN")) + assert repr(str_to_float("-0.0")) == "-0.0" -assert str_to_float("1") == 1.0 -assert str_to_float("1.234567") == 1.234567 -assert str_to_float("44324") == 44324.0 -assert str_to_float("23.4") == 23.4 -assert str_to_float("-43.44e-4") == -43.44e-4 - -[case testFloatArithmetic] def test_abs() -> None: assert abs(0.0) == 0.0 assert abs(-1.234567) == 1.234567 assert abs(44324.732) == 44324.732 assert abs(-23.4) == 23.4 assert abs(-43.44e-4) == 43.44e-4 + abs_any: Any = abs + for x in float_vals: + assert repr(abs(x)) == repr(abs_any(x)) def test_float_min_max() -> None: - x: float = 20.0 - y: float = 30.0 - assert min(x, y) == 20.0 - assert min(y, x) == 20.0 - assert max(x, y) == 30.0 - assert max(y, x) == 30.0 + for x in float_vals: + for y in float_vals: + min_any: Any = min + assert repr(min(x, y)) == repr(min_any(x, y)) + max_any: Any = max + assert repr(max(x, y)) == repr(max_any(x, y)) + +def default(x: float = 2) -> float: + return x + 1 + +def test_float_default_value() -> None: + assert default(1.2) == 2.2 + for i in range(-200, 200): + assert default(float(i)) == i + 1 + assert default() == 3.0 + +def test_float_default_value_wrapper() -> None: + f: Any = default + assert f(1.2) == 2.2 + for i in range(-200, 200): + assert f(float(i)) == i + 1 + assert f() == 3.0 + +class C: + def __init__(self, x: float) -> None: + self.x = x + +def test_float_attr() -> None: + for i in range(-200, 200): + f = float(i) + c = C(f) + assert c.x == f + a: Any = c + assert a.x == f + c.x = FLOAT_MAGIC + assert c.x == FLOAT_MAGIC + assert a.x == FLOAT_MAGIC + a.x = 1.0 + assert a.x == 1.0 + a.x = FLOAT_MAGIC + assert a.x == FLOAT_MAGIC + +class D: + def __init__(self, x: float) -> None: + if x: + self.x = x + +def test_float_attr_maybe_undefned() -> None: + for i in range(-200, 200): + if i == 0: + d = D(0.0) + with assertRaises(AttributeError): + d.x + a: Any = d + with assertRaises(AttributeError): + a.x + d.x = FLOAT_MAGIC + assert d.x == FLOAT_MAGIC + assert a.x == 
FLOAT_MAGIC + d.x = 0.0 + assert d.x == 0.0 + assert a.x == 0.0 + a.x = FLOAT_MAGIC + assert a.x == FLOAT_MAGIC + d = D(0.0) + a = cast(Any, d) + a.x = FLOAT_MAGIC + assert d.x == FLOAT_MAGIC + else: + f = float(i) + d = D(f) + assert d.x == f + a2: Any = d + assert a2.x == f + +def f(x: float) -> float: + return x + 1 + +def test_return_values() -> None: + a: Any = f + for i in range(-200, 200): + x = float(i) + assert f(x) == x + 1 + assert a(x) == x + 1 + for x in float_vals: + if not math.isnan(x): + assert f(x) == x + 1 + else: + assert math.isnan(f(x)) + +def exc() -> float: + raise IndexError('x') + +def test_exception() -> None: + with assertRaises(IndexError): + exc() + a: Any = exc + with assertRaises(IndexError): + a() + +def test_undefined_local_var() -> None: + if not int(): + x = -113.0 + assert x == -113.0 + if int(): + y = -113.0 + with assertRaises(UnboundLocalError, 'local variable "y" referenced before assignment'): + print(y) + if not int(): + x2 = -1.0 + assert x2 == -1.0 + if int(): + y2 = -1.0 + with assertRaises(UnboundLocalError, 'local variable "y2" referenced before assignment'): + print(y2) + +def test_tuples() -> None: + t1: tuple[float, float] = (1.5, 2.5) + assert t1 == tuple([1.5, 2.5]) + n = int() + 5 + t2: tuple[float, float, float, float] = (n, 1.5, -7, -113) + assert t2 == tuple([5.0, 1.5, -7.0, -113.0]) + +[case testFloatGlueMethodsAndInheritance] +from typing import Any +from typing_extensions import Final + +from mypy_extensions import trait + +from testutil import assertRaises + +MAGIC: Final = -113.0 + +class Base: + def foo(self) -> float: + return 5.0 + + def bar(self, x: float = 2.0) -> float: + return x + 1 + + def hoho(self, x: float) -> float: + return x - 1 + +class Derived(Base): + def foo(self, x: float = 5.0) -> float: + return x + 10 + + def bar(self, x: float = 3, y: float = 20) -> float: + return x + y + 2 + + def hoho(self, x: float = 7) -> float: + return x - 2 + +def test_derived_adds_bitmap() -> None: + b: Base = Derived() + assert b.foo() == 15 + +def test_derived_adds_another_default_arg() -> None: + b: Base = Derived() + assert b.bar() == 25 + assert b.bar(1) == 23 + assert b.bar(MAGIC) == MAGIC + 22 + +def test_derived_switches_arg_to_have_default() -> None: + b: Base = Derived() + assert b.hoho(5) == 3 + assert b.hoho(MAGIC) == MAGIC - 2 + +@trait +class T: + @property + def x(self) -> float: ... + @property + def y(self) -> float: ... 
+ +class C(T): + x: float = 1.0 + y: float = 4 + +def test_read_only_property_in_trait_implemented_as_attribute() -> None: + c = C() + c.x = 5.5 + assert c.x == 5.5 + c.x = MAGIC + assert c.x == MAGIC + assert c.y == 4 + c.y = 6.5 + assert c.y == 6.5 + t: T = C() + assert t.y == 4 + t = c + assert t.x == MAGIC + c.x = 55.5 + assert t.x == 55.5 + assert t.y == 6.5 + a: Any = c + assert a.x == 55.5 + assert a.y == 6.5 + a.x = 7.0 + a.y = 8.0 + assert a.x == 7 + assert a.y == 8 + +class D(T): + xx: float + + @property + def x(self) -> float: + return self.xx + + @property + def y(self) -> float: + raise TypeError + +def test_read_only_property_in_trait_implemented_as_property() -> None: + d = D() + d.xx = 5.0 + assert d.x == 5 + d.xx = MAGIC + assert d.x == MAGIC + with assertRaises(TypeError): + d.y + t: T = d + assert t.x == MAGIC + d.xx = 6.0 + assert t.x == 6 + with assertRaises(TypeError): + t.y + +@trait +class T2: + x: float + y: float + +class C2(T2): + pass + +def test_inherit_trait_attribute() -> None: + c = C2() + c.x = 5.0 + assert c.x == 5 + c.x = MAGIC + assert c.x == MAGIC + with assertRaises(AttributeError): + c.y + c.y = 6.0 + assert c.y == 6.0 + t: T2 = C2() + with assertRaises(AttributeError): + t.y + t = c + assert t.x == MAGIC + c.x = 55.0 + assert t.x == 55 + assert t.y == 6 + a: Any = c + assert a.x == 55 + assert a.y == 6 + a.x = 7.0 + a.y = 8.0 + assert a.x == 7 + assert a.y == 8 + +class D2(T2): + x: float + y: float = 4 + +def test_implement_trait_attribute() -> None: + d = D2() + d.x = 5.0 + assert d.x == 5 + d.x = MAGIC + assert d.x == MAGIC + assert d.y == 4 + d.y = 6.0 + assert d.y == 6 + t: T2 = D2() + assert t.y == 4 + t = d + assert t.x == MAGIC + d.x = 55.0 + assert t.x == 55 + assert t.y == 6 + a: Any = d + assert a.x == 55 + assert a.y == 6 + a.x = 7.0 + a.y = 8.0 + assert a.x == 7 + assert a.y == 8 diff --git a/mypyc/test-data/run-i64.test b/mypyc/test-data/run-i64.test index cd4ac19532d2..bcde39fed5ff 100644 --- a/mypyc/test-data/run-i64.test +++ b/mypyc/test-data/run-i64.test @@ -315,7 +315,8 @@ def test_explicit_conversion_from_float() -> None: assert from_float(0.0) == 0 assert from_float(1.456) == 1 assert from_float(-1234.567) == -1234 - assert from_float(2**63 - 1) == 2**63 - 1 + # Subtract 1024 due to limited precision of 64-bit floats + assert from_float(2**63 - 1024) == 2**63 - 1024 assert from_float(-2**63) == -2**63 # The error message could be better, but this is acceptable with assertRaises(OverflowError, "int too large to convert to i64"): diff --git a/mypyc/test-data/run-integers.test b/mypyc/test-data/run-integers.test index c65f36110b46..d575e141b567 100644 --- a/mypyc/test-data/run-integers.test +++ b/mypyc/test-data/run-integers.test @@ -173,6 +173,7 @@ assert test_isinstance_int_and_not_bool(1) == True [case testIntOps] from typing import Any +from testutil import assertRaises def check_and(x: int, y: int) -> None: # eval() can be trusted to calculate expected result @@ -390,7 +391,7 @@ def test_no_op_conversion() -> None: for x in 1, 55, -1, -7, 1 << 50, 1 << 101, -(1 << 50), -(1 << 101): assert no_op_conversion(x) == x -def test_divide() -> None: +def test_floor_divide() -> None: for x in range(-100, 100): for y in range(-100, 100): if y != 0: @@ -470,6 +471,25 @@ def test_floor_divide_by_literal() -> None: assert div_by_3(i) == i_boxed // int('3') assert div_by_4(i) == i_boxed // int('4') +def test_true_divide() -> None: + for x in range(-150, 100): + for y in range(-150, 100): + if y != 0: + assert x / y == getattr(x, 
"__truediv__")(y) + large1 = (123 + int())**123 + large2 = (121 + int())**121 + assert large1 / large2 == getattr(large1, "__truediv__")(large2) + assert large1 / 135 == getattr(large1, "__truediv__")(135) + assert large1 / -2 == getattr(large1, "__truediv__")(-2) + assert 17 / large2 == getattr(17, "__truediv__")(large2) + + huge = 10**1000 + int() + with assertRaises(OverflowError, "integer division result too large for a float"): + huge / 2 + with assertRaises(OverflowError, "integer division result too large for a float"): + huge / -2 + assert 1 / huge == 0.0 + [case testIntMinMax] def test_int_min_max() -> None: x: int = 200 diff --git a/mypyc/test-data/run-math.test b/mypyc/test-data/run-math.test new file mode 100644 index 000000000000..64d5c1812afa --- /dev/null +++ b/mypyc/test-data/run-math.test @@ -0,0 +1,88 @@ +# Test cases for the math module (compile and run) + +[case testMathOps] +from typing import Any, Callable +from typing_extensions import Final +import math +from testutil import assertRaises, float_vals, assertDomainError, assertMathRangeError + +pymath: Any = math + +def validate_one_arg(test: Callable[[float], float], validate: Callable[[float], float]) -> None: + """Ensure that test and validate behave the same for various float args.""" + for x in float_vals: + try: + expected = validate(x) + except Exception as e: + try: + test(x) + assert False, f"no exception raised for {x!r}, expected {e!r}" + except Exception as e2: + assert repr(e) == repr(e2), f"actual for {x!r}: {e2!r}, expected: {e!r}" + continue + actual = test(x) + assert repr(actual) == repr(expected), ( + f"actual for {x!r}: {actual!r}, expected {expected!r}") + +def validate_two_arg(test: Callable[[float, float], float], + validate: Callable[[float, float], float]) -> None: + """Ensure that test and validate behave the same for various float args.""" + for x in float_vals: + for y in float_vals: + args = f"({x!r}, {y!r})" + try: + expected = validate(x, y) + except Exception as e: + try: + test(x, y) + assert False, f"no exception raised for {args}, expected {e!r}" + except Exception as e2: + assert repr(e) == repr(e2), f"actual for {args}: {e2!r}, expected: {e!r}" + continue + try: + actual = test(x, y) + except Exception as e: + assert False, f"no exception expected for {args}, got {e!r}" + assert repr(actual) == repr(expected), ( + f"actual for {args}: {actual!r}, expected {expected!r}") + +def test_sqrt() -> None: + validate_one_arg(lambda x: math.sqrt(x), pymath.sqrt) + +def test_sin() -> None: + validate_one_arg(lambda x: math.sin(x), pymath.sin) + +def test_cos() -> None: + validate_one_arg(lambda x: math.cos(x), pymath.cos) + +def test_tan() -> None: + validate_one_arg(lambda x: math.tan(x), pymath.tan) + +def test_exp() -> None: + validate_one_arg(lambda x: math.exp(x), pymath.exp) + +def test_log() -> None: + validate_one_arg(lambda x: math.log(x), pymath.log) + +def test_floor() -> None: + validate_one_arg(lambda x: math.floor(x), pymath.floor) + +def test_ceil() -> None: + validate_one_arg(lambda x: math.ceil(x), pymath.ceil) + +def test_fabs() -> None: + validate_one_arg(lambda x: math.fabs(x), pymath.fabs) + +def test_pow() -> None: + validate_two_arg(lambda x, y: math.pow(x, y), pymath.pow) + +def test_copysign() -> None: + validate_two_arg(lambda x, y: math.copysign(x, y), pymath.copysign) + +def test_isinf() -> None: + for x in float_vals: + assert repr(math.isinf(x)) == repr(pymath.isinf(x)) + +def test_isnan() -> None: + for x in float_vals: + assert repr(math.isnan(x)) == 
repr(pymath.isnan(x)) diff --git a/mypyc/test-data/run-sets.test b/mypyc/test-data/run-sets.test index 56c946933fac..8d178d03a75b 100644 --- a/mypyc/test-data/run-sets.test +++ b/mypyc/test-data/run-sets.test @@ -141,8 +141,8 @@ def test_in_set() -> None: assert main_set(item), f"{item!r} should be in set_main" assert not main_negated_set(item), item - assert non_final_name_set(non_const) global non_const + assert non_final_name_set(non_const) non_const = "updated" assert non_final_name_set("updated") diff --git a/mypyc/test/test_irbuild.py b/mypyc/test/test_irbuild.py index cb5e690eed55..86bdf7c590d8 100644 --- a/mypyc/test/test_irbuild.py +++ b/mypyc/test/test_irbuild.py @@ -31,6 +31,7 @@ "irbuild-set.test", "irbuild-str.test", "irbuild-bytes.test", + "irbuild-float.test", "irbuild-statements.test", "irbuild-nested.test", "irbuild-classes.test", diff --git a/mypyc/test/test_run.py b/mypyc/test/test_run.py index 6a5ab87fca49..9598b9865f1e 100644 --- a/mypyc/test/test_run.py +++ b/mypyc/test/test_run.py @@ -11,7 +11,7 @@ import subprocess import sys import time -from typing import Any, Iterator, cast +from typing import Any, Iterator from mypy import build from mypy.errors import CompileError @@ -42,6 +42,7 @@ "run-i64.test", "run-i32.test", "run-floats.test", + "run-math.test", "run-bools.test", "run-strings.test", "run-bytes.test", @@ -108,15 +109,13 @@ def run_setup(script_name: str, script_args: list[str]) -> bool: finally: sys.argv = save_argv except SystemExit as e: - # typeshed reports code as being an int but that is wrong - code = cast(Any, e).code # distutils converts KeyboardInterrupt into a SystemExit with # "interrupted" as the argument. Convert it back so that # pytest will exit instead of just failing the test. - if code == "interrupted": + if e.code == "interrupted": raise KeyboardInterrupt from e - return code == 0 or code is None + return e.code == 0 or e.code is None return True diff --git a/mypyc/transform/exceptions.py b/mypyc/transform/exceptions.py index 2851955ff38f..bf5e60659f8f 100644 --- a/mypyc/transform/exceptions.py +++ b/mypyc/transform/exceptions.py @@ -23,6 +23,7 @@ Branch, CallC, ComparisonOp, + Float, GetAttr, Integer, LoadErrorValue, @@ -33,7 +34,7 @@ TupleGet, Value, ) -from mypyc.ir.rtypes import RTuple, bool_rprimitive +from mypyc.ir.rtypes import RTuple, bool_rprimitive, is_float_rprimitive from mypyc.primitives.exc_ops import err_occurred_op from mypyc.primitives.registry import CFunctionDescription @@ -173,7 +174,11 @@ def insert_overlapping_error_value_check(ops: list[Op], target: Value) -> Compar ops.append(item) return insert_overlapping_error_value_check(ops, item) else: - errvalue = Integer(int(typ.c_undefined), rtype=typ) + errvalue: Value + if is_float_rprimitive(target.type): + errvalue = Float(float(typ.c_undefined)) + else: + errvalue = Integer(int(typ.c_undefined), rtype=typ) op = ComparisonOp(target, errvalue, ComparisonOp.EQ) ops.append(op) return op diff --git a/pyproject.toml b/pyproject.toml index 328b9bf159a1..20301bf64216 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -13,7 +13,7 @@ requires = [ # the following is from build-requirements.txt "types-psutil", "types-setuptools", - "types-typed-ast>=1.5.8,<1.6.0", + "types-typed-ast>=1.5.8.5,<1.6.0", ] build-backend = "setuptools.build_meta" diff --git a/setup.py b/setup.py index 516a639f3bb2..5d5ea06fb714 100644 --- a/setup.py +++ b/setup.py @@ -6,6 +6,7 @@ import os import os.path import sys +from typing import TYPE_CHECKING, Any if sys.version_info < (3, 7, 0): 
sys.stderr.write("ERROR: You need Python 3.7 or later to use mypy.\n") @@ -17,11 +18,14 @@ # This requires setuptools when building; setuptools is not needed # when installing from a wheel file (though it is still needed for # alternative forms of installing, as suggested by README.md). -from setuptools import find_packages, setup +from setuptools import Extension, find_packages, setup from setuptools.command.build_py import build_py from mypy.version import __version__ as version +if TYPE_CHECKING: + from typing_extensions import TypeGuard + description = "Optional static typing for Python" long_description = """ Mypy -- Optional Static Typing for Python @@ -36,6 +40,10 @@ """.lstrip() +def is_list_of_setuptools_extension(items: list[Any]) -> TypeGuard[list[Extension]]: + return all(isinstance(item, Extension) for item in items) + + def find_package_data(base, globs, root="mypy"): """Find all interesting data files, for setup(package_data=) @@ -166,6 +174,8 @@ def run(self): # our Appveyor builds run out of memory sometimes. multi_file=sys.platform == "win32" or force_multifile, ) + assert is_list_of_setuptools_extension(ext_modules), "Expected mypycify to use setuptools" + else: ext_modules = [] diff --git a/test-data/unit/README.md b/test-data/unit/README.md index 6cf0b1bb26cf..97680c949bef 100644 --- a/test-data/unit/README.md +++ b/test-data/unit/README.md @@ -36,7 +36,7 @@ the error message - `W: ...` and `N: ...` works exactly like `E: ...`, but report a warning and a note respectively - lines that don't contain the above should cause no type check errors - optional `[builtins fixtures/...]` tells the type checker to use -stubs from the indicated file (see Fixtures section below) +`builtins` stubs from the indicated file (see Fixtures section below) - optional `[out]` is an alternative to the `# E: ` notation: it indicates that any text after it contains the expected type checking error messages. Usually, `# E: ` is preferred because it makes it easier to associate the @@ -65,7 +65,7 @@ Where the stubs for builtins come from for a given test: - The builtins used by default in unit tests live in `test-data/unit/lib-stub`. -- Individual test cases can override the builtins stubs by using +- Individual test cases can override the `builtins` stubs by using `[builtins fixtures/foo.pyi]`; this targets files in `test-data/unit/fixtures`. Feel free to modify existing files there or create new ones as you deem fit. @@ -77,6 +77,21 @@ Where the stubs for builtins come from for a given test: addition with other mypy developers, as additions could slow down the test suite. +- Some tests choose to customize the standard library in a way that's local to the test: + ``` + [case testFoo] + ... + [file builtins.py] + class int: + def next_fibonacci() -> int: pass + ``` + Another possible syntax is: + ``` + [fixture builtins.py] + ``` + Whether you use `[file ...]` or `[fixture ...]` depends on whether you want + the file to be part of the tested corpus (e.g. contribute to `[out]` section) + or only support the test. Running tests and linting ------------------------- diff --git a/test-data/unit/check-async-await.test b/test-data/unit/check-async-await.test index 40efe2d2cece..7356fa59c86d 100644 --- a/test-data/unit/check-async-await.test +++ b/test-data/unit/check-async-await.test @@ -945,11 +945,15 @@ async def bar(x: Union[A, B]) -> None: [typing fixtures/typing-async.pyi] [case testInvalidComprehensionNoCrash] +# flags: --show-error-codes async def foo(x: int) -> int: ... 
-crasher = [await foo(x) for x in [1, 2, 3]] # E: "await" outside function +# These are allowed in some cases: +top_level = await foo(1) # E: "await" outside function [top-level-await] +crasher = [await foo(x) for x in [1, 2, 3]] # E: "await" outside function [top-level-await] def bad() -> None: + # These are always critical / syntax issues: y = [await foo(x) for x in [1, 2, 3]] # E: "await" outside coroutine ("async def") async def good() -> None: y = [await foo(x) for x in [1, 2, 3]] # OK diff --git a/test-data/unit/check-attr.test b/test-data/unit/check-attr.test index f6ef289e792e..83a441aca233 100644 --- a/test-data/unit/check-attr.test +++ b/test-data/unit/check-attr.test @@ -1867,3 +1867,81 @@ D(1, "").a = 2 # E: Cannot assign to final attribute "a" D(1, "").b = "2" # E: Cannot assign to final attribute "b" [builtins fixtures/property.pyi] + +[case testEvolve] +import attr + +class Base: + pass + +class Derived(Base): + pass + +class Other: + pass + +@attr.s(auto_attribs=True) +class C: + name: str + b: Base + +c = C(name='foo', b=Derived()) +c = attr.evolve(c) +c = attr.evolve(c, name='foo') +c = attr.evolve(c, 'foo') # E: Too many positional arguments for "evolve" of "C" +c = attr.evolve(c, b=Derived()) +c = attr.evolve(c, b=Base()) +c = attr.evolve(c, b=Other()) # E: Argument "b" to "evolve" of "C" has incompatible type "Other"; expected "Base" +c = attr.evolve(c, name=42) # E: Argument "name" to "evolve" of "C" has incompatible type "int"; expected "str" +c = attr.evolve(c, foobar=42) # E: Unexpected keyword argument "foobar" for "evolve" of "C" + +# test passing instance as 'inst' kw +c = attr.evolve(inst=c, name='foo') +c = attr.evolve(not_inst=c, name='foo') # E: Missing positional argument "inst" in call to "evolve" + +# test determining type of first argument's expression from something that's not NameExpr +def f() -> C: + return c + +c = attr.evolve(f(), name='foo') + +[builtins fixtures/attr.pyi] + +[case testEvolveFromNonAttrs] +import attr + +attr.evolve(42, name='foo') # E: Argument 1 to "evolve" has incompatible type "int"; expected an attrs class +attr.evolve(None, name='foo') # E: Argument 1 to "evolve" has incompatible type "None"; expected an attrs class +[case testEvolveFromAny] +from typing import Any +import attr + +any: Any = 42 +ret = attr.evolve(any, name='foo') +reveal_type(ret) # N: Revealed type is "Any" + +[typing fixtures/typing-medium.pyi] + +[case testEvolveVariants] +from typing import Any +import attr +import attrs + + +@attr.s(auto_attribs=True) +class C: + name: str + +c = C(name='foo') + +c = attr.assoc(c, name='test') +c = attr.assoc(c, name=42) # E: Argument "name" to "assoc" of "C" has incompatible type "int"; expected "str" + +c = attrs.evolve(c, name='test') +c = attrs.evolve(c, name=42) # E: Argument "name" to "evolve" of "C" has incompatible type "int"; expected "str" + +c = attrs.assoc(c, name='test') +c = attrs.assoc(c, name=42) # E: Argument "name" to "assoc" of "C" has incompatible type "int"; expected "str" + +[builtins fixtures/attr.pyi] +[typing fixtures/typing-medium.pyi] diff --git a/test-data/unit/check-classes.test b/test-data/unit/check-classes.test index d5fb830487e8..9e45da717426 100644 --- a/test-data/unit/check-classes.test +++ b/test-data/unit/check-classes.test @@ -3464,9 +3464,9 @@ class ProUser(User): pass class BasicUser(User): pass U = TypeVar('U', bound=Union[ProUser, BasicUser]) def process(cls: Type[U]): - cls.foo() # E: "Type[U]" has no attribute "foo" + cls.foo() obj = cls() - cls.bar(obj) # E: "Type[U]" has no 
attribute "bar" + cls.bar(obj) cls.mro() # Defined in class type cls.error # E: "Type[U]" has no attribute "error" [builtins fixtures/classmethod.pyi] @@ -4524,6 +4524,72 @@ WithMeta().a # E: "WithMeta" has no attribute "a" t: Type[WithMeta] t.unknown # OK +[case testUnpackIterableClassWithOverloadedIter] +from typing import Generic, overload, Iterator, TypeVar, Union + +AnyNum = TypeVar('AnyNum', int, float) + +class Foo(Generic[AnyNum]): + @overload + def __iter__(self: Foo[int]) -> Iterator[float]: ... + @overload + def __iter__(self: Foo[float]) -> Iterator[int]: ... + def __iter__(self) -> Iterator[Union[float, int]]: + ... + +a, b, c = Foo[int]() +reveal_type(a) # N: Revealed type is "builtins.float" +reveal_type(b) # N: Revealed type is "builtins.float" +reveal_type(c) # N: Revealed type is "builtins.float" + +x, y = Foo[float]() +reveal_type(x) # N: Revealed type is "builtins.int" +reveal_type(y) # N: Revealed type is "builtins.int" +[builtins fixtures/list.pyi] + +[case testUnpackIterableClassWithOverloadedIter2] +from typing import Union, TypeVar, Generic, overload, Iterator + +X = TypeVar('X') + +class Foo(Generic[X]): + @overload + def __iter__(self: Foo[str]) -> Iterator[int]: ... # type: ignore + @overload + def __iter__(self: Foo[X]) -> Iterator[str]: ... + def __iter__(self) -> Iterator[Union[int, str]]: + ... + +a, b, c = Foo[str]() +reveal_type(a) # N: Revealed type is "builtins.int" +reveal_type(b) # N: Revealed type is "builtins.int" +reveal_type(c) # N: Revealed type is "builtins.int" + +x, y = Foo[float]() +reveal_type(x) # N: Revealed type is "builtins.str" +reveal_type(y) # N: Revealed type is "builtins.str" +[builtins fixtures/list.pyi] + +[case testUnpackIterableRegular] +from typing import TypeVar, Generic, Iterator + +X = TypeVar('X') + +class Foo(Generic[X]): + def __iter__(self) -> Iterator[X]: + ... + +a, b = Foo[int]() +reveal_type(a) # N: Revealed type is "builtins.int" +reveal_type(b) # N: Revealed type is "builtins.int" +[builtins fixtures/list.pyi] + +[case testUnpackNotIterableClass] +class Foo: ... + +a, b, c = Foo() # E: "Foo" object is not iterable +[builtins fixtures/list.pyi] + [case testMetaclassIterable] from typing import Iterable, Iterator @@ -7731,3 +7797,25 @@ class Element(Generic[_T]): class Bar(Foo): ... e: Element[Bar] reveal_type(e.elements) # N: Revealed type is "typing.Sequence[__main__.Element[__main__.Bar]]" + +[case testIterableUnpackingWithGetAttr] +from typing import Union, Tuple + +class C: + def __getattr__(self, name): + pass + +class D: + def f(self) -> C: + return C() + + def g(self) -> None: + # iter(x) looks up `__iter__` on the type of x rather than x itself, + # so this is correct behaviour. + # Instances of C should not be treated as being iterable, + # despite having a __getattr__ method + # that could allow for arbitrary attributes to be accessed on instances, + # since `type(C()).__iter__` still raises AttributeError at runtime, + # and that's what matters. 
+ a, b = self.f() # E: "C" has no attribute "__iter__" (not iterable) +[builtins fixtures/tuple.pyi] diff --git a/test-data/unit/check-dataclass-transform.test b/test-data/unit/check-dataclass-transform.test index 2a7fad1da992..be6b46d70846 100644 --- a/test-data/unit/check-dataclass-transform.test +++ b/test-data/unit/check-dataclass-transform.test @@ -279,8 +279,7 @@ class Bad: bad1: int = field(alias=some_str()) # E: "alias" argument to dataclass field must be a string literal bad2: int = field(kw_only=some_bool()) # E: "kw_only" argument must be a boolean literal -# this metadata should only exist for dataclasses.dataclass classes -Foo.__dataclass_fields__ # E: "Type[Foo]" has no attribute "__dataclass_fields__" +reveal_type(Foo.__dataclass_fields__) # N: Revealed type is "builtins.dict[builtins.str, Any]" [typing fixtures/typing-full.pyi] [builtins fixtures/dataclasses.pyi] @@ -329,6 +328,38 @@ Foo(a=1, b='bye') [typing fixtures/typing-full.pyi] [builtins fixtures/dataclasses.pyi] +[case testDataclassTransformFieldSpecifierImplicitInit] +# flags: --python-version 3.11 +from typing import dataclass_transform, Literal, overload + +def init(*, init: Literal[True] = True): ... +def no_init(*, init: Literal[False] = False): ... + +@overload +def field_overload(*, custom: None, init: Literal[True] = True): ... +@overload +def field_overload(*, custom: str, init: Literal[False] = False): ... +def field_overload(*, custom, init): ... + +@dataclass_transform(field_specifiers=(init, no_init, field_overload)) +def my_dataclass(cls): return cls + +@my_dataclass +class Foo: + a: int = init() + b: int = field_overload(custom=None) + + bad1: int = no_init() + bad2: int = field_overload(custom="bad2") + +reveal_type(Foo) # N: Revealed type is "def (a: builtins.int, b: builtins.int) -> __main__.Foo" +Foo(a=1, b=2) +Foo(a=1, b=2, bad1=0) # E: Unexpected keyword argument "bad1" for "Foo" +Foo(a=1, b=2, bad2=0) # E: Unexpected keyword argument "bad2" for "Foo" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + [case testDataclassTransformOverloadsDecoratorOnOverload] # flags: --python-version 3.11 from typing import dataclass_transform, overload, Any, Callable, Type, Literal @@ -416,7 +447,11 @@ from typing import dataclass_transform @dataclass_transform(frozen_default=True) class Dataclass(type): ... -class Person(metaclass=Dataclass, kw_only=True): +# Note that PEP 681 states that a class that directly specifies a dataclass_transform-decorated +# metaclass should be treated as neither frozen nor unfrozen. For Person to have frozen semantics, +# it may not directly specify the metaclass. +class BaseDataclass(metaclass=Dataclass): ... 
+class Person(BaseDataclass, kw_only=True): name: str age: int @@ -452,3 +487,569 @@ Foo(1) # E: Too many arguments for "Foo" [typing fixtures/typing-full.pyi] [builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformTypeCheckingInFunction] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type, TYPE_CHECKING + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +@model +class FunctionModel: + if TYPE_CHECKING: + string_: str + integer_: int + else: + string_: tuple + integer_: tuple + +FunctionModel(string_="abc", integer_=1) +FunctionModel(string_="abc", integer_=tuple()) # E: Argument "integer_" to "FunctionModel" has incompatible type "Tuple[, ...]"; expected "int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformNegatedTypeCheckingInFunction] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type, TYPE_CHECKING + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +@model +class FunctionModel: + if not TYPE_CHECKING: + string_: tuple + integer_: tuple + else: + string_: str + integer_: int + +FunctionModel(string_="abc", integer_=1) +FunctionModel(string_="abc", integer_=tuple()) # E: Argument "integer_" to "FunctionModel" has incompatible type "Tuple[, ...]"; expected "int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + + +[case testDataclassTransformTypeCheckingInBaseClass] +# flags: --python-version 3.11 +from typing import dataclass_transform, TYPE_CHECKING + +@dataclass_transform() +class ModelBase: + ... + +class BaseClassModel(ModelBase): + if TYPE_CHECKING: + string_: str + integer_: int + else: + string_: tuple + integer_: tuple + +BaseClassModel(string_="abc", integer_=1) +BaseClassModel(string_="abc", integer_=tuple()) # E: Argument "integer_" to "BaseClassModel" has incompatible type "Tuple[, ...]"; expected "int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformNegatedTypeCheckingInBaseClass] +# flags: --python-version 3.11 +from typing import dataclass_transform, TYPE_CHECKING + +@dataclass_transform() +class ModelBase: + ... + +class BaseClassModel(ModelBase): + if not TYPE_CHECKING: + string_: tuple + integer_: tuple + else: + string_: str + integer_: int + +BaseClassModel(string_="abc", integer_=1) +BaseClassModel(string_="abc", integer_=tuple()) # E: Argument "integer_" to "BaseClassModel" has incompatible type "Tuple[, ...]"; expected "int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformTypeCheckingInMetaClass] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type, TYPE_CHECKING + +@dataclass_transform() +class ModelMeta(type): + ... + +class ModelBaseWithMeta(metaclass=ModelMeta): + ... + +class MetaClassModel(ModelBaseWithMeta): + if TYPE_CHECKING: + string_: str + integer_: int + else: + string_: tuple + integer_: tuple + +MetaClassModel(string_="abc", integer_=1) +MetaClassModel(string_="abc", integer_=tuple()) # E: Argument "integer_" to "MetaClassModel" has incompatible type "Tuple[, ...]"; expected "int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformNegatedTypeCheckingInMetaClass] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type, TYPE_CHECKING + +@dataclass_transform() +class ModelMeta(type): + ... + +class ModelBaseWithMeta(metaclass=ModelMeta): + ... 
+ +class MetaClassModel(ModelBaseWithMeta): + if not TYPE_CHECKING: + string_: tuple + integer_: tuple + else: + string_: str + integer_: int + +MetaClassModel(string_="abc", integer_=1) +MetaClassModel(string_="abc", integer_=tuple()) # E: Argument "integer_" to "MetaClassModel" has incompatible type "Tuple[, ...]"; expected "int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformStaticConditionalAttributes] +# flags: --python-version 3.11 --always-true TRUTH +from typing import dataclass_transform, Type, TYPE_CHECKING + +TRUTH = False # Is set to --always-true + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +@model +class FunctionModel: + if TYPE_CHECKING: + present_1: int + else: + skipped_1: int + if True: # Mypy does not know if it is True or False, so the block is used + present_2: int + if False: # Mypy does not know if it is True or False, so the block is used + present_3: int + if not TRUTH: + skipped_2: int + else: + present_4: int + +FunctionModel( + present_1=1, + present_2=2, + present_3=3, + present_4=4, +) +FunctionModel() # E: Missing positional arguments "present_1", "present_2", "present_3", "present_4" in call to "FunctionModel" +FunctionModel( # E: Unexpected keyword argument "skipped_1" for "FunctionModel" + present_1=1, + present_2=2, + present_3=3, + present_4=4, + skipped_1=5, +) + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + + +[case testDataclassTransformStaticDeterministicConditionalElifAttributes] +# flags: --python-version 3.11 --always-true TRUTH --always-false LIE +from typing import dataclass_transform, Type, TYPE_CHECKING + +TRUTH = False # Is set to --always-true +LIE = True # Is set to --always-false + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +@model +class FunctionModel: + if TYPE_CHECKING: + present_1: int + elif TRUTH: + skipped_1: int + else: + skipped_2: int + if LIE: + skipped_3: int + elif TRUTH: + present_2: int + else: + skipped_4: int + if LIE: + skipped_5: int + elif LIE: + skipped_6: int + else: + present_3: int + +FunctionModel( + present_1=1, + present_2=2, + present_3=3, +) + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformStaticNotDeterministicConditionalElifAttributes] +# flags: --python-version 3.11 --always-true TRUTH --always-false LIE +from typing import dataclass_transform, Type, TYPE_CHECKING + +TRUTH = False # Is set to --always-true +LIE = True # Is set to --always-false + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +@model +class FunctionModel: + if 123: # Mypy does not know if it is True or False, so this block is used + present_1: int + elif TRUTH: # Mypy does not know if previous condition is True or False, so it uses also this block + present_2: int + else: # Previous block is for sure True, so this block is skipped + skipped_1: int + if 123: + present_3: int + elif 123: + present_4: int + else: + present_5: int + if 123: # Mypy does not know if it is True or False, so this block is used + present_6: int + elif LIE: # This is for sure False, so the block is skipped used + skipped_2: int + else: # None of the conditions above for sure True, so this block is used + present_7: int + +FunctionModel( + present_1=1, + present_2=2, + present_3=3, + present_4=4, + present_5=5, + present_6=6, + present_7=7, +) + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case 
testDataclassTransformFunctionConditionalAttributes] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +def condition() -> bool: + return True + +@model +class FunctionModel: + if condition(): + x: int + y: int + z1: int + else: + x: str # E: Name "x" already defined on line 14 + y: int # E: Name "y" already defined on line 15 + z2: int + +FunctionModel(x=1, y=2, z1=3, z2=4) + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + + +[case testDataclassTransformNegatedFunctionConditionalAttributes] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type + +@dataclass_transform() +def model(cls: Type) -> Type: + return cls + +def condition() -> bool: + return True + +@model +class FunctionModel: + if not condition(): + x: int + y: int + z1: int + else: + x: str # E: Name "x" already defined on line 14 + y: int # E: Name "y" already defined on line 15 + z2: int + +FunctionModel(x=1, y=2, z1=3, z2=4) + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformDirectMetaclassNeitherFrozenNorNotFrozen] +# flags: --python-version 3.11 +from typing import dataclass_transform, Type + +@dataclass_transform() +class Meta(type): ... +class Base(metaclass=Meta): + base: int +class Foo(Base, frozen=True): + foo: int +class Bar(Base, frozen=False): + bar: int + + +foo = Foo(0, 1) +foo.foo = 5 # E: Property "foo" defined in "Foo" is read-only +foo.base = 6 +reveal_type(foo.base) # N: Revealed type is "builtins.int" +bar = Bar(0, 1) +bar.bar = 5 +bar.base = 6 +reveal_type(bar.base) # N: Revealed type is "builtins.int" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformSimpleDescriptor] +# flags: --python-version 3.11 + +from typing import dataclass_transform, overload, Any + +@dataclass_transform() +def my_dataclass(cls): ... + +class Desc: + @overload + def __get__(self, instance: None, owner: Any) -> Desc: ... + @overload + def __get__(self, instance: object, owner: Any) -> str: ... + def __get__(self, instance: object | None, owner: Any) -> Desc | str: ... + + def __set__(self, instance: Any, value: str) -> None: ... + +@my_dataclass +class C: + x: Desc + y: int + +C(x='x', y=1) +C(x=1, y=1) # E: Argument "x" to "C" has incompatible type "int"; expected "str" +reveal_type(C(x='x', y=1).x) # N: Revealed type is "builtins.str" +reveal_type(C(x='x', y=1).y) # N: Revealed type is "builtins.int" +reveal_type(C.x) # N: Revealed type is "__main__.Desc" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformUnannotatedDescriptor] +# flags: --python-version 3.11 + +from typing import dataclass_transform, overload, Any + +@dataclass_transform() +def my_dataclass(cls): ... + +class Desc: + @overload + def __get__(self, instance: None, owner: Any) -> Desc: ... + @overload + def __get__(self, instance: object, owner: Any) -> str: ... + def __get__(self, instance: object | None, owner: Any) -> Desc | str: ... + + def __set__(*args, **kwargs): ... 
+ +@my_dataclass +class C: + x: Desc + y: int + +C(x='x', y=1) +C(x=1, y=1) +reveal_type(C(x='x', y=1).x) # N: Revealed type is "builtins.str" +reveal_type(C(x='x', y=1).y) # N: Revealed type is "builtins.int" +reveal_type(C.x) # N: Revealed type is "__main__.Desc" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformGenericDescriptor] +# flags: --python-version 3.11 + +from typing import dataclass_transform, overload, Any, TypeVar, Generic + +@dataclass_transform() +def my_dataclass(frozen: bool = False): ... + +T = TypeVar("T") + +class Desc(Generic[T]): + @overload + def __get__(self, instance: None, owner: Any) -> Desc[T]: ... + @overload + def __get__(self, instance: object, owner: Any) -> T: ... + def __get__(self, instance: object | None, owner: Any) -> Desc | T: ... + + def __set__(self, instance: Any, value: T) -> None: ... + +@my_dataclass() +class C: + x: Desc[str] + +C(x='x') +C(x=1) # E: Argument "x" to "C" has incompatible type "int"; expected "str" +reveal_type(C(x='x').x) # N: Revealed type is "builtins.str" +reveal_type(C.x) # N: Revealed type is "__main__.Desc[builtins.str]" + +@my_dataclass() +class D(C): + y: Desc[int] + +d = D(x='x', y=1) +reveal_type(d.x) # N: Revealed type is "builtins.str" +reveal_type(d.y) # N: Revealed type is "builtins.int" +reveal_type(D.x) # N: Revealed type is "__main__.Desc[builtins.str]" +reveal_type(D.y) # N: Revealed type is "__main__.Desc[builtins.int]" + +@my_dataclass(frozen=True) +class F: + x: Desc[str] = Desc() + +F(x='x') +F(x=1) # E: Argument "x" to "F" has incompatible type "int"; expected "str" +reveal_type(F(x='x').x) # N: Revealed type is "builtins.str" +reveal_type(F.x) # N: Revealed type is "__main__.Desc[builtins.str]" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformGenericDescriptorWithInheritance] +# flags: --python-version 3.11 + +from typing import dataclass_transform, overload, Any, TypeVar, Generic + +@dataclass_transform() +def my_dataclass(cls): ... + +T = TypeVar("T") + +class Desc(Generic[T]): + @overload + def __get__(self, instance: None, owner: Any) -> Desc[T]: ... + @overload + def __get__(self, instance: object, owner: Any) -> T: ... + def __get__(self, instance: object | None, owner: Any) -> Desc | T: ... + + def __set__(self, instance: Any, value: T) -> None: ... + +class Desc2(Desc[str]): + pass + +@my_dataclass +class C: + x: Desc2 + +C(x='x') +C(x=1) # E: Argument "x" to "C" has incompatible type "int"; expected "str" +reveal_type(C(x='x').x) # N: Revealed type is "builtins.str" +reveal_type(C.x) # N: Revealed type is "__main__.Desc[builtins.str]" + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformDescriptorWithDifferentGetSetTypes] +# flags: --python-version 3.11 + +from typing import dataclass_transform, overload, Any + +@dataclass_transform() +def my_dataclass(cls): ... + +class Desc: + @overload + def __get__(self, instance: None, owner: Any) -> int: ... + @overload + def __get__(self, instance: object, owner: Any) -> str: ... + def __get__(self, instance, owner): ... + + def __set__(self, instance: Any, value: bytes) -> None: ... 
+ +@my_dataclass +class C: + x: Desc + +c = C(x=b'x') +C(x=1) # E: Argument "x" to "C" has incompatible type "int"; expected "bytes" +reveal_type(c.x) # N: Revealed type is "builtins.str" +reveal_type(C.x) # N: Revealed type is "builtins.int" +c.x = b'x' +c.x = 1 # E: Incompatible types in assignment (expression has type "int", variable has type "bytes") + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[case testDataclassTransformUnsupportedDescriptors] +# flags: --python-version 3.11 + +from typing import dataclass_transform, overload, Any + +@dataclass_transform() +def my_dataclass(cls): ... + +class Desc: + @overload + def __get__(self, instance: None, owner: Any) -> int: ... + @overload + def __get__(self, instance: object, owner: Any) -> str: ... + def __get__(self, instance, owner): ... + + def __set__(*args, **kwargs) -> None: ... + +class Desc2: + @overload + def __get__(self, instance: None, owner: Any) -> int: ... + @overload + def __get__(self, instance: object, owner: Any) -> str: ... + def __get__(self, instance, owner): ... + + @overload + def __set__(self, instance: Any, value: bytes) -> None: ... + @overload + def __set__(self) -> None: ... + def __set__(self, *args, **kawrga) -> None: ... + +@my_dataclass +class C: + x: Desc # E: Unsupported signature for "__set__" in "Desc" + y: Desc2 # E: Unsupported "__set__" in "Desc2" +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] diff --git a/test-data/unit/check-dataclasses.test b/test-data/unit/check-dataclasses.test index 4d85be391186..da0b7feb4831 100644 --- a/test-data/unit/check-dataclasses.test +++ b/test-data/unit/check-dataclasses.test @@ -2001,3 +2001,39 @@ class Bar(Foo): ... e: Element[Bar] reveal_type(e.elements) # N: Revealed type is "typing.Sequence[__main__.Element[__main__.Bar]]" [builtins fixtures/dataclasses.pyi] + + +[case testIfConditionsInDefinition] +# flags: --python-version 3.11 --always-true TRUTH +from dataclasses import dataclass +from typing import TYPE_CHECKING + +TRUTH = False # Is set to --always-true + +@dataclass +class Foo: + if TYPE_CHECKING: + present_1: int + else: + skipped_1: int + if True: # Mypy does not know if it is True or False, so the block is used + present_2: int + if False: # Mypy does not know if it is True or False, so the block is used + present_3: int + if not TRUTH: + skipped_2: int + elif 123: + present_4: int + elif TRUTH: + present_5: int + else: + skipped_3: int + +Foo( + present_1=1, + present_2=2, + present_3=3, + present_4=4, + present_5=5, +) +[builtins fixtures/dataclasses.pyi] diff --git a/test-data/unit/check-errorcodes.test b/test-data/unit/check-errorcodes.test index 8b3567ab7cf6..124d6952fe5f 100644 --- a/test-data/unit/check-errorcodes.test +++ b/test-data/unit/check-errorcodes.test @@ -960,7 +960,11 @@ def f(d: D, s: str) -> None: [typing fixtures/typing-typeddict.pyi] [case testRecommendErrorCode] -# type: ignore[whatever] # E: type ignore with error code is not supported for modules; use `# mypy: disable-error-code=...` [syntax] +# type: ignore[whatever] # E: type ignore with error code is not supported for modules; use `# mypy: disable-error-code="whatever"` [syntax] +1 + "asdf" + +[case testRecommendErrorCode2] +# type: ignore[whatever, other] # E: type ignore with error code is not supported for modules; use `# mypy: disable-error-code="whatever, other"` [syntax] 1 + "asdf" [case testShowErrorCodesInConfig] diff --git a/test-data/unit/check-expressions.test b/test-data/unit/check-expressions.test index 
49a3f0d4aaa7..c7053ad9b014 100644 --- a/test-data/unit/check-expressions.test +++ b/test-data/unit/check-expressions.test @@ -1115,11 +1115,28 @@ o[:] # E: Value of type "object" is not indexable [case testNonIntSliceBounds] from typing import Any -a, o = None, None # type: (Any, object) -a[o:1] # E: Slice index must be an integer or None -a[1:o] # E: Slice index must be an integer or None -a[o:] # E: Slice index must be an integer or None -a[:o] # E: Slice index must be an integer or None +a: Any +o: object +a[o:1] # E: Slice index must be an integer, SupportsIndex or None +a[1:o] # E: Slice index must be an integer, SupportsIndex or None +a[o:] # E: Slice index must be an integer, SupportsIndex or None +a[:o] # E: Slice index must be an integer, SupportsIndex or None +[builtins fixtures/slice.pyi] + +[case testSliceSupportsIndex] +import typing_extensions +class Index: + def __init__(self, value: int) -> None: + self.value = value + def __index__(self) -> int: + return self.value + +c = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +reveal_type(c[Index(0):Index(5)]) # N: Revealed type is "builtins.list[builtins.int]" +[file typing_extensions.pyi] +from typing import Protocol +class SupportsIndex(Protocol): + def __index__(self) -> int: ... [builtins fixtures/slice.pyi] [case testNoneSliceBounds] diff --git a/test-data/unit/check-functions.test b/test-data/unit/check-functions.test index c23bbb77f643..b76abd31e3dc 100644 --- a/test-data/unit/check-functions.test +++ b/test-data/unit/check-functions.test @@ -491,62 +491,61 @@ if int(): [case testDefaultArgumentExpressions] import typing +class B: pass +class A: pass + def f(x: 'A' = A()) -> None: b = x # type: B # E: Incompatible types in assignment (expression has type "A", variable has type "B") a = x # type: A - -class B: pass -class A: pass [out] [case testDefaultArgumentExpressions2] import typing -def f(x: 'A' = B()) -> None: # E: Incompatible default for argument "x" (default has type "B", argument has type "A") - b = x # type: B # E: Incompatible types in assignment (expression has type "A", variable has type "B") - a = x # type: A - class B: pass class A: pass +def f(x: 'A' = B()) -> None: # E: Incompatible default for argument "x" (default has type "B", argument has type "A") + b = x # type: B # E: Incompatible types in assignment (expression has type "A", variable has type "B") + a = x # type: A [case testDefaultArgumentExpressionsGeneric] from typing import TypeVar T = TypeVar('T', bound='A') -def f(x: T = B()) -> None: # E: Incompatible default for argument "x" (default has type "B", argument has type "T") - b = x # type: B # E: Incompatible types in assignment (expression has type "T", variable has type "B") - a = x # type: A class B: pass class A: pass +def f(x: T = B()) -> None: # E: Incompatible default for argument "x" (default has type "B", argument has type "T") + b = x # type: B # E: Incompatible types in assignment (expression has type "T", variable has type "B") + a = x # type: A [case testDefaultArgumentsWithSubtypes] import typing +class A: pass +class B(A): pass + def f(x: 'B' = A()) -> None: # E: Incompatible default for argument "x" (default has type "A", argument has type "B") pass def g(x: 'A' = B()) -> None: pass - -class A: pass -class B(A): pass [out] [case testMultipleDefaultArgumentExpressions] import typing +class A: pass +class B: pass + def f(x: 'A' = B(), y: 'B' = B()) -> None: # E: Incompatible default for argument "x" (default has type "B", argument has type "A") pass def h(x: 'A' = A(), y: 'B' = B()) -> None: 
pass - -class A: pass -class B: pass [out] [case testMultipleDefaultArgumentExpressions2] import typing -def g(x: 'A' = A(), y: 'B' = A()) -> None: # E: Incompatible default for argument "y" (default has type "A", argument has type "B") - pass - class A: pass class B: pass + +def g(x: 'A' = A(), y: 'B' = A()) -> None: # E: Incompatible default for argument "y" (default has type "A", argument has type "B") + pass [out] [case testDefaultArgumentsAndSignatureAsComment] @@ -2612,7 +2611,7 @@ def f() -> int: ... [case testLambdaDefaultTypeErrors] lambda a=(1 + 'asdf'): a # E: Unsupported operand types for + ("int" and "str") lambda a=nonsense: a # E: Name "nonsense" is not defined -def f(x: int = i): # E: Name "i" is not defined # E: Name "i" is used before definition +def f(x: int = i): # E: Name "i" is not defined i = 42 [case testRevealTypeOfCallExpressionReturningNoneWorks] diff --git a/test-data/unit/check-incremental.test b/test-data/unit/check-incremental.test index ec0c5d5e4805..df02d73d4ded 100644 --- a/test-data/unit/check-incremental.test +++ b/test-data/unit/check-incremental.test @@ -6403,3 +6403,29 @@ def g(d: Dict[TValue]) -> TValue: tmp/b.py:6: error: TypedDict "a.Dict[TValue]" has no key "x" [out2] tmp/b.py:6: error: TypedDict "a.Dict[TValue]" has no key "y" + +[case testParamSpecNoCrash] +import m +[file m.py] +from typing import Callable, TypeVar +from lib import C + +T = TypeVar("T") +def test(x: Callable[..., T]) -> T: ... +test(C) # type: ignore + +[file m.py.2] +from typing import Callable, TypeVar +from lib import C + +T = TypeVar("T") +def test(x: Callable[..., T]) -> T: ... +test(C) # type: ignore +# touch +[file lib.py] +from typing import ParamSpec, Generic, Callable + +P = ParamSpec("P") +class C(Generic[P]): + def __init__(self, fn: Callable[P, int]) -> None: ... +[builtins fixtures/dict.pyi] diff --git a/test-data/unit/check-inference.test b/test-data/unit/check-inference.test index fc8113766f1a..2dc19d319a0d 100644 --- a/test-data/unit/check-inference.test +++ b/test-data/unit/check-inference.test @@ -270,6 +270,120 @@ def f() -> None: class A: pass [out] +[case testClassObjectsNotUnpackableWithoutIterableMetaclass] +from typing import Type + +class Foo: ... +A: Type[Foo] = Foo +a, b = Foo # E: "Type[Foo]" object is not iterable +c, d = A # E: "Type[Foo]" object is not iterable + +class Meta(type): ... +class Bar(metaclass=Meta): ... +B: Type[Bar] = Bar +e, f = Bar # E: "Type[Bar]" object is not iterable +g, h = B # E: "Type[Bar]" object is not iterable + +reveal_type(a) # E: Cannot determine type of "a" # N: Revealed type is "Any" +reveal_type(b) # E: Cannot determine type of "b" # N: Revealed type is "Any" +reveal_type(c) # E: Cannot determine type of "c" # N: Revealed type is "Any" +reveal_type(d) # E: Cannot determine type of "d" # N: Revealed type is "Any" +reveal_type(e) # E: Cannot determine type of "e" # N: Revealed type is "Any" +reveal_type(f) # E: Cannot determine type of "f" # N: Revealed type is "Any" +reveal_type(g) # E: Cannot determine type of "g" # N: Revealed type is "Any" +reveal_type(h) # E: Cannot determine type of "h" # N: Revealed type is "Any" +[out] + +[case testInferringLvarTypesUnpackedFromIterableClassObject] +from typing import Iterator, Type, TypeVar, Union, overload +class Meta(type): + def __iter__(cls) -> Iterator[int]: + yield from [1, 2, 3] + +class Meta2(type): + def __iter__(cls) -> Iterator[str]: + yield from ["foo", "bar", "baz"] + +class Meta3(type): ... + +class Foo(metaclass=Meta): ... +class Bar(metaclass=Meta2): ... 
+class Baz(metaclass=Meta3): ... +class Spam: ... + +class Eggs(metaclass=Meta): + @overload + def __init__(self, x: int) -> None: ... + @overload + def __init__(self, x: int, y: int, z: int) -> None: ... + def __init__(self, x: int, y: int = ..., z: int = ...) -> None: ... + +A: Type[Foo] = Foo +B: Type[Union[Foo, Bar]] = Foo +C: Union[Type[Foo], Type[Bar]] = Foo +D: Type[Union[Foo, Baz]] = Foo +E: Type[Union[Foo, Spam]] = Foo +F: Type[Eggs] = Eggs +G: Type[Union[Foo, Eggs]] = Foo + +a, b, c = Foo +d, e, f = A +g, h, i = B +j, k, l = C +m, n, o = D # E: "Type[Baz]" object is not iterable +p, q, r = E # E: "Type[Spam]" object is not iterable +s, t, u = Eggs +v, w, x = F +y, z, aa = G + +for var in [a, b, c, d, e, f, s, t, u, v, w, x, y, z, aa]: + reveal_type(var) # N: Revealed type is "builtins.int" + +for var2 in [g, h, i, j, k, l]: + reveal_type(var2) # N: Revealed type is "Union[builtins.int, builtins.str]" + +for var3 in [m, n, o, p, q, r]: + reveal_type(var3) # N: Revealed type is "Union[builtins.int, Any]" + +T = TypeVar("T", bound=Type[Foo]) + +def check(x: T) -> T: + a, b, c = x + for var in [a, b, c]: + reveal_type(var) # N: Revealed type is "builtins.int" + return x + +T2 = TypeVar("T2", bound=Type[Union[Foo, Bar]]) + +def check2(x: T2) -> T2: + a, b, c = x + for var in [a, b, c]: + reveal_type(var) # N: Revealed type is "Union[builtins.int, builtins.str]" + return x + +T3 = TypeVar("T3", bound=Union[Type[Foo], Type[Bar]]) + +def check3(x: T3) -> T3: + a, b, c = x + for var in [a, b, c]: + reveal_type(var) # N: Revealed type is "Union[builtins.int, builtins.str]" + return x +[out] + +[case testInferringLvarTypesUnpackedFromIterableClassObjectWithGenericIter] +from typing import Iterator, Type, TypeVar + +T = TypeVar("T") +class Meta(type): + def __iter__(self: Type[T]) -> Iterator[T]: ... +class Foo(metaclass=Meta): ... 
+ +A, B, C = Foo +reveal_type(A) # N: Revealed type is "__main__.Foo" +reveal_type(B) # N: Revealed type is "__main__.Foo" +reveal_type(C) # N: Revealed type is "__main__.Foo" +[out] + [case testInferringLvarTypesInMultiDefWithInvalidTuple] from typing import Tuple t = None # type: Tuple[object, object, object] @@ -380,6 +494,23 @@ class Nums(Iterable[int]): def __iter__(self): pass def __next__(self): pass a, b = Nums() +reveal_type(a) # N: Revealed type is "builtins.int" +reveal_type(b) # N: Revealed type is "builtins.int" +if int(): + a = b = 1 +if int(): + a = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") +if int(): + b = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") +[builtins fixtures/for.pyi] + +[case testInferringTypesFromIterableStructuralSubtyping1] +from typing import Iterator +class Nums: + def __iter__(self) -> Iterator[int]: pass +a, b = Nums() +reveal_type(a) # N: Revealed type is "builtins.int" +reveal_type(b) # N: Revealed type is "builtins.int" if int(): a = b = 1 if int(): @@ -388,6 +519,22 @@ if int(): b = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") [builtins fixtures/for.pyi] +[case testInferringTypesFromIterableStructuralSubtyping2] +from typing import Self +class Nums: + def __iter__(self) -> Self: pass + def __next__(self) -> int: pass +a, b = Nums() +reveal_type(a) # N: Revealed type is "builtins.int" +reveal_type(b) # N: Revealed type is "builtins.int" +if int(): + a = b = 1 +if int(): + a = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") +if int(): + b = '' # E: Incompatible types in assignment (expression has type "str", variable has type "int") +[builtins fixtures/tuple.pyi] + -- Type variable inference for generic functions -- --------------------------------------------- diff --git a/test-data/unit/check-inline-config.test b/test-data/unit/check-inline-config.test index 1b2085e33e91..ee847deabd40 100644 --- a/test-data/unit/check-inline-config.test +++ b/test-data/unit/check-inline-config.test @@ -210,3 +210,12 @@ enable_error_code = ignore-without-code, truthy-bool \[mypy-tests.*] disable_error_code = ignore-without-code + +[case testInlineErrorCodesMultipleCodes] +# mypy: disable-error-code="truthy-bool, ignore-without-code" +class Foo: + pass + +foo = Foo() +if foo: ... +42 + "no" # type: ignore diff --git a/test-data/unit/check-multiple-inheritance.test b/test-data/unit/check-multiple-inheritance.test index a8d053f9504e..d03f2e35e1c4 100644 --- a/test-data/unit/check-multiple-inheritance.test +++ b/test-data/unit/check-multiple-inheritance.test @@ -668,3 +668,41 @@ class D1(B[str], C1): ... class D2(B[Union[int, str]], C2): ... class D3(C2, B[str]): ... class D4(B[str], C2): ... # E: Definition of "foo" in base class "A" is incompatible with definition in base class "C2" + + +[case testMultipleInheritanceOverridingOfFunctionsWithCallableInstances] +from typing import Any, Callable + +def dec1(f: Callable[[Any, int], None]) -> Callable[[Any, int], None]: ... + +class F: + def __call__(self, x: int) -> None: ... + +def dec2(f: Callable[[Any, int], None]) -> F: ... + +class B1: + def f(self, x: int) -> None: ... + +class B2: + @dec1 + def f(self, x: int) -> None: ... + +class B3: + @dec2 + def f(self, x: int) -> None: ... + +class B4: + f = F() + +class C12(B1, B2): ... +class C13(B1, B3): ... 
# E: Definition of "f" in base class "B1" is incompatible with definition in base class "B3" +class C14(B1, B4): ... # E: Definition of "f" in base class "B1" is incompatible with definition in base class "B4" +class C21(B2, B1): ... +class C23(B2, B3): ... # E: Definition of "f" in base class "B2" is incompatible with definition in base class "B3" +class C24(B2, B4): ... # E: Definition of "f" in base class "B2" is incompatible with definition in base class "B4" +class C31(B3, B1): ... +class C32(B3, B2): ... +class C34(B3, B4): ... +class C41(B4, B1): ... +class C42(B4, B2): ... +class C43(B4, B3): ... diff --git a/test-data/unit/check-parameter-specification.test b/test-data/unit/check-parameter-specification.test index 56fc3b6faa14..7ef0485f7841 100644 --- a/test-data/unit/check-parameter-specification.test +++ b/test-data/unit/check-parameter-specification.test @@ -1471,3 +1471,53 @@ def test(f: Concat[T, ...]) -> None: ... class Defer: ... [builtins fixtures/paramspec.pyi] + +[case testNoParamSpecDoubling] +# https://github.com/python/mypy/issues/12734 +from typing import Callable, ParamSpec +from typing_extensions import Concatenate + +P = ParamSpec("P") +Q = ParamSpec("Q") + +def foo(f: Callable[P, int]) -> Callable[P, int]: + return f + +def bar(f: Callable[Concatenate[str, Q], int]) -> Callable[Concatenate[str, Q], int]: + return foo(f) +[builtins fixtures/paramspec.pyi] + +[case testAlreadyExpandedCallableWithParamSpecReplacement] +from typing import Callable, Any, overload +from typing_extensions import Concatenate, ParamSpec + +P = ParamSpec("P") + +@overload +def command() -> Callable[[Callable[Concatenate[object, object, P], object]], None]: # E: Overloaded function signatures 1 and 2 overlap with incompatible return types + ... + +@overload +def command( + cls: int = ..., +) -> Callable[[Callable[Concatenate[object, P], object]], None]: + ... + +def command( + cls: int = 42, +) -> Any: + ... +[builtins fixtures/paramspec.pyi] + +[case testCopiedParamSpecComparison] +# minimized from https://github.com/python/mypy/issues/12909 +from typing import Callable +from typing_extensions import ParamSpec + +P = ParamSpec("P") + +def identity(func: Callable[P, None]) -> Callable[P, None]: ... + +@identity +def f(f: Callable[P, None], *args: P.args, **kwargs: P.kwargs) -> None: ... +[builtins fixtures/paramspec.pyi] diff --git a/test-data/unit/check-possibly-undefined.test b/test-data/unit/check-possibly-undefined.test index 29c4868e97af..ebceef88b537 100644 --- a/test-data/unit/check-possibly-undefined.test +++ b/test-data/unit/check-possibly-undefined.test @@ -210,7 +210,6 @@ def f0() -> None: y = x x = 1 # No error. - [case testGlobalDeclarationAfterUsage] # flags: --enable-error-code possibly-undefined --enable-error-code used-before-def def f0() -> None: @@ -219,6 +218,7 @@ def f0() -> None: x = 1 # No error. x = 2 + [case testVarDefinedInOuterScope] # flags: --enable-error-code possibly-undefined --enable-error-code used-before-def def f0() -> None: @@ -227,6 +227,7 @@ def f0() -> None: f0() x = 1 + [case testDefinedInOuterScopeNoError] # flags: --enable-error-code possibly-undefined --enable-error-code used-before-def def foo() -> None: @@ -234,6 +235,51 @@ def foo() -> None: def bar() -> None: foo() + +[case testClassFromOuterScopeRedefined] +# flags: --enable-error-code possibly-undefined --enable-error-code used-before-def +class c: pass + +def f0() -> None: + s = c() # E: Name "c" is used before definition + class c: pass + +def f1() -> None: + s = c() # No error. 
+ +def f2() -> None: + s = c() # E: Name "c" is used before definition + if int(): + class c: pass + +glob = c() +def f3(x: c = glob) -> None: + glob = 123 + +[case testVarFromOuterScopeRedefined] +# flags: --enable-error-code possibly-undefined --enable-error-code used-before-def +x = 0 + +def f0() -> None: + y = x # E: Name "x" is used before definition + x = 0 + +def f1() -> None: + y = x # No error. + +def f2() -> None: + y = x # E: Name "x" is used before definition + global x + +def f3() -> None: + global x + y = x # No error. + +def f4() -> None: + if int(): + x = 0 + y = x # E: Name "x" may be undefined + [case testFuncParams] # flags: --enable-error-code possibly-undefined def foo(a: int) -> None: @@ -829,67 +875,56 @@ def f4() -> None: x = z # E: Name "z" is used before definition z: int = 2 -[case testUsedBeforeDefImportsBasic] -# flags: --enable-error-code used-before-def +[case testUsedBeforeDefImportsBasicImportNoError] +# flags: --enable-error-code used-before-def --enable-error-code possibly-undefined --disable-error-code no-redef import foo # type: ignore -import x.y # type: ignore -def f0() -> None: - a = foo # No error. - foo: int = 1 +a = foo # No error. +foo: int = 1 -def f1() -> None: - a = y # E: Name "y" is used before definition - y: int = 1 +[case testUsedBeforeDefImportsDotImport] +# flags: --enable-error-code used-before-def --enable-error-code possibly-undefined --disable-error-code no-redef +import x.y # type: ignore -def f2() -> None: - a = x # No error. - x: int = 1 +a = y # E: Name "y" is used before definition +y: int = 1 -def f3() -> None: - a = x.y # No error. - x: int = 1 +b = x # No error. +x: int = 1 + +c = x.y # No error. +x: int = 1 [case testUsedBeforeDefImportBasicRename] -# flags: --enable-error-code used-before-def +# flags: --enable-error-code used-before-def --disable-error-code=no-redef import x.y as z # type: ignore from typing import Any -def f0() -> None: - a = z # No error. - z: int = 1 - -def f1() -> None: - a = x # E: Name "x" is used before definition - x: int = 1 +a = z # No error. +z: int = 1 -def f2() -> None: - a = x.y # E: Name "x" is used before definition - x: Any = 1 +a = x # E: Name "x" is used before definition +x: int = 1 -def f3() -> None: - a = y # E: Name "y" is used before definition - y: int = 1 +a = y # E: Name "y" is used before definition +y: int = 1 [case testUsedBeforeDefImportFrom] -# flags: --enable-error-code used-before-def +# flags: --enable-error-code used-before-def --disable-error-code no-redef from foo import x # type: ignore -def f0() -> None: - a = x # No error. - x: int = 1 +a = x # No error. +x: int = 1 [case testUsedBeforeDefImportFromRename] -# flags: --enable-error-code used-before-def +# flags: --enable-error-code used-before-def --disable-error-code no-redef from foo import x as y # type: ignore -def f0() -> None: - a = y # No error. - y: int = 1 +a = y # No error. +y: int = 1 -def f1() -> None: - a = x # E: Name "x" is used before definition - x: int = 1 +a = x # E: Name "x" is used before definition +x: int = 1 [case testUsedBeforeDefFunctionDeclarations] # flags: --enable-error-code used-before-def @@ -901,14 +936,37 @@ def f0() -> None: inner() # No error. 
inner = lambda: None -[case testUsedBeforeDefBuiltins] +[case testUsedBeforeDefBuiltinsFunc] # flags: --enable-error-code used-before-def def f0() -> None: - s = type(123) + s = type(123) # E: Name "type" is used before definition type = "abc" a = type +def f1() -> None: + s = type(123) + +[case testUsedBeforeDefBuiltinsGlobal] +# flags: --enable-error-code used-before-def + +s = type(123) +type = "abc" +a = type + +[case testUsedBeforeDefBuiltinsClass] +# flags: --enable-error-code used-before-def + +class C: + s = type + type = s + +[case testUsedBeforeDefBuiltinsGenerator] +# flags: --enable-error-code used-before-def + +def f0() -> None: + _ = [type for type in [type("a"), type(1)]] + [case testUsedBeforeDefBuiltinsMultipass] # flags: --enable-error-code used-before-def diff --git a/test-data/unit/check-protocols.test b/test-data/unit/check-protocols.test index c787b34bf26b..3b8e668c7546 100644 --- a/test-data/unit/check-protocols.test +++ b/test-data/unit/check-protocols.test @@ -3998,3 +3998,27 @@ TF = TypeVar("TF", bound=Foo) def outer(cls: Type[TF]) -> TF: reveal_type(test(cls)) # N: Revealed type is "TF`-1" return cls() + +[case testProtocolImportNotMember] +import m +import lib + +class Bad: + x: int +class Good: + x: lib.C + +x: m.P = Bad() # E: Incompatible types in assignment (expression has type "Bad", variable has type "P") \ + # N: Following member(s) of "Bad" have conflicts: \ + # N: x: expected "C", got "int" +x = Good() + +[file m.py] +from typing import Protocol + +class P(Protocol): + import lib + x: lib.C + +[file lib.py] +class C: ... diff --git a/test-data/unit/check-python310.test b/test-data/unit/check-python310.test index 7a934348aaf2..fb83dda7ffab 100644 --- a/test-data/unit/check-python310.test +++ b/test-data/unit/check-python310.test @@ -1841,3 +1841,25 @@ class D: pass X = None | C Y = None | D [builtins fixtures/type.pyi] + +[case testMatchStatementWalrus] +class A: + a = 1 + +def returns_a_or_none() -> A | None: + return A() + +def returns_a() -> A: + return A() + +def f() -> None: + match x := returns_a_or_none(): + case A(): + reveal_type(x.a) # N: Revealed type is "builtins.int" + match x := returns_a(): + case A(): + reveal_type(x.a) # N: Revealed type is "builtins.int" + y = returns_a_or_none() + match y: + case A(): + reveal_type(y.a) # N: Revealed type is "builtins.int" diff --git a/test-data/unit/check-selftype.test b/test-data/unit/check-selftype.test index 555cef3641f8..752de3741456 100644 --- a/test-data/unit/check-selftype.test +++ b/test-data/unit/check-selftype.test @@ -208,6 +208,30 @@ class J(A[int]): [builtins fixtures/tuple.pyi] +[case testSelfTypeOverrideCompatibilityGeneric] +from typing import TypeVar, Generic, overload + +T = TypeVar("T", str, int, None) + +class A(Generic[T]): + @overload + def f(self, s: T) -> T: ... + @overload + def f(self: A[str], s: bytes) -> str: ... + def f(self, s: object): ... + +class B(A[int]): + def f(self, s: int) -> int: ... + +class C(A[None]): + def f(self, s: int) -> int: ... 
# E: Signature of "f" incompatible with supertype "A" \ + # N: Superclass: \ + # N: @overload \ + # N: def f(self, s: None) -> None \ + # N: Subclass: \ + # N: def f(self, s: int) -> int +[builtins fixtures/tuple.pyi] + [case testSelfTypeOverrideCompatibilityTypeVar-xfail] from typing import overload, TypeVar, Union diff --git a/test-data/unit/check-tuples.test b/test-data/unit/check-tuples.test index 266bfbf97888..e843532a2560 100644 --- a/test-data/unit/check-tuples.test +++ b/test-data/unit/check-tuples.test @@ -1248,7 +1248,7 @@ t = (0, "") x = 0 y = "" reveal_type(t[x:]) # N: Revealed type is "builtins.tuple[Union[builtins.int, builtins.str], ...]" -t[y:] # E: Slice index must be an integer or None +t[y:] # E: Slice index must be an integer, SupportsIndex or None [builtins fixtures/tuple.pyi] [case testInferTupleTypeFallbackAgainstInstance] diff --git a/test-data/unit/check-type-aliases.test b/test-data/unit/check-type-aliases.test index d7cccd2d6ba6..9dd56ad309f3 100644 --- a/test-data/unit/check-type-aliases.test +++ b/test-data/unit/check-type-aliases.test @@ -1028,3 +1028,18 @@ RHSAlias3: type = tuple[int, ...] WrongTypeElement = str | tuple[float, 1] # E: Invalid type: try using Literal[1] instead? WrongEllipsis = str | tuple[float, float, ...] # E: Unexpected "..." [builtins fixtures/tuple.pyi] + +[case testCompiledNoCrashOnSingleItemUnion] +# flags: --no-strict-optional +from typing import Callable, Union, Generic, TypeVar + +Alias = Callable[[], int] + +T = TypeVar("T") +class C(Generic[T]): + attr: Union[Alias, None] = None + + @classmethod + def test(cls) -> None: + cls.attr +[builtins fixtures/classmethod.pyi] diff --git a/test-data/unit/check-typeddict.test b/test-data/unit/check-typeddict.test index e3d6188b643b..970dc05b488d 100644 --- a/test-data/unit/check-typeddict.test +++ b/test-data/unit/check-typeddict.test @@ -9,7 +9,7 @@ reveal_type(p) # N: Revealed type is "TypedDict('__main__.Point', {'x': builtin reveal_type(p.values()) # N: Revealed type is "typing.Iterable[builtins.object]" [builtins fixtures/dict.pyi] [typing fixtures/typing-typeddict.pyi] -[targets sys, __main__] +[targets __main__] [case testCanCreateTypedDictInstanceWithDictCall] from mypy_extensions import TypedDict diff --git a/test-data/unit/check-unreachable-code.test b/test-data/unit/check-unreachable-code.test index 48459dd8941a..6522391899de 100644 --- a/test-data/unit/check-unreachable-code.test +++ b/test-data/unit/check-unreachable-code.test @@ -1423,3 +1423,27 @@ def f(value: None) -> None: x = force_forward_ref() [builtins fixtures/exception.pyi] + +[case testSetitemNoReturn] +# flags: --warn-unreachable +from typing import NoReturn +class Foo: + def __setitem__(self, key: str, value: str) -> NoReturn: + raise Exception +Foo()['a'] = 'a' +x = 0 # E: Statement is unreachable +[builtins fixtures/exception.pyi] + +[case TestNoImplicNoReturnFromError] +# flags: --warn-unreachable +from typing import TypeVar + +T = TypeVar("T") +class Foo: + def __setitem__(self, key: str, value: str) -> T: # E: A function returning TypeVar should receive at least one argument containing the same TypeVar + raise Exception + +def f() -> None: + Foo()['a'] = 'a' + x = 0 # This should not be reported as unreachable +[builtins fixtures/exception.pyi] \ No newline at end of file diff --git a/test-data/unit/deps-types.test b/test-data/unit/deps-types.test index 1d7064cde0c7..def117fe04df 100644 --- a/test-data/unit/deps-types.test +++ b/test-data/unit/deps-types.test @@ -852,8 +852,6 @@ class I: pass -> a -> , a, mod.I 
-> a - -> sys - -> sys [case testAliasDepsClassInFunction] from mod import I diff --git a/test-data/unit/fine-grained-dataclass-transform.test b/test-data/unit/fine-grained-dataclass-transform.test new file mode 100644 index 000000000000..3abba0892ae8 --- /dev/null +++ b/test-data/unit/fine-grained-dataclass-transform.test @@ -0,0 +1,134 @@ +[case updateDataclassTransformParameterViaDecorator] +# flags: --python-version 3.11 +from m import my_dataclass + +@my_dataclass +class Foo: + x: int + +foo = Foo(1) +foo.x = 2 + +[file m.py] +from typing import dataclass_transform + +@dataclass_transform(frozen_default=False) +def my_dataclass(cls): return cls + +[file m.py.2] +from typing import dataclass_transform + +@dataclass_transform(frozen_default=True) +def my_dataclass(cls): return cls + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[out] +== +main:9: error: Property "x" defined in "Foo" is read-only + +[case updateDataclassTransformParameterViaParentClass] +# flags: --python-version 3.11 +from m import Dataclass + +class Foo(Dataclass): + x: int + +foo = Foo(1) +foo.x = 2 + +[file m.py] +from typing import dataclass_transform + +@dataclass_transform(frozen_default=False) +class Dataclass: ... + +[file m.py.2] +from typing import dataclass_transform + +@dataclass_transform(frozen_default=True) +class Dataclass: ... + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[out] +== +main:8: error: Property "x" defined in "Foo" is read-only + +[case updateBaseClassToUseDataclassTransform] +# flags: --python-version 3.11 +from m import A + +class B(A): + y: int + +B(x=1, y=2) + +[file m.py] +class Dataclass: ... + +class A(Dataclass): + x: int + +[file m.py.2] +from typing import dataclass_transform + +@dataclass_transform() +class Dataclass: ... + +class A(Dataclass): + x: int + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[out] +main:7: error: Unexpected keyword argument "x" for "B" +builtins.pyi:12: note: "B" defined here +main:7: error: Unexpected keyword argument "y" for "B" +builtins.pyi:12: note: "B" defined here +== + +[case frozenStays] +# flags: --python-version 3.11 +from foo import Foo + +foo = Foo(base=0, foo=1) + +[file transform.py] +from typing import dataclass_transform, Type + +@dataclass_transform(frozen_default=True) +def dataclass(cls: Type) -> Type: return cls + +[file base.py] +from transform import dataclass + +@dataclass +class Base: + base: int + +[file foo.py] +from base import Base +from transform import dataclass + +@dataclass +class Foo(Base): + foo: int + +[file foo.py.2] +from base import Base +from transform import dataclass + +@dataclass +class Foo(Base): + foo: int + bar: int = 0 + +[typing fixtures/typing-full.pyi] +[builtins fixtures/dataclasses.pyi] + +[out] +== diff --git a/test-data/unit/fixtures/slice.pyi b/test-data/unit/fixtures/slice.pyi index b5a4549da068..b22a12b5213f 100644 --- a/test-data/unit/fixtures/slice.pyi +++ b/test-data/unit/fixtures/slice.pyi @@ -15,3 +15,5 @@ class str: pass class slice: pass class ellipsis: pass class dict: pass +class list(Generic[T]): + def __getitem__(self, x: slice) -> list[T]: pass diff --git a/test-data/unit/lib-stub/attr/__init__.pyi b/test-data/unit/lib-stub/attr/__init__.pyi index 795e5d3f4f69..1a3838aa3ab1 100644 --- a/test-data/unit/lib-stub/attr/__init__.pyi +++ b/test-data/unit/lib-stub/attr/__init__.pyi @@ -244,3 +244,6 @@ def field( order: Optional[bool] = ..., on_setattr: Optional[object] = ..., ) -> Any: ... 
+ +def evolve(inst: _T, **changes: Any) -> _T: ... +def assoc(inst: _T, **changes: Any) -> _T: ... diff --git a/test-data/unit/lib-stub/attrs/__init__.pyi b/test-data/unit/lib-stub/attrs/__init__.pyi index d25774045132..8e9aa1fdced5 100644 --- a/test-data/unit/lib-stub/attrs/__init__.pyi +++ b/test-data/unit/lib-stub/attrs/__init__.pyi @@ -126,3 +126,6 @@ def field( order: Optional[bool] = ..., on_setattr: Optional[object] = ..., ) -> Any: ... + +def evolve(inst: _T, **changes: Any) -> _T: ... +def assoc(inst: _T, **changes: Any) -> _T: ... diff --git a/test-data/unit/lib-stub/contextlib.pyi b/test-data/unit/lib-stub/contextlib.pyi index e2a0cccd562a..ca9e91cf4d65 100644 --- a/test-data/unit/lib-stub/contextlib.pyi +++ b/test-data/unit/lib-stub/contextlib.pyi @@ -1,6 +1,5 @@ -import sys -from typing import Generic, TypeVar, Callable, Iterator -from typing import ContextManager as ContextManager +from typing import AsyncIterator, Generic, TypeVar, Callable, Iterator +from typing import ContextManager as ContextManager, AsyncContextManager as AsyncContextManager _T = TypeVar('_T') @@ -11,7 +10,4 @@ class GeneratorContextManager(ContextManager[_T], Generic[_T]): def contextmanager(func: Callable[..., Iterator[_T]]) -> Callable[..., GeneratorContextManager[_T]]: ... -if sys.version_info >= (3, 7): - from typing import AsyncIterator - from typing import AsyncContextManager as AsyncContextManager - def asynccontextmanager(func: Callable[..., AsyncIterator[_T]]) -> Callable[..., AsyncContextManager[_T]]: ... +def asynccontextmanager(func: Callable[..., AsyncIterator[_T]]) -> Callable[..., AsyncContextManager[_T]]: ... diff --git a/test-data/unit/lib-stub/math.pyi b/test-data/unit/lib-stub/math.pyi new file mode 100644 index 000000000000..587b04a56de8 --- /dev/null +++ b/test-data/unit/lib-stub/math.pyi @@ -0,0 +1,16 @@ +pi: float +def sqrt(__x: float) -> float: ... +def sin(__x: float) -> float: ... +def cos(__x: float) -> float: ... +def tan(__x: float) -> float: ... +def exp(__x: float) -> float: ... +def log(__x: float) -> float: ... +def floor(__x: float) -> int: ... +def ceil(__x: float) -> int: ... +def fabs(__x: float) -> float: ... +def pow(__x: float, __y: float) -> float: ... +def copysign(__x: float, __y: float) -> float: ... +def isinf(__x: float) -> bool: ... +def isnan(__x: float) -> bool: ... +def isfinite(__x: float) -> bool: ... +def nextafter(__x: float, __y: float) -> float: ... diff --git a/test-data/unit/lib-stub/mypy_extensions.pyi b/test-data/unit/lib-stub/mypy_extensions.pyi index d79be8719417..56fac31e7219 100644 --- a/test-data/unit/lib-stub/mypy_extensions.pyi +++ b/test-data/unit/lib-stub/mypy_extensions.pyi @@ -3,7 +3,6 @@ from typing import ( Any, Dict, Type, TypeVar, Optional, Any, Generic, Mapping, NoReturn as NoReturn, Iterator, Union, Protocol ) -import sys _T = TypeVar('_T') _U = TypeVar('_U') @@ -33,8 +32,6 @@ class _TypedDict(Mapping[str, object]): # Mypy expects that 'default' has a type variable type. def pop(self, k: NoReturn, default: _T = ...) -> object: ... def update(self: _T, __m: _T) -> None: ... - if sys.version_info < (3, 0): - def has_key(self, k: str) -> bool: ... def __delitem__(self, k: NoReturn) -> None: ... def TypedDict(typename: str, fields: Dict[str, Type[_T]], *, total: Any = ...) -> Type[dict]: ... @@ -50,68 +47,67 @@ mypyc_attr: Any class FlexibleAlias(Generic[_T, _U]): ... 
-if sys.version_info >= (3, 0): - class __SupportsInt(Protocol[T_co]): - def __int__(self) -> int: pass - - _Int = Union[int, i32, i64] - - class i32: - def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... - def __add__(self, x: i32) -> i32: ... - def __radd__(self, x: i32) -> i32: ... - def __sub__(self, x: i32) -> i32: ... - def __rsub__(self, x: i32) -> i32: ... - def __mul__(self, x: i32) -> i32: ... - def __rmul__(self, x: i32) -> i32: ... - def __floordiv__(self, x: i32) -> i32: ... - def __rfloordiv__(self, x: i32) -> i32: ... - def __mod__(self, x: i32) -> i32: ... - def __rmod__(self, x: i32) -> i32: ... - def __and__(self, x: i32) -> i32: ... - def __rand__(self, x: i32) -> i32: ... - def __or__(self, x: i32) -> i32: ... - def __ror__(self, x: i32) -> i32: ... - def __xor__(self, x: i32) -> i32: ... - def __rxor__(self, x: i32) -> i32: ... - def __lshift__(self, x: i32) -> i32: ... - def __rlshift__(self, x: i32) -> i32: ... - def __rshift__(self, x: i32) -> i32: ... - def __rrshift__(self, x: i32) -> i32: ... - def __neg__(self) -> i32: ... - def __invert__(self) -> i32: ... - def __pos__(self) -> i32: ... - def __lt__(self, x: i32) -> bool: ... - def __le__(self, x: i32) -> bool: ... - def __ge__(self, x: i32) -> bool: ... - def __gt__(self, x: i32) -> bool: ... - - class i64: - def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... - def __add__(self, x: i64) -> i64: ... - def __radd__(self, x: i64) -> i64: ... - def __sub__(self, x: i64) -> i64: ... - def __rsub__(self, x: i64) -> i64: ... - def __mul__(self, x: i64) -> i64: ... - def __rmul__(self, x: i64) -> i64: ... - def __floordiv__(self, x: i64) -> i64: ... - def __rfloordiv__(self, x: i64) -> i64: ... - def __mod__(self, x: i64) -> i64: ... - def __rmod__(self, x: i64) -> i64: ... - def __and__(self, x: i64) -> i64: ... - def __rand__(self, x: i64) -> i64: ... - def __or__(self, x: i64) -> i64: ... - def __ror__(self, x: i64) -> i64: ... - def __xor__(self, x: i64) -> i64: ... - def __rxor__(self, x: i64) -> i64: ... - def __lshift__(self, x: i64) -> i64: ... - def __rlshift__(self, x: i64) -> i64: ... - def __rshift__(self, x: i64) -> i64: ... - def __rrshift__(self, x: i64) -> i64: ... - def __neg__(self) -> i64: ... - def __invert__(self) -> i64: ... - def __pos__(self) -> i64: ... - def __lt__(self, x: i64) -> bool: ... - def __le__(self, x: i64) -> bool: ... - def __ge__(self, x: i64) -> bool: ... - def __gt__(self, x: i64) -> bool: ... +class __SupportsInt(Protocol[T_co]): + def __int__(self) -> int: pass + +_Int = Union[int, i32, i64] + +class i32: + def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... + def __add__(self, x: i32) -> i32: ... + def __radd__(self, x: i32) -> i32: ... + def __sub__(self, x: i32) -> i32: ... + def __rsub__(self, x: i32) -> i32: ... + def __mul__(self, x: i32) -> i32: ... + def __rmul__(self, x: i32) -> i32: ... + def __floordiv__(self, x: i32) -> i32: ... + def __rfloordiv__(self, x: i32) -> i32: ... + def __mod__(self, x: i32) -> i32: ... + def __rmod__(self, x: i32) -> i32: ... + def __and__(self, x: i32) -> i32: ... + def __rand__(self, x: i32) -> i32: ... + def __or__(self, x: i32) -> i32: ... + def __ror__(self, x: i32) -> i32: ... + def __xor__(self, x: i32) -> i32: ... + def __rxor__(self, x: i32) -> i32: ... + def __lshift__(self, x: i32) -> i32: ... + def __rlshift__(self, x: i32) -> i32: ... + def __rshift__(self, x: i32) -> i32: ... 
+ def __rrshift__(self, x: i32) -> i32: ... + def __neg__(self) -> i32: ... + def __invert__(self) -> i32: ... + def __pos__(self) -> i32: ... + def __lt__(self, x: i32) -> bool: ... + def __le__(self, x: i32) -> bool: ... + def __ge__(self, x: i32) -> bool: ... + def __gt__(self, x: i32) -> bool: ... + +class i64: + def __init__(self, x: Union[_Int, str, bytes, SupportsInt], base: int = 10) -> None: ... + def __add__(self, x: i64) -> i64: ... + def __radd__(self, x: i64) -> i64: ... + def __sub__(self, x: i64) -> i64: ... + def __rsub__(self, x: i64) -> i64: ... + def __mul__(self, x: i64) -> i64: ... + def __rmul__(self, x: i64) -> i64: ... + def __floordiv__(self, x: i64) -> i64: ... + def __rfloordiv__(self, x: i64) -> i64: ... + def __mod__(self, x: i64) -> i64: ... + def __rmod__(self, x: i64) -> i64: ... + def __and__(self, x: i64) -> i64: ... + def __rand__(self, x: i64) -> i64: ... + def __or__(self, x: i64) -> i64: ... + def __ror__(self, x: i64) -> i64: ... + def __xor__(self, x: i64) -> i64: ... + def __rxor__(self, x: i64) -> i64: ... + def __lshift__(self, x: i64) -> i64: ... + def __rlshift__(self, x: i64) -> i64: ... + def __rshift__(self, x: i64) -> i64: ... + def __rrshift__(self, x: i64) -> i64: ... + def __neg__(self) -> i64: ... + def __invert__(self) -> i64: ... + def __pos__(self) -> i64: ... + def __lt__(self, x: i64) -> bool: ... + def __le__(self, x: i64) -> bool: ... + def __ge__(self, x: i64) -> bool: ... + def __gt__(self, x: i64) -> bool: ... diff --git a/test-data/unit/lib-stub/typing_extensions.pyi b/test-data/unit/lib-stub/typing_extensions.pyi index 22b895971521..759f956d314b 100644 --- a/test-data/unit/lib-stub/typing_extensions.pyi +++ b/test-data/unit/lib-stub/typing_extensions.pyi @@ -1,5 +1,5 @@ import typing -from typing import Any, Mapping, Iterator, NoReturn as NoReturn, Dict, Type +from typing import Any, Mapping, Iterable, Iterator, NoReturn as NoReturn, Dict, Tuple, Type from typing import TYPE_CHECKING as TYPE_CHECKING from typing import NewType as NewType, overload as overload @@ -50,6 +50,9 @@ class _TypedDict(Mapping[str, object]): # Mypy expects that 'default' has a type variable type. def pop(self, k: NoReturn, default: _T = ...) -> object: ... def update(self: _T, __m: _T) -> None: ... + def items(self) -> Iterable[Tuple[str, object]]: ... + def keys(self) -> Iterable[str]: ... + def values(self) -> Iterable[object]: ... if sys.version_info < (3, 0): def has_key(self, k: str) -> bool: ... def __delitem__(self, k: NoReturn) -> None: ... 
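The hunk above adds items(), keys() and values() declarations to the _TypedDict fallback in the typing_extensions test stub. For illustration only (this snippet is not part of the patch), a minimal sketch of the runtime behaviour those signatures describe, using the real typing_extensions API:

    from typing_extensions import TypedDict

    class Point(TypedDict):
        x: int
        y: int

    p: Point = {"x": 1, "y": 2}

    # items() yields (key, value) pairs, matching the stub's
    # Iterable[Tuple[str, object]] declaration.
    for key, value in p.items():
        print(key, value)

    print(list(p.keys()))    # ['x', 'y']
    print(list(p.values()))  # [1, 2]
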
diff --git a/test-data/unit/merge.test b/test-data/unit/merge.test index 144a095440f2..42d38c89482c 100644 --- a/test-data/unit/merge.test +++ b/test-data/unit/merge.test @@ -1086,7 +1086,7 @@ a: A [file target.py.next] from _x import A a: A -[file _x.pyi] +[fixture _x.pyi] from typing import Generic, TypeVar, overload T = TypeVar('T') diff --git a/test-data/unit/pythoneval.test b/test-data/unit/pythoneval.test index fbbaecbba241..915d9b4921a2 100644 --- a/test-data/unit/pythoneval.test +++ b/test-data/unit/pythoneval.test @@ -268,7 +268,9 @@ def bin(f: IO[bytes]) -> None: txt(sys.stdout) bin(sys.stdout) [out] -_program.py:5: error: Argument 1 to "write" of "IO" has incompatible type "bytes"; expected "str" +_program.py:5: error: No overload variant of "write" of "IO" matches argument type "bytes" +_program.py:5: note: Possible overload variants: +_program.py:5: note: def write(self, str, /) -> int _program.py:10: error: Argument 1 to "bin" has incompatible type "TextIO"; expected "IO[bytes]" [case testBuiltinOpen] @@ -1878,6 +1880,23 @@ _testEnumIterMetaInference.py:8: note: Revealed type is "typing.Iterator[_E`-1]" _testEnumIterMetaInference.py:9: note: Revealed type is "_E`-1" _testEnumIterMetaInference.py:13: note: Revealed type is "socket.SocketKind" +[case testEnumUnpackedViaMetaclass] +from enum import Enum + +class FooEnum(Enum): + A = 1 + B = 2 + C = 3 + +a, b, c = FooEnum +reveal_type(a) +reveal_type(b) +reveal_type(c) +[out] +_testEnumUnpackedViaMetaclass.py:9: note: Revealed type is "_testEnumUnpackedViaMetaclass.FooEnum" +_testEnumUnpackedViaMetaclass.py:10: note: Revealed type is "_testEnumUnpackedViaMetaclass.FooEnum" +_testEnumUnpackedViaMetaclass.py:11: note: Revealed type is "_testEnumUnpackedViaMetaclass.FooEnum" + [case testNativeIntTypes] # Spot check various native int operations with full stubs. from mypy_extensions import i64, i32 @@ -1924,3 +1943,46 @@ _testStarUnpackNestedUnderscore.py:10: error: List item 0 has incompatible type _testStarUnpackNestedUnderscore.py:10: error: List item 1 has incompatible type "int"; expected "str" _testStarUnpackNestedUnderscore.py:11: note: Revealed type is "builtins.list[builtins.str]" _testStarUnpackNestedUnderscore.py:16: note: Revealed type is "builtins.list[builtins.object]" + +[case testStrictEqualitywithParamSpec] +# flags: --strict-equality +from typing import Generic +from typing_extensions import Concatenate, ParamSpec + +P = ParamSpec("P") + +class Foo(Generic[P]): ... +class Bar(Generic[P]): ... 
+ +def bad(foo: Foo[[int]], bar: Bar[[int]]) -> bool: + return foo == bar + +def good1(foo1: Foo[[int]], foo2: Foo[[str]]) -> bool: + return foo1 == foo2 + +def good2(foo1: Foo[[int, str]], foo2: Foo[[int, bytes]]) -> bool: + return foo1 == foo2 + +def good3(foo1: Foo[[int]], foo2: Foo[[int, int]]) -> bool: + return foo1 == foo2 + +def good4(foo1: Foo[[int]], foo2: Foo[[int]]) -> bool: + return foo1 == foo2 + +def good5(foo1: Foo[[int]], foo2: Foo[[bool]]) -> bool: + return foo1 == foo2 + +def good6(foo1: Foo[[int, int]], foo2: Foo[[bool, bool]]) -> bool: + return foo1 == foo2 + +def good7(foo1: Foo[[int]], foo2: Foo[P], *args: P.args, **kwargs: P.kwargs) -> bool: + return foo1 == foo2 + +def good8(foo1: Foo[P], foo2: Foo[[int, str, bytes]], *args: P.args, **kwargs: P.kwargs) -> bool: + return foo1 == foo2 + +def good9(foo1: Foo[Concatenate[int, P]], foo2: Foo[[int, str, bytes]], *args: P.args, **kwargs: P.kwargs) -> bool: + return foo1 == foo2 + +[out] +_testStrictEqualitywithParamSpec.py:11: error: Non-overlapping equality check (left operand type: "Foo[[int]]", right operand type: "Bar[[int]]") diff --git a/test-data/unit/semanal-modules.test b/test-data/unit/semanal-modules.test index bc381293161f..116747ae5cb9 100644 --- a/test-data/unit/semanal-modules.test +++ b/test-data/unit/semanal-modules.test @@ -77,9 +77,9 @@ MypyFile:1( [case testImportMultiple] import _m, _n _m.x, _n.y -[file _m.py] +[fixture _m.py] x = 1 -[file _n.py] +[fixture _n.py] y = 2 [out] MypyFile:1( @@ -96,7 +96,7 @@ MypyFile:1( [case testImportAs] import _m as n n.x -[file _m.py] +[fixture _m.py] x = 1 [out] MypyFile:1( @@ -109,7 +109,7 @@ MypyFile:1( [case testImportFromMultiple] from _m import x, y x, y -[file _m.py] +[fixture _m.py] x = y = 1 [out] MypyFile:1( @@ -122,7 +122,7 @@ MypyFile:1( [case testImportFromAs] from _m import y as z z -[file _m.py] +[fixture _m.py] y = 1 [out] MypyFile:1( @@ -135,7 +135,7 @@ from m import x y = x [file m.py] from _n import x -[file _n.py] +[fixture _n.py] x = 1 [out] MypyFile:1( @@ -150,9 +150,9 @@ MypyFile:1( [case testAccessImportedName2] import _m y = _m.x -[file _m.py] +[fixture _m.py] from _n import x -[file _n.py] +[fixture _n.py] x = 1 [out] MypyFile:1( @@ -166,9 +166,9 @@ MypyFile:1( [case testAccessingImportedNameInType] from _m import c x = None # type: c -[file _m.py] +[fixture _m.py] from _n import c -[file _n.py] +[fixture _n.py] class c: pass [out] MypyFile:1( @@ -181,9 +181,9 @@ MypyFile:1( [case testAccessingImportedNameInType2] import _m x = None # type: _m.c -[file _m.py] +[fixture _m.py] from _n import c -[file _n.py] +[fixture _n.py] class c: pass [out] MypyFile:1( @@ -196,9 +196,9 @@ MypyFile:1( [case testAccessingImportedModule] from _m import _n _n.x -[file _m.py] +[fixture _m.py] import _n -[file _n.py] +[fixture _n.py] x = 1 [out] MypyFile:1( @@ -211,9 +211,9 @@ MypyFile:1( [case testAccessingImportedModule2] import _m _m._n.x -[file _m.py] +[fixture _m.py] import _n -[file _n.py] +[fixture _n.py] x = 1 [out] MypyFile:1( @@ -228,9 +228,9 @@ MypyFile:1( [case testAccessTypeViaDoubleIndirection] from _m import c a = None # type: c -[file _m.py] +[fixture _m.py] from _n import c -[file _n.py] +[fixture _n.py] class c: pass [out] MypyFile:1( @@ -243,9 +243,9 @@ MypyFile:1( [case testAccessTypeViaDoubleIndirection2] import _m a = None # type: _m.c -[file _m.py] +[fixture _m.py] from _n import c -[file _n.py] +[fixture _n.py] class c: pass [out] MypyFile:1( @@ -258,7 +258,7 @@ MypyFile:1( [case testImportAsterisk] from _m import * x, y -[file _m.py] 
+[fixture _m.py] x = y = 1 [out] MypyFile:1( @@ -271,10 +271,10 @@ MypyFile:1( [case testImportAsteriskAndImportedNames] from _m import * n_.x, y -[file _m.py] +[fixture _m.py] import n_ from n_ import y -[file n_.py] +[fixture n_.py] x = y = 1 [out] MypyFile:1( @@ -290,10 +290,10 @@ MypyFile:1( from _m import * x = None # type: n_.c y = None # type: d -[file _m.py] +[fixture _m.py] import n_ from n_ import d -[file n_.py] +[fixture n_.py] class c: pass class d: pass [out] @@ -311,7 +311,7 @@ MypyFile:1( [case testModuleInSubdir] import _m _m.x -[file _m/__init__.py] +[fixture _m/__init__.py] x = 1 [out] MypyFile:1( @@ -324,7 +324,7 @@ MypyFile:1( [case testNestedModules] import m.n m.n.x, m.y -[file m/__init__.py] +[fixture m/__init__.py] y = 1 [file m/n.py] x = 1 @@ -351,8 +351,8 @@ MypyFile:1( [case testImportFromSubmodule] from m._n import x x -[file m/__init__.py] -[file m/_n.py] +[fixture m/__init__.py] +[fixture m/_n.py] x = 1 [out] MypyFile:1( @@ -363,8 +363,8 @@ MypyFile:1( [case testImportAllFromSubmodule] from m._n import * x, y -[file m/__init__.py] -[file m/_n.py] +[fixture m/__init__.py] +[fixture m/_n.py] x = y = 1 [out] MypyFile:1( @@ -377,8 +377,8 @@ MypyFile:1( [case testSubmodulesAndTypes] import m._n x = None # type: m._n.c -[file m/__init__.py] -[file m/_n.py] +[fixture m/__init__.py] +[fixture m/_n.py] class c: pass [out] MypyFile:1( @@ -391,8 +391,8 @@ MypyFile:1( [case testSubmodulesAndTypes2] from m._n import c x = None # type: c -[file m/__init__.py] -[file m/_n.py] +[fixture m/__init__.py] +[fixture m/_n.py] class c: pass [out] MypyFile:1( @@ -405,8 +405,8 @@ MypyFile:1( [case testFromPackageImportModule] from m import _n _n.x -[file m/__init__.py] -[file m/_n.py] +[fixture m/__init__.py] +[fixture m/_n.py] x = 1 [out] MypyFile:1( @@ -421,9 +421,9 @@ import m.n.k m.n.k.x m.n.b m.a -[file m/__init__.py] +[fixture m/__init__.py] a = 1 -[file m/n/__init__.py] +[fixture m/n/__init__.py] b = 1 [file m/n/k.py] x = 1 @@ -458,10 +458,10 @@ MypyFile:1( [case testImportInSubmodule] import m._n y = m._n.x -[file m/__init__.py] -[file m/_n.py] +[fixture m/__init__.py] +[fixture m/_n.py] from m._k import x -[file m/_k.py] +[fixture m/_k.py] x = 1 [out] MypyFile:1( @@ -494,7 +494,7 @@ MypyFile:1( import _m _m.x = ( _m.x) -[file _m.py] +[fixture _m.py] x = None [out] MypyFile:1( @@ -510,7 +510,7 @@ MypyFile:1( [case testAssignmentThatRefersToModule] import _m _m.x[None] = None -[file _m.py] +[fixture _m.py] x = None [out] MypyFile:1( @@ -527,7 +527,7 @@ MypyFile:1( if 1: import _x _x.y -[file _x.py] +[fixture _x.py] y = 1 [out] MypyFile:1( @@ -545,7 +545,7 @@ MypyFile:1( def f() -> None: import _x _x.y -[file _x.py] +[fixture _x.py] y = 1 [out] MypyFile:1( @@ -563,7 +563,7 @@ MypyFile:1( class A: from _x import y z = y -[file _x.py] +[fixture _x.py] y = 1 [out] MypyFile:1( @@ -578,7 +578,7 @@ MypyFile:1( class A: import _x z = _x.y -[file _x.py] +[fixture _x.py] y = 1 [out] MypyFile:1( @@ -620,7 +620,7 @@ MypyFile:1( [case testRelativeImport0] import m.x m.x.z.y -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] from . import z [file m/z.py] @@ -650,12 +650,12 @@ MypyFile:1( import m.t.b as b b.x.y b.z.y -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] y = 1 [file m/z.py] y = 3 -[file m/t/__init__.py] +[fixture m/t/__init__.py] [file m/t/b.py] from .. 
import x, z [out] @@ -693,12 +693,12 @@ MypyFile:1( import m.t.b as b b.xy b.zy -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] y = 1 [file m/z.py] y = 3 -[file m/t/__init__.py] +[fixture m/t/__init__.py] [file m/t/b.py] from ..x import y as xy from ..z import y as zy @@ -735,14 +735,14 @@ import m.t m.zy m.xy m.t.y -[file m/__init__.py] +[fixture m/__init__.py] from .x import * from .z import * [file m/x.py] from .z import zy as xy [file m/z.py] zy = 3 -[file m/t/__init__.py] +[fixture m/t/__init__.py] from .b import * [file m/t/b.py] from .. import xy as y @@ -778,7 +778,7 @@ MypyFile:1( [case testRelativeImportFromSameModule] import m.x -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] from .x import nonexistent [out] @@ -786,7 +786,7 @@ tmp/m/x.py:1: error: Module "m.x" has no attribute "nonexistent" [case testImportFromSameModule] import m.x -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] from m.x import nonexistent [out] @@ -794,7 +794,7 @@ tmp/m/x.py:1: error: Module "m.x" has no attribute "nonexistent" [case testImportMisspellingSingleCandidate] import f -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] def some_function(): pass @@ -805,7 +805,7 @@ tmp/f.py:1: error: Module "m.x" has no attribute "somefunction"; maybe "some_fun [case testImportMisspellingMultipleCandidates] import f -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] def some_function(): pass @@ -818,7 +818,7 @@ tmp/f.py:1: error: Module "m.x" has no attribute "somefunction"; maybe "some_fun [case testImportMisspellingMultipleCandidatesTruncated] import f -[file m/__init__.py] +[fixture m/__init__.py] [file m/x.py] def some_function(): pass @@ -849,10 +849,10 @@ y = 2 from m_ import * x y -[file m_.py] +[fixture m_.py] from m2_ import x as x from m2_ import y -[file m2_.py] +[fixture m2_.py] x = 1 y = 2 [out] @@ -878,11 +878,11 @@ import m3 from m_ import * m2_ m3_ -[file m_.py] +[fixture m_.py] import m2_ as m2_ import m3_ -[file m2_.py] -[file m3_.py] +[fixture m2_.py] +[fixture m3_.py] [out] MypyFile:1( ImportAll:1(m_) diff --git a/test-data/unit/semanal-python310.test b/test-data/unit/semanal-python310.test index 9418ac2912b2..e96a3ca9d777 100644 --- a/test-data/unit/semanal-python310.test +++ b/test-data/unit/semanal-python310.test @@ -194,7 +194,7 @@ x = 1 match x: case _a.b: pass -[file _a.py] +[fixture _a.py] b = 1 [out] MypyFile:1( diff --git a/test-data/unit/semanal-symtable.test b/test-data/unit/semanal-symtable.test index c886080557b0..1622fd1f1ad4 100644 --- a/test-data/unit/semanal-symtable.test +++ b/test-data/unit/semanal-symtable.test @@ -78,10 +78,6 @@ __main__: non_existing2 : Gdef/Var (__main__.non_existing2) : Any non_existing3 : Gdef/Var (__main__.non_existing3) : Any non_existing4 : Gdef/Var (__main__.non_existing4) : Any) -sys: - SymbolTable( - platform : Gdef/Var (sys.platform) : builtins.str - version_info : Gdef/Var (sys.version_info)) [case testDecorator] from typing import Callable diff --git a/test-data/unit/semanal-typealiases.test b/test-data/unit/semanal-typealiases.test index debc7ecdf722..88d234134350 100644 --- a/test-data/unit/semanal-typealiases.test +++ b/test-data/unit/semanal-typealiases.test @@ -92,7 +92,7 @@ import typing import _m A2 = _m.A x = 1 # type: A2 -[file _m.py] +[fixture _m.py] import typing class A: pass [out] @@ -255,7 +255,7 @@ MypyFile:1( import typing from _m import U def f(x: U) -> None: pass -[file _m.py] +[fixture _m.py] from typing import Union class A: pass U = Union[int, A] @@ -275,7 +275,7 @@ 
MypyFile:1( import typing import _m def f(x: _m.U) -> None: pass -[file _m.py] +[fixture _m.py] from typing import Union class A: pass U = Union[int, A] @@ -295,7 +295,7 @@ MypyFile:1( import typing from _m import A def f(x: A) -> None: pass -[file _m.py] +[fixture _m.py] import typing A = int [out] @@ -314,7 +314,7 @@ MypyFile:1( import typing import _m def f(x: _m.A) -> None: pass -[file _m.py] +[fixture _m.py] import typing A = int [out] @@ -385,7 +385,7 @@ from typing import Union from _m import U U2 = U x = 1 # type: U2 -[file _m.py] +[fixture _m.py] from typing import Union U = Union[int, str] [out] diff --git a/test-data/unit/semanal-types.test b/test-data/unit/semanal-types.test index 494d701b758a..05fc08d8a49e 100644 --- a/test-data/unit/semanal-types.test +++ b/test-data/unit/semanal-types.test @@ -303,7 +303,7 @@ MypyFile:1( import typing import _m typing.cast(_m.C, object) -[file _m.py] +[fixture _m.py] class C: pass [out] MypyFile:1( @@ -318,8 +318,8 @@ MypyFile:1( import typing import _m._n typing.cast(_m._n.C, object) -[file _m/__init__.py] -[file _m/_n.py] +[fixture _m/__init__.py] +[fixture _m/_n.py] class C: pass [out] MypyFile:1( @@ -1152,7 +1152,7 @@ from typing import Generic from _m import T class A(Generic[T]): y = None # type: T -[file _m.py] +[fixture _m.py] from typing import TypeVar T = TypeVar('T') [out] @@ -1175,7 +1175,7 @@ class A(Generic[_m.T]): a = None # type: _m.T def f(self, x: _m.T): b = None # type: _m.T -[file _m.py] +[fixture _m.py] from typing import TypeVar T = TypeVar('T') [out] @@ -1206,7 +1206,7 @@ MypyFile:1( import _m def f(x: _m.T) -> None: a = None # type: _m.T -[file _m.py] +[fixture _m.py] from typing import TypeVar T = TypeVar('T') [out] diff --git a/test-requirements.txt b/test-requirements.txt index aec11e87e96f..a7394e6d1472 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -6,7 +6,7 @@ filelock>=3.3.0 flake8==5.0.4 # must match version in .pre-commit-config.yaml flake8-bugbear==22.12.6 # must match version in .pre-commit-config.yaml flake8-noqa==1.3.0 # must match version in .pre-commit-config.yaml -isort[colors]==5.11.4 # must match version in .pre-commit-config.yaml +isort[colors]==5.11.5 # must match version in .pre-commit-config.yaml lxml>=4.9.1; (python_version<'3.11' or sys_platform!='win32') and python_version<'3.12' psutil>=4.0 # pytest 6.2.3 does not support Python 3.10
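
Several of the new inference tests in this patch (testInferringLvarTypesUnpackedFromIterableClassObject and the pythoneval case testEnumUnpackedViaMetaclass) exercise unpacking a class object whose metaclass defines __iter__. A minimal runtime sketch of that behaviour, shown for illustration only and not part of the patch:

    from enum import Enum

    class FooEnum(Enum):
        A = 1
        B = 2
        C = 3

    # EnumMeta defines __iter__, so the class object itself is iterable
    # and can be unpacked; each target receives an enum member.
    a, b, c = FooEnum
    print(a, b, c)

    # The same applies to any class whose metaclass defines __iter__.
    class Meta(type):
        def __iter__(cls):
            return iter([1, 2, 3])

    class Foo(metaclass=Meta):
        pass

    x, y, z = Foo
    print(x, y, z)  # 1 2 3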