diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..dec7126ba6d2f4265a069aabd99bcd9e5b3de4b5 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +python310/config-3.10-x86_64-linux-gnu/libpython3.10-pic.a filter=lfs diff=lfs merge=lfs -text +python310/config-3.10-x86_64-linux-gnu/libpython3.10.a filter=lfs diff=lfs merge=lfs -text +python310/config-3.10-x86_64-linux-gnu/libpython3.10.so filter=lfs diff=lfs merge=lfs -text diff --git a/python310/LICENSE.txt b/python310/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..f26bcf4d2de6eb136e31006ca3ab447d5e488adf --- /dev/null +++ b/python310/LICENSE.txt @@ -0,0 +1,279 @@ +A. HISTORY OF THE SOFTWARE +========================== + +Python was created in the early 1990s by Guido van Rossum at Stichting +Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands +as a successor of a language called ABC. Guido remains Python's +principal author, although it includes many contributions from others. + +In 1995, Guido continued his work on Python at the Corporation for +National Research Initiatives (CNRI, see https://www.cnri.reston.va.us) +in Reston, Virginia where he released several versions of the +software. + +In May 2000, Guido and the Python core development team moved to +BeOpen.com to form the BeOpen PythonLabs team. In October of the same +year, the PythonLabs team moved to Digital Creations, which became +Zope Corporation. In 2001, the Python Software Foundation (PSF, see +https://www.python.org/psf/) was formed, a non-profit organization +created specifically to own Python-related Intellectual Property. +Zope Corporation was a sponsoring member of the PSF. + +All Python releases are Open Source (see https://opensource.org for +the Open Source Definition). Historically, most, but not all, Python +releases have also been GPL-compatible; the table below summarizes +the various releases. + + Release Derived Year Owner GPL- + from compatible? (1) + + 0.9.0 thru 1.2 1991-1995 CWI yes + 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes + 1.6 1.5.2 2000 CNRI no + 2.0 1.6 2000 BeOpen.com no + 1.6.1 1.6 2001 CNRI yes (2) + 2.1 2.0+1.6.1 2001 PSF no + 2.0.1 2.0+1.6.1 2001 PSF yes + 2.1.1 2.1+2.0.1 2001 PSF yes + 2.1.2 2.1.1 2002 PSF yes + 2.1.3 2.1.2 2002 PSF yes + 2.2 and above 2.1.1 2001-now PSF yes + +Footnotes: + +(1) GPL-compatible doesn't mean that we're distributing Python under + the GPL. All Python licenses, unlike the GPL, let you distribute + a modified version without making your changes open source. The + GPL-compatible licenses make it possible to combine Python with + other software that is released under the GPL; the others don't. + +(2) According to Richard Stallman, 1.6.1 is not GPL-compatible, + because its license has a choice of law clause. According to + CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1 + is "not incompatible" with the GPL. + +Thanks to the many outside volunteers who have worked under Guido's +direction to make these releases possible. + + +B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON +=============================================================== + +Python software and documentation are licensed under the +Python Software Foundation License Version 2. 
+ +Starting with Python 3.8.6, examples, recipes, and other code in +the documentation are dual licensed under the PSF License Version 2 +and the Zero-Clause BSD license. + +Some software incorporated into Python is under different licenses. +The licenses are listed with code falling under that license. + + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0 +------------------------------------------- + +BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1 + +1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an +office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the +Individual or Organization ("Licensee") accessing and otherwise using +this software in source or binary form and its associated +documentation ("the Software"). + +2. 
Subject to the terms and conditions of this BeOpen Python License +Agreement, BeOpen hereby grants Licensee a non-exclusive, +royalty-free, world-wide license to reproduce, analyze, test, perform +and/or display publicly, prepare derivative works, distribute, and +otherwise use the Software alone or in any derivative version, +provided, however, that the BeOpen Python License is retained in the +Software, alone or in any derivative version prepared by Licensee. + +3. BeOpen is making the Software available to Licensee on an "AS IS" +basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE +SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS +AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY +DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +5. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +6. This License Agreement shall be governed by and interpreted in all +respects by the law of the State of California, excluding conflict of +law provisions. Nothing in this License Agreement shall be deemed to +create any relationship of agency, partnership, or joint venture +between BeOpen and Licensee. This License Agreement does not grant +permission to use BeOpen trademarks or trade names in a trademark +sense to endorse or promote products or services of Licensee, or any +third party. As an exception, the "BeOpen Python" logos available at +http://www.pythonlabs.com/logos.html may be used according to the +permissions granted on that web page. + +7. By copying, installing or otherwise using the software, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. + + +CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1 +--------------------------------------- + +1. This LICENSE AGREEMENT is between the Corporation for National +Research Initiatives, having an office at 1895 Preston White Drive, +Reston, VA 20191 ("CNRI"), and the Individual or Organization +("Licensee") accessing and otherwise using Python 1.6.1 software in +source or binary form and its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, CNRI +hereby grants Licensee a nonexclusive, royalty-free, world-wide +license to reproduce, analyze, test, perform and/or display publicly, +prepare derivative works, distribute, and otherwise use Python 1.6.1 +alone or in any derivative version, provided, however, that CNRI's +License Agreement and CNRI's notice of copyright, i.e., "Copyright (c) +1995-2001 Corporation for National Research Initiatives; All Rights +Reserved" are retained in Python 1.6.1 alone or in any derivative +version prepared by Licensee. Alternately, in lieu of CNRI's License +Agreement, Licensee may substitute the following text (omitting the +quotes): "Python 1.6.1 is made available subject to the terms and +conditions in CNRI's License Agreement. This Agreement together with +Python 1.6.1 may be located on the internet using the following +unique, persistent identifier (known as a handle): 1895.22/1013. 
This +Agreement may also be obtained from a proxy server on the internet +using the following URL: http://hdl.handle.net/1895.22/1013". + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python 1.6.1 or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python 1.6.1. + +4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS" +basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. This License Agreement shall be governed by the federal +intellectual property law of the United States, including without +limitation the federal copyright law, and, to the extent such +U.S. federal law does not apply, by the law of the Commonwealth of +Virginia, excluding Virginia's conflict of law provisions. +Notwithstanding the foregoing, with regard to derivative works based +on Python 1.6.1 that incorporate non-separable material that was +previously distributed under the GNU General Public License (GPL), the +law of the Commonwealth of Virginia shall govern this License +Agreement only as to issues arising under or with respect to +Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this +License Agreement shall be deemed to create any relationship of +agency, partnership, or joint venture between CNRI and Licensee. This +License Agreement does not grant permission to use CNRI trademarks or +trade name in a trademark sense to endorse or promote products or +services of Licensee, or any third party. + +8. By clicking on the "ACCEPT" button where indicated, or by copying, +installing or otherwise using Python 1.6.1, Licensee agrees to be +bound by the terms and conditions of this License Agreement. + + ACCEPT + + +CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2 +-------------------------------------------------- + +Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam, +The Netherlands. All rights reserved. + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose and without fee is hereby granted, +provided that the above copyright notice appear in all copies and that +both that copyright notice and this permission notice appear in +supporting documentation, and that the name of Stichting Mathematisch +Centrum or CWI not be used in advertising or publicity pertaining to +distribution of the software without specific, written prior +permission. 
+ +STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO +THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND +FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE +FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION +---------------------------------------------------------------------- + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH +REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, +INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM +LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR +OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR +PERFORMANCE OF THIS SOFTWARE. diff --git a/python310/abc.py b/python310/abc.py new file mode 100644 index 0000000000000000000000000000000000000000..3c552cebb4226c02bd6f741c5446623a0cd6a7ed --- /dev/null +++ b/python310/abc.py @@ -0,0 +1,188 @@ +# Copyright 2007 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Abstract Base Classes (ABCs) according to PEP 3119.""" + + +def abstractmethod(funcobj): + """A decorator indicating abstract methods. + + Requires that the metaclass is ABCMeta or derived from it. A + class that has a metaclass derived from ABCMeta cannot be + instantiated unless all of its abstract methods are overridden. + The abstract methods can be called using any of the normal + 'super' call mechanisms. abstractmethod() may be used to declare + abstract methods for properties and descriptors. + + Usage: + + class C(metaclass=ABCMeta): + @abstractmethod + def my_abstract_method(self, ...): + ... + """ + funcobj.__isabstractmethod__ = True + return funcobj + + +class abstractclassmethod(classmethod): + """A decorator indicating abstract classmethods. + + Deprecated, use 'classmethod' with 'abstractmethod' instead: + + class C(ABC): + @classmethod + @abstractmethod + def my_abstract_classmethod(cls, ...): + ... + + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractstaticmethod(staticmethod): + """A decorator indicating abstract staticmethods. + + Deprecated, use 'staticmethod' with 'abstractmethod' instead: + + class C(ABC): + @staticmethod + @abstractmethod + def my_abstract_staticmethod(...): + ... + + """ + + __isabstractmethod__ = True + + def __init__(self, callable): + callable.__isabstractmethod__ = True + super().__init__(callable) + + +class abstractproperty(property): + """A decorator indicating abstract properties. + + Deprecated, use 'property' with 'abstractmethod' instead: + + class C(ABC): + @property + @abstractmethod + def my_abstract_property(self): + ... 
+ + """ + + __isabstractmethod__ = True + + +try: + from _abc import (get_cache_token, _abc_init, _abc_register, + _abc_instancecheck, _abc_subclasscheck, _get_dump, + _reset_registry, _reset_caches) +except ImportError: + from _py_abc import ABCMeta, get_cache_token + ABCMeta.__module__ = 'abc' +else: + class ABCMeta(type): + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + """ + def __new__(mcls, name, bases, namespace, **kwargs): + cls = super().__new__(mcls, name, bases, namespace, **kwargs) + _abc_init(cls) + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ + return _abc_register(cls, subclass) + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + return _abc_instancecheck(cls, instance) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + return _abc_subclasscheck(cls, subclass) + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file) + print(f"Inv. counter: {get_cache_token()}", file=file) + (_abc_registry, _abc_cache, _abc_negative_cache, + _abc_negative_cache_version) = _get_dump(cls) + print(f"_abc_registry: {_abc_registry!r}", file=file) + print(f"_abc_cache: {_abc_cache!r}", file=file) + print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file) + print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}", + file=file) + + def _abc_registry_clear(cls): + """Clear the registry (for debugging or testing).""" + _reset_registry(cls) + + def _abc_caches_clear(cls): + """Clear the caches (for debugging or testing).""" + _reset_caches(cls) + + +def update_abstractmethods(cls): + """Recalculate the set of abstract methods of an abstract class. + + If a class has had one of its abstract methods implemented after the + class was created, the method will not be considered implemented until + this function is called. Alternatively, if a new abstract method has been + added to the class, it will only be considered an abstract method of the + class after this function is called. + + This function should be called before any use is made of the class, + usually in class decorators that add methods to the subject class. + + Returns cls, to allow usage as a class decorator. + + If cls is not an instance of ABCMeta, does nothing. + """ + if not hasattr(cls, '__abstractmethods__'): + # We check for __abstractmethods__ here because cls might by a C + # implementation or a python implementation (especially during + # testing), and we want to handle both cases. + return cls + + abstracts = set() + # Check the existing abstract methods of the parents, keep only the ones + # that are not implemented. 
+ for scls in cls.__bases__: + for name in getattr(scls, '__abstractmethods__', ()): + value = getattr(cls, name, None) + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + # Also add any other newly added abstract methods. + for name, value in cls.__dict__.items(): + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + cls.__abstractmethods__ = frozenset(abstracts) + return cls + + +class ABC(metaclass=ABCMeta): + """Helper class that provides a standard way to create an ABC using + inheritance. + """ + __slots__ = () diff --git a/python310/aifc.py b/python310/aifc.py new file mode 100644 index 0000000000000000000000000000000000000000..ed5da7d8936fdd94301accd4ac725ab0e5fe8877 --- /dev/null +++ b/python310/aifc.py @@ -0,0 +1,947 @@ +"""Stuff to parse AIFF-C and AIFF files. + +Unless explicitly stated otherwise, the description below is true +both for AIFF-C files and AIFF files. + +An AIFF-C file has the following structure. + + +-----------------+ + | FORM | + +-----------------+ + | | + +----+------------+ + | | AIFC | + | +------------+ + | | | + | | . | + | | . | + | | . | + +----+------------+ + +An AIFF file has the string "AIFF" instead of "AIFC". + +A chunk consists of an identifier (4 bytes) followed by a size (4 bytes, +big endian order), followed by the data. The size field does not include +the size of the 8 byte header. + +The following chunk types are recognized. + + FVER + (AIFF-C only). + MARK + <# of markers> (2 bytes) + list of markers: + (2 bytes, must be > 0) + (4 bytes) + ("pstring") + COMM + <# of channels> (2 bytes) + <# of sound frames> (4 bytes) + (2 bytes) + (10 bytes, IEEE 80-bit extended + floating point) + in AIFF-C files only: + (4 bytes) + ("pstring") + SSND + (4 bytes, not used by this program) + (4 bytes, not used by this program) + + +A pstring consists of 1 byte length, a string of characters, and 0 or 1 +byte pad to make the total length even. + +Usage. + +Reading AIFF files: + f = aifc.open(file, 'r') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods read(), seek(), and close(). +In some types of audio files, if the setpos() method is not used, +the seek() method is not necessary. + +This returns an instance of a class with the following public methods: + getnchannels() -- returns number of audio channels (1 for + mono, 2 for stereo) + getsampwidth() -- returns sample width in bytes + getframerate() -- returns sampling frequency + getnframes() -- returns number of audio frames + getcomptype() -- returns compression type ('NONE' for AIFF files) + getcompname() -- returns human-readable version of + compression type ('not compressed' for AIFF files) + getparams() -- returns a namedtuple consisting of all of the + above in the above order + getmarkers() -- get the list of marks in the audio file or None + if there are no marks + getmark(id) -- get mark with the specified id (raises an error + if the mark does not exist) + readframes(n) -- returns at most n frames of audio + rewind() -- rewind to the beginning of the audio stream + setpos(pos) -- seek to the specified position + tell() -- return the current position + close() -- close the instance (make it unusable) +The position returned by tell(), the position given to setpos() and +the position of marks are all compatible and have nothing to do with +the actual position in the file. +The close() method is called automatically when the class instance +is destroyed. 
+ +Writing AIFF files: + f = aifc.open(file, 'w') +where file is either the name of a file or an open file pointer. +The open file pointer must have methods write(), tell(), seek(), and +close(). + +This returns an instance of a class with the following public methods: + aiff() -- create an AIFF file (AIFF-C default) + aifc() -- create an AIFF-C file + setnchannels(n) -- set the number of channels + setsampwidth(n) -- set the sample width + setframerate(n) -- set the frame rate + setnframes(n) -- set the number of frames + setcomptype(type, name) + -- set the compression type and the + human-readable compression type + setparams(tuple) + -- set all parameters at once + setmark(id, pos, name) + -- add specified mark to the list of marks + tell() -- return current position in output file (useful + in combination with setmark()) + writeframesraw(data) + -- write audio frames without pathing up the + file header + writeframes(data) + -- write audio frames and patch up the file header + close() -- patch up the file header and close the + output file +You should set the parameters before the first writeframesraw or +writeframes. The total number of frames does not need to be set, +but when it is set to the correct value, the header does not have to +be patched up. +It is best to first set all parameters, perhaps possibly the +compression type, and then write audio frames using writeframesraw. +When all frames have been written, either call writeframes(b'') or +close() to patch up the sizes in the header. +Marks can be added anytime. If there are any marks, you must call +close() after all frames have been written. +The close() method is called automatically when the class instance +is destroyed. + +When a file is opened with the extension '.aiff', an AIFF file is +written, otherwise an AIFF-C file is written. This default can be +changed by calling aiff() or aifc() before the first writeframes or +writeframesraw. 
+""" + +import struct +import builtins +import warnings + +__all__ = ["Error", "open"] + +class Error(Exception): + pass + +_AIFC_version = 0xA2805140 # Version 1 of AIFF-C + +def _read_long(file): + try: + return struct.unpack('>l', file.read(4))[0] + except struct.error: + raise EOFError from None + +def _read_ulong(file): + try: + return struct.unpack('>L', file.read(4))[0] + except struct.error: + raise EOFError from None + +def _read_short(file): + try: + return struct.unpack('>h', file.read(2))[0] + except struct.error: + raise EOFError from None + +def _read_ushort(file): + try: + return struct.unpack('>H', file.read(2))[0] + except struct.error: + raise EOFError from None + +def _read_string(file): + length = ord(file.read(1)) + if length == 0: + data = b'' + else: + data = file.read(length) + if length & 1 == 0: + dummy = file.read(1) + return data + +_HUGE_VAL = 1.79769313486231e+308 # See + +def _read_float(f): # 10 bytes + expon = _read_short(f) # 2 bytes + sign = 1 + if expon < 0: + sign = -1 + expon = expon + 0x8000 + himant = _read_ulong(f) # 4 bytes + lomant = _read_ulong(f) # 4 bytes + if expon == himant == lomant == 0: + f = 0.0 + elif expon == 0x7FFF: + f = _HUGE_VAL + else: + expon = expon - 16383 + f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63) + return sign * f + +def _write_short(f, x): + f.write(struct.pack('>h', x)) + +def _write_ushort(f, x): + f.write(struct.pack('>H', x)) + +def _write_long(f, x): + f.write(struct.pack('>l', x)) + +def _write_ulong(f, x): + f.write(struct.pack('>L', x)) + +def _write_string(f, s): + if len(s) > 255: + raise ValueError("string exceeds maximum pstring length") + f.write(struct.pack('B', len(s))) + f.write(s) + if len(s) & 1 == 0: + f.write(b'\x00') + +def _write_float(f, x): + import math + if x < 0: + sign = 0x8000 + x = x * -1 + else: + sign = 0 + if x == 0: + expon = 0 + himant = 0 + lomant = 0 + else: + fmant, expon = math.frexp(x) + if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN + expon = sign|0x7FFF + himant = 0 + lomant = 0 + else: # Finite + expon = expon + 16382 + if expon < 0: # denormalized + fmant = math.ldexp(fmant, expon) + expon = 0 + expon = expon | sign + fmant = math.ldexp(fmant, 32) + fsmant = math.floor(fmant) + himant = int(fsmant) + fmant = math.ldexp(fmant - fsmant, 32) + fsmant = math.floor(fmant) + lomant = int(fsmant) + _write_ushort(f, expon) + _write_ulong(f, himant) + _write_ulong(f, lomant) + +from chunk import Chunk +from collections import namedtuple + +_aifc_params = namedtuple('_aifc_params', + 'nchannels sampwidth framerate nframes comptype compname') + +_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)' +_aifc_params.sampwidth.__doc__ = 'Sample width in bytes' +_aifc_params.framerate.__doc__ = 'Sampling frequency' +_aifc_params.nframes.__doc__ = 'Number of audio frames' +_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)' +_aifc_params.compname.__doc__ = ("""\ +A human-readable version of the compression type +('not compressed' for AIFF files)""") + + +class Aifc_read: + # Variables used in this class: + # + # These variables are available to the user though appropriate + # methods of this class: + # _file -- the open file with methods read(), close(), and seek() + # set through the __init__() method + # _nchannels -- the number of audio channels + # available through the getnchannels() method + # _nframes -- the number of audio frames + # available through the getnframes() method + # _sampwidth -- the 
number of bytes per audio sample + # available through the getsampwidth() method + # _framerate -- the sampling frequency + # available through the getframerate() method + # _comptype -- the AIFF-C compression type ('NONE' if AIFF) + # available through the getcomptype() method + # _compname -- the human-readable AIFF-C compression type + # available through the getcomptype() method + # _markers -- the marks in the audio file + # available through the getmarkers() and getmark() + # methods + # _soundpos -- the position in the audio stream + # available through the tell() method, set through the + # setpos() method + # + # These variables are used internally only: + # _version -- the AIFF-C version number + # _decomp -- the decompressor from builtin module cl + # _comm_chunk_read -- 1 iff the COMM chunk has been read + # _aifc -- 1 iff reading an AIFF-C file + # _ssnd_seek_needed -- 1 iff positioned correctly in audio + # file for readframes() + # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk + # _framesize -- size of one frame in the file + + _file = None # Set here since __del__ checks it + + def initfp(self, file): + self._version = 0 + self._convert = None + self._markers = [] + self._soundpos = 0 + self._file = file + chunk = Chunk(file) + if chunk.getname() != b'FORM': + raise Error('file does not start with FORM id') + formdata = chunk.read(4) + if formdata == b'AIFF': + self._aifc = 0 + elif formdata == b'AIFC': + self._aifc = 1 + else: + raise Error('not an AIFF or AIFF-C file') + self._comm_chunk_read = 0 + self._ssnd_chunk = None + while 1: + self._ssnd_seek_needed = 1 + try: + chunk = Chunk(self._file) + except EOFError: + break + chunkname = chunk.getname() + if chunkname == b'COMM': + self._read_comm_chunk(chunk) + self._comm_chunk_read = 1 + elif chunkname == b'SSND': + self._ssnd_chunk = chunk + dummy = chunk.read(8) + self._ssnd_seek_needed = 0 + elif chunkname == b'FVER': + self._version = _read_ulong(chunk) + elif chunkname == b'MARK': + self._readmark(chunk) + chunk.skip() + if not self._comm_chunk_read or not self._ssnd_chunk: + raise Error('COMM chunk and/or SSND chunk missing') + + def __init__(self, f): + if isinstance(f, str): + file_object = builtins.open(f, 'rb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + else: + # assume it is an open file object already + self.initfp(f) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + # + # User visible methods. 
+ # + def getfp(self): + return self._file + + def rewind(self): + self._ssnd_seek_needed = 1 + self._soundpos = 0 + + def close(self): + file = self._file + if file is not None: + self._file = None + file.close() + + def tell(self): + return self._soundpos + + def getnchannels(self): + return self._nchannels + + def getnframes(self): + return self._nframes + + def getsampwidth(self): + return self._sampwidth + + def getframerate(self): + return self._framerate + + def getcomptype(self): + return self._comptype + + def getcompname(self): + return self._compname + +## def getversion(self): +## return self._version + + def getparams(self): + return _aifc_params(self.getnchannels(), self.getsampwidth(), + self.getframerate(), self.getnframes(), + self.getcomptype(), self.getcompname()) + + def getmarkers(self): + if len(self._markers) == 0: + return None + return self._markers + + def getmark(self, id): + for marker in self._markers: + if id == marker[0]: + return marker + raise Error('marker {0!r} does not exist'.format(id)) + + def setpos(self, pos): + if pos < 0 or pos > self._nframes: + raise Error('position not in range') + self._soundpos = pos + self._ssnd_seek_needed = 1 + + def readframes(self, nframes): + if self._ssnd_seek_needed: + self._ssnd_chunk.seek(0) + dummy = self._ssnd_chunk.read(8) + pos = self._soundpos * self._framesize + if pos: + self._ssnd_chunk.seek(pos + 8) + self._ssnd_seek_needed = 0 + if nframes == 0: + return b'' + data = self._ssnd_chunk.read(nframes * self._framesize) + if self._convert and data: + data = self._convert(data) + self._soundpos = self._soundpos + len(data) // (self._nchannels + * self._sampwidth) + return data + + # + # Internal methods. + # + + def _alaw2lin(self, data): + import audioop + return audioop.alaw2lin(data, 2) + + def _ulaw2lin(self, data): + import audioop + return audioop.ulaw2lin(data, 2) + + def _adpcm2lin(self, data): + import audioop + if not hasattr(self, '_adpcmstate'): + # first time + self._adpcmstate = None + data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate) + return data + + def _read_comm_chunk(self, chunk): + self._nchannels = _read_short(chunk) + self._nframes = _read_long(chunk) + self._sampwidth = (_read_short(chunk) + 7) // 8 + self._framerate = int(_read_float(chunk)) + if self._sampwidth <= 0: + raise Error('bad sample width') + if self._nchannels <= 0: + raise Error('bad # of channels') + self._framesize = self._nchannels * self._sampwidth + if self._aifc: + #DEBUG: SGI's soundeditor produces a bad size :-( + kludge = 0 + if chunk.chunksize == 18: + kludge = 1 + warnings.warn('Warning: bad COMM chunk size') + chunk.chunksize = 23 + #DEBUG end + self._comptype = chunk.read(4) + #DEBUG start + if kludge: + length = ord(chunk.file.read(1)) + if length & 1 == 0: + length = length + 1 + chunk.chunksize = chunk.chunksize + length + chunk.file.seek(-1, 1) + #DEBUG end + self._compname = _read_string(chunk) + if self._comptype != b'NONE': + if self._comptype == b'G722': + self._convert = self._adpcm2lin + elif self._comptype in (b'ulaw', b'ULAW'): + self._convert = self._ulaw2lin + elif self._comptype in (b'alaw', b'ALAW'): + self._convert = self._alaw2lin + else: + raise Error('unsupported compression type') + self._sampwidth = 2 + else: + self._comptype = b'NONE' + self._compname = b'not compressed' + + def _readmark(self, chunk): + nmarkers = _read_short(chunk) + # Some files appear to contain invalid counts. + # Cope with this by testing for EOF. 
+ try: + for i in range(nmarkers): + id = _read_short(chunk) + pos = _read_long(chunk) + name = _read_string(chunk) + if pos or name: + # some files appear to have + # dummy markers consisting of + # a position 0 and name '' + self._markers.append((id, pos, name)) + except EOFError: + w = ('Warning: MARK chunk contains only %s marker%s instead of %s' % + (len(self._markers), '' if len(self._markers) == 1 else 's', + nmarkers)) + warnings.warn(w) + +class Aifc_write: + # Variables used in this class: + # + # These variables are user settable through appropriate methods + # of this class: + # _file -- the open file with methods write(), close(), tell(), seek() + # set through the __init__() method + # _comptype -- the AIFF-C compression type ('NONE' in AIFF) + # set through the setcomptype() or setparams() method + # _compname -- the human-readable AIFF-C compression type + # set through the setcomptype() or setparams() method + # _nchannels -- the number of audio channels + # set through the setnchannels() or setparams() method + # _sampwidth -- the number of bytes per audio sample + # set through the setsampwidth() or setparams() method + # _framerate -- the sampling frequency + # set through the setframerate() or setparams() method + # _nframes -- the number of audio frames written to the header + # set through the setnframes() or setparams() method + # _aifc -- whether we're writing an AIFF-C file or an AIFF file + # set through the aifc() method, reset through the + # aiff() method + # + # These variables are used internally only: + # _version -- the AIFF-C version number + # _comp -- the compressor from builtin module cl + # _nframeswritten -- the number of audio frames actually written + # _datalength -- the size of the audio samples written to the header + # _datawritten -- the size of the audio samples actually written + + _file = None # Set here since __del__ checks it + + def __init__(self, f): + if isinstance(f, str): + file_object = builtins.open(f, 'wb') + try: + self.initfp(file_object) + except: + file_object.close() + raise + + # treat .aiff file extensions as non-compressed audio + if f.endswith('.aiff'): + self._aifc = 0 + else: + # assume it is an open file object already + self.initfp(f) + + def initfp(self, file): + self._file = file + self._version = _AIFC_version + self._comptype = b'NONE' + self._compname = b'not compressed' + self._convert = None + self._nchannels = 0 + self._sampwidth = 0 + self._framerate = 0 + self._nframes = 0 + self._nframeswritten = 0 + self._datawritten = 0 + self._datalength = 0 + self._markers = [] + self._marklength = 0 + self._aifc = 1 # AIFF-C is default + + def __del__(self): + self.close() + + def __enter__(self): + return self + + def __exit__(self, *args): + self.close() + + # + # User visible methods. 
+ # + def aiff(self): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + self._aifc = 0 + + def aifc(self): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + self._aifc = 1 + + def setnchannels(self, nchannels): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if nchannels < 1: + raise Error('bad # of channels') + self._nchannels = nchannels + + def getnchannels(self): + if not self._nchannels: + raise Error('number of channels not set') + return self._nchannels + + def setsampwidth(self, sampwidth): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if sampwidth < 1 or sampwidth > 4: + raise Error('bad sample width') + self._sampwidth = sampwidth + + def getsampwidth(self): + if not self._sampwidth: + raise Error('sample width not set') + return self._sampwidth + + def setframerate(self, framerate): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if framerate <= 0: + raise Error('bad frame rate') + self._framerate = framerate + + def getframerate(self): + if not self._framerate: + raise Error('frame rate not set') + return self._framerate + + def setnframes(self, nframes): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + self._nframes = nframes + + def getnframes(self): + return self._nframeswritten + + def setcomptype(self, comptype, compname): + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if comptype not in (b'NONE', b'ulaw', b'ULAW', + b'alaw', b'ALAW', b'G722'): + raise Error('unsupported compression type') + self._comptype = comptype + self._compname = compname + + def getcomptype(self): + return self._comptype + + def getcompname(self): + return self._compname + +## def setversion(self, version): +## if self._nframeswritten: +## raise Error, 'cannot change parameters after starting to write' +## self._version = version + + def setparams(self, params): + nchannels, sampwidth, framerate, nframes, comptype, compname = params + if self._nframeswritten: + raise Error('cannot change parameters after starting to write') + if comptype not in (b'NONE', b'ulaw', b'ULAW', + b'alaw', b'ALAW', b'G722'): + raise Error('unsupported compression type') + self.setnchannels(nchannels) + self.setsampwidth(sampwidth) + self.setframerate(framerate) + self.setnframes(nframes) + self.setcomptype(comptype, compname) + + def getparams(self): + if not self._nchannels or not self._sampwidth or not self._framerate: + raise Error('not all parameters set') + return _aifc_params(self._nchannels, self._sampwidth, self._framerate, + self._nframes, self._comptype, self._compname) + + def setmark(self, id, pos, name): + if id <= 0: + raise Error('marker ID must be > 0') + if pos < 0: + raise Error('marker position must be >= 0') + if not isinstance(name, bytes): + raise Error('marker name must be bytes') + for i in range(len(self._markers)): + if id == self._markers[i][0]: + self._markers[i] = id, pos, name + return + self._markers.append((id, pos, name)) + + def getmark(self, id): + for marker in self._markers: + if id == marker[0]: + return marker + raise Error('marker {0!r} does not exist'.format(id)) + + def getmarkers(self): + if len(self._markers) == 0: + return None + return self._markers + + def tell(self): + return self._nframeswritten + + def writeframesraw(self, data): + if not 
isinstance(data, (bytes, bytearray)): + data = memoryview(data).cast('B') + self._ensure_header_written(len(data)) + nframes = len(data) // (self._sampwidth * self._nchannels) + if self._convert: + data = self._convert(data) + self._file.write(data) + self._nframeswritten = self._nframeswritten + nframes + self._datawritten = self._datawritten + len(data) + + def writeframes(self, data): + self.writeframesraw(data) + if self._nframeswritten != self._nframes or \ + self._datalength != self._datawritten: + self._patchheader() + + def close(self): + if self._file is None: + return + try: + self._ensure_header_written(0) + if self._datawritten & 1: + # quick pad to even size + self._file.write(b'\x00') + self._datawritten = self._datawritten + 1 + self._writemarkers() + if self._nframeswritten != self._nframes or \ + self._datalength != self._datawritten or \ + self._marklength: + self._patchheader() + finally: + # Prevent ref cycles + self._convert = None + f = self._file + self._file = None + f.close() + + # + # Internal methods. + # + + def _lin2alaw(self, data): + import audioop + return audioop.lin2alaw(data, 2) + + def _lin2ulaw(self, data): + import audioop + return audioop.lin2ulaw(data, 2) + + def _lin2adpcm(self, data): + import audioop + if not hasattr(self, '_adpcmstate'): + self._adpcmstate = None + data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate) + return data + + def _ensure_header_written(self, datasize): + if not self._nframeswritten: + if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): + if not self._sampwidth: + self._sampwidth = 2 + if self._sampwidth != 2: + raise Error('sample width must be 2 when compressing ' + 'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)') + if not self._nchannels: + raise Error('# channels not specified') + if not self._sampwidth: + raise Error('sample width not specified') + if not self._framerate: + raise Error('sampling rate not specified') + self._write_header(datasize) + + def _init_compression(self): + if self._comptype == b'G722': + self._convert = self._lin2adpcm + elif self._comptype in (b'ulaw', b'ULAW'): + self._convert = self._lin2ulaw + elif self._comptype in (b'alaw', b'ALAW'): + self._convert = self._lin2alaw + + def _write_header(self, initlength): + if self._aifc and self._comptype != b'NONE': + self._init_compression() + self._file.write(b'FORM') + if not self._nframes: + self._nframes = initlength // (self._nchannels * self._sampwidth) + self._datalength = self._nframes * self._nchannels * self._sampwidth + if self._datalength & 1: + self._datalength = self._datalength + 1 + if self._aifc: + if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'): + self._datalength = self._datalength // 2 + if self._datalength & 1: + self._datalength = self._datalength + 1 + elif self._comptype == b'G722': + self._datalength = (self._datalength + 3) // 4 + if self._datalength & 1: + self._datalength = self._datalength + 1 + try: + self._form_length_pos = self._file.tell() + except (AttributeError, OSError): + self._form_length_pos = None + commlength = self._write_form_length(self._datalength) + if self._aifc: + self._file.write(b'AIFC') + self._file.write(b'FVER') + _write_ulong(self._file, 4) + _write_ulong(self._file, self._version) + else: + self._file.write(b'AIFF') + self._file.write(b'COMM') + _write_ulong(self._file, commlength) + _write_short(self._file, self._nchannels) + if self._form_length_pos is not None: + self._nframes_pos = self._file.tell() + _write_ulong(self._file, self._nframes) + if 
self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'): + _write_short(self._file, 8) + else: + _write_short(self._file, self._sampwidth * 8) + _write_float(self._file, self._framerate) + if self._aifc: + self._file.write(self._comptype) + _write_string(self._file, self._compname) + self._file.write(b'SSND') + if self._form_length_pos is not None: + self._ssnd_length_pos = self._file.tell() + _write_ulong(self._file, self._datalength + 8) + _write_ulong(self._file, 0) + _write_ulong(self._file, 0) + + def _write_form_length(self, datalength): + if self._aifc: + commlength = 18 + 5 + len(self._compname) + if commlength & 1: + commlength = commlength + 1 + verslength = 12 + else: + commlength = 18 + verslength = 0 + _write_ulong(self._file, 4 + verslength + self._marklength + \ + 8 + commlength + 16 + datalength) + return commlength + + def _patchheader(self): + curpos = self._file.tell() + if self._datawritten & 1: + datalength = self._datawritten + 1 + self._file.write(b'\x00') + else: + datalength = self._datawritten + if datalength == self._datalength and \ + self._nframes == self._nframeswritten and \ + self._marklength == 0: + self._file.seek(curpos, 0) + return + self._file.seek(self._form_length_pos, 0) + dummy = self._write_form_length(datalength) + self._file.seek(self._nframes_pos, 0) + _write_ulong(self._file, self._nframeswritten) + self._file.seek(self._ssnd_length_pos, 0) + _write_ulong(self._file, datalength + 8) + self._file.seek(curpos, 0) + self._nframes = self._nframeswritten + self._datalength = datalength + + def _writemarkers(self): + if len(self._markers) == 0: + return + self._file.write(b'MARK') + length = 2 + for marker in self._markers: + id, pos, name = marker + length = length + len(name) + 1 + 6 + if len(name) & 1 == 0: + length = length + 1 + _write_ulong(self._file, length) + self._marklength = length + 8 + _write_short(self._file, len(self._markers)) + for marker in self._markers: + id, pos, name = marker + _write_short(self._file, id) + _write_ulong(self._file, pos) + _write_string(self._file, name) + +def open(f, mode=None): + if mode is None: + if hasattr(f, 'mode'): + mode = f.mode + else: + mode = 'rb' + if mode in ('r', 'rb'): + return Aifc_read(f) + elif mode in ('w', 'wb'): + return Aifc_write(f) + else: + raise Error("mode must be 'r', 'rb', 'w', or 'wb'") + + +if __name__ == '__main__': + import sys + if not sys.argv[1:]: + sys.argv.append('/usr/demos/data/audio/bach.aiff') + fn = sys.argv[1] + with open(fn, 'r') as f: + print("Reading", fn) + print("nchannels =", f.getnchannels()) + print("nframes =", f.getnframes()) + print("sampwidth =", f.getsampwidth()) + print("framerate =", f.getframerate()) + print("comptype =", f.getcomptype()) + print("compname =", f.getcompname()) + if sys.argv[2:]: + gn = sys.argv[2] + print("Writing", gn) + with open(gn, 'w') as g: + g.setparams(f.getparams()) + while 1: + data = f.readframes(1024) + if not data: + break + g.writeframes(data) + print("Done.") diff --git a/python310/antigravity.py b/python310/antigravity.py new file mode 100644 index 0000000000000000000000000000000000000000..6dc520733577af68cd7926105204baf807251a39 --- /dev/null +++ b/python310/antigravity.py @@ -0,0 +1,17 @@ + +import webbrowser +import hashlib + +webbrowser.open("https://xkcd.com/353/") + +def geohash(latitude, longitude, datedow): + '''Compute geohash() using the Munroe algorithm. 
+ + >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68') + 37.857713 -122.544543 + + ''' + # https://xkcd.com/426/ + h = hashlib.md5(datedow, usedforsecurity=False).hexdigest() + p, q = [('%f' % float.fromhex('0.' + x)) for x in (h[:16], h[16:32])] + print('%d%s %d%s' % (latitude, p[1:], longitude, q[1:])) diff --git a/python310/argparse.py b/python310/argparse.py new file mode 100644 index 0000000000000000000000000000000000000000..81f8e8ab18b3b9ca4e9fe3243d483e09229ca80d --- /dev/null +++ b/python310/argparse.py @@ -0,0 +1,2606 @@ +# Author: Steven J. Bethard . +# New maintainer as of 29 August 2019: Raymond Hettinger + +"""Command-line parsing library + +This module is an optparse-inspired command-line parsing library that: + + - handles both optional and positional arguments + - produces highly informative usage messages + - supports parsers that dispatch to sub-parsers + +The following is a simple usage example that sums integers from the +command-line and writes the result to a file:: + + parser = argparse.ArgumentParser( + description='sum the integers at the command line') + parser.add_argument( + 'integers', metavar='int', nargs='+', type=int, + help='an integer to be summed') + parser.add_argument( + '--log', default=sys.stdout, type=argparse.FileType('w'), + help='the file where the sum should be written') + args = parser.parse_args() + args.log.write('%s' % sum(args.integers)) + args.log.close() + +The module contains the following public classes: + + - ArgumentParser -- The main entry point for command-line parsing. As the + example above shows, the add_argument() method is used to populate + the parser with actions for optional and positional arguments. Then + the parse_args() method is invoked to convert the args at the + command-line into an object with attributes. + + - ArgumentError -- The exception raised by ArgumentParser objects when + there are errors with the parser's actions. Errors raised while + parsing the command-line are caught by ArgumentParser and emitted + as command-line messages. + + - FileType -- A factory for defining types of files to be created. As the + example above shows, instances of FileType are typically passed as + the type= argument of add_argument() calls. + + - Action -- The base class for parser actions. Typically actions are + selected by passing strings like 'store_true' or 'append_const' to + the action= argument of add_argument(). However, for greater + customization of ArgumentParser actions, subclasses of Action may + be defined and passed as the action= argument. + + - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter, + ArgumentDefaultsHelpFormatter -- Formatter classes which + may be passed as the formatter_class= argument to the + ArgumentParser constructor. HelpFormatter is the default, + RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser + not to change the formatting for help text, and + ArgumentDefaultsHelpFormatter adds information about argument defaults + to the help. + +All other classes in this module are considered implementation details. +(Also note that HelpFormatter and RawDescriptionHelpFormatter are only +considered public as object names -- the API of the formatter objects is +still considered an implementation detail.) 
+""" + +__version__ = '1.1' +__all__ = [ + 'ArgumentParser', + 'ArgumentError', + 'ArgumentTypeError', + 'BooleanOptionalAction', + 'FileType', + 'HelpFormatter', + 'ArgumentDefaultsHelpFormatter', + 'RawDescriptionHelpFormatter', + 'RawTextHelpFormatter', + 'MetavarTypeHelpFormatter', + 'Namespace', + 'Action', + 'ONE_OR_MORE', + 'OPTIONAL', + 'PARSER', + 'REMAINDER', + 'SUPPRESS', + 'ZERO_OR_MORE', +] + + +import os as _os +import re as _re +import sys as _sys + +try: + from gettext import gettext as _, ngettext +except ImportError: + def _(message): + return message + def ngettext(singular,plural,n): + if n == 1: + return singular + else: + return plural + +SUPPRESS = '==SUPPRESS==' + +OPTIONAL = '?' +ZERO_OR_MORE = '*' +ONE_OR_MORE = '+' +PARSER = 'A...' +REMAINDER = '...' +_UNRECOGNIZED_ARGS_ATTR = '_unrecognized_args' + +# ============================= +# Utility functions and classes +# ============================= + +class _AttributeHolder(object): + """Abstract base class that provides __repr__. + + The __repr__ method returns a string in the format:: + ClassName(attr=name, attr=name, ...) + The attributes are determined either by a class-level attribute, + '_kwarg_names', or by inspecting the instance __dict__. + """ + + def __repr__(self): + type_name = type(self).__name__ + arg_strings = [] + star_args = {} + for arg in self._get_args(): + arg_strings.append(repr(arg)) + for name, value in self._get_kwargs(): + if name.isidentifier(): + arg_strings.append('%s=%r' % (name, value)) + else: + star_args[name] = value + if star_args: + arg_strings.append('**%s' % repr(star_args)) + return '%s(%s)' % (type_name, ', '.join(arg_strings)) + + def _get_kwargs(self): + return list(self.__dict__.items()) + + def _get_args(self): + return [] + + +def _copy_items(items): + if items is None: + return [] + # The copy module is used only in the 'append' and 'append_const' + # actions, and it is needed only when the default value isn't a list. + # Delay its import for speeding up the common case. + if type(items) is list: + return items[:] + import copy + return copy.copy(items) + + +# =============== +# Formatting Help +# =============== + +class HelpFormatter(object): + """Formatter for generating usage messages and argument help strings. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def __init__(self, + prog, + indent_increment=2, + max_help_position=24, + width=None): + + # default setting for width + if width is None: + try: + import shutil as _shutil + width = _shutil.get_terminal_size().columns + width -= 2 + except ImportError: + width = 70 + + self._prog = prog + self._indent_increment = indent_increment + self._max_help_position = min(max_help_position, + max(width - 20, indent_increment * 2)) + self._width = width + + self._current_indent = 0 + self._level = 0 + self._action_max_length = 0 + + self._root_section = self._Section(self, None) + self._current_section = self._root_section + + self._whitespace_matcher = _re.compile(r'\s+', _re.ASCII) + self._long_break_matcher = _re.compile(r'\n\n\n+') + + # =============================== + # Section and indentation methods + # =============================== + def _indent(self): + self._current_indent += self._indent_increment + self._level += 1 + + def _dedent(self): + self._current_indent -= self._indent_increment + assert self._current_indent >= 0, 'Indent decreased below 0.' 
+ self._level -= 1 + + class _Section(object): + + def __init__(self, formatter, parent, heading=None): + self.formatter = formatter + self.parent = parent + self.heading = heading + self.items = [] + + def format_help(self): + # format the indented section + if self.parent is not None: + self.formatter._indent() + join = self.formatter._join_parts + item_help = join([func(*args) for func, args in self.items]) + if self.parent is not None: + self.formatter._dedent() + + # return nothing if the section was empty + if not item_help: + return '' + + # add the heading if the section was non-empty + if self.heading is not SUPPRESS and self.heading is not None: + current_indent = self.formatter._current_indent + heading = '%*s%s:\n' % (current_indent, '', self.heading) + else: + heading = '' + + # join the section-initial newline, the heading and the help + return join(['\n', heading, item_help, '\n']) + + def _add_item(self, func, args): + self._current_section.items.append((func, args)) + + # ======================== + # Message building methods + # ======================== + def start_section(self, heading): + self._indent() + section = self._Section(self, self._current_section, heading) + self._add_item(section.format_help, []) + self._current_section = section + + def end_section(self): + self._current_section = self._current_section.parent + self._dedent() + + def add_text(self, text): + if text is not SUPPRESS and text is not None: + self._add_item(self._format_text, [text]) + + def add_usage(self, usage, actions, groups, prefix=None): + if usage is not SUPPRESS: + args = usage, actions, groups, prefix + self._add_item(self._format_usage, args) + + def add_argument(self, action): + if action.help is not SUPPRESS: + + # find all invocations + get_invocation = self._format_action_invocation + invocations = [get_invocation(action)] + for subaction in self._iter_indented_subactions(action): + invocations.append(get_invocation(subaction)) + + # update the maximum item length + invocation_length = max(map(len, invocations)) + action_length = invocation_length + self._current_indent + self._action_max_length = max(self._action_max_length, + action_length) + + # add the item to the list + self._add_item(self._format_action, [action]) + + def add_arguments(self, actions): + for action in actions: + self.add_argument(action) + + # ======================= + # Help-formatting methods + # ======================= + def format_help(self): + help = self._root_section.format_help() + if help: + help = self._long_break_matcher.sub('\n\n', help) + help = help.strip('\n') + '\n' + return help + + def _join_parts(self, part_strings): + return ''.join([part + for part in part_strings + if part and part is not SUPPRESS]) + + def _format_usage(self, usage, actions, groups, prefix): + if prefix is None: + prefix = _('usage: ') + + # if usage is specified, use that + if usage is not None: + usage = usage % dict(prog=self._prog) + + # if no optionals or positionals are available, usage is just prog + elif usage is None and not actions: + usage = '%(prog)s' % dict(prog=self._prog) + + # if optionals and positionals are available, calculate usage + elif usage is None: + prog = '%(prog)s' % dict(prog=self._prog) + + # split optionals from positionals + optionals = [] + positionals = [] + for action in actions: + if action.option_strings: + optionals.append(action) + else: + positionals.append(action) + + # build full usage string + format = self._format_actions_usage + action_usage = format(optionals + positionals, 
groups) + usage = ' '.join([s for s in [prog, action_usage] if s]) + + # wrap the usage parts if it's too long + text_width = self._width - self._current_indent + if len(prefix) + len(usage) > text_width: + + # break usage into wrappable parts + part_regexp = ( + r'\(.*?\)+(?=\s|$)|' + r'\[.*?\]+(?=\s|$)|' + r'\S+' + ) + opt_usage = format(optionals, groups) + pos_usage = format(positionals, groups) + opt_parts = _re.findall(part_regexp, opt_usage) + pos_parts = _re.findall(part_regexp, pos_usage) + assert ' '.join(opt_parts) == opt_usage + assert ' '.join(pos_parts) == pos_usage + + # helper for wrapping lines + def get_lines(parts, indent, prefix=None): + lines = [] + line = [] + if prefix is not None: + line_len = len(prefix) - 1 + else: + line_len = len(indent) - 1 + for part in parts: + if line_len + 1 + len(part) > text_width and line: + lines.append(indent + ' '.join(line)) + line = [] + line_len = len(indent) - 1 + line.append(part) + line_len += len(part) + 1 + if line: + lines.append(indent + ' '.join(line)) + if prefix is not None: + lines[0] = lines[0][len(indent):] + return lines + + # if prog is short, follow it with optionals or positionals + if len(prefix) + len(prog) <= 0.75 * text_width: + indent = ' ' * (len(prefix) + len(prog) + 1) + if opt_parts: + lines = get_lines([prog] + opt_parts, indent, prefix) + lines.extend(get_lines(pos_parts, indent)) + elif pos_parts: + lines = get_lines([prog] + pos_parts, indent, prefix) + else: + lines = [prog] + + # if prog is long, put it on its own line + else: + indent = ' ' * len(prefix) + parts = opt_parts + pos_parts + lines = get_lines(parts, indent) + if len(lines) > 1: + lines = [] + lines.extend(get_lines(opt_parts, indent)) + lines.extend(get_lines(pos_parts, indent)) + lines = [prog] + lines + + # join lines into usage + usage = '\n'.join(lines) + + # prefix with 'usage:' + return '%s%s\n\n' % (prefix, usage) + + def _format_actions_usage(self, actions, groups): + # find group indices and identify actions in groups + group_actions = set() + inserts = {} + for group in groups: + if not group._group_actions: + raise ValueError(f'empty group {group}') + + try: + start = actions.index(group._group_actions[0]) + except ValueError: + continue + else: + group_action_count = len(group._group_actions) + end = start + group_action_count + if actions[start:end] == group._group_actions: + + suppressed_actions_count = 0 + for action in group._group_actions: + group_actions.add(action) + if action.help is SUPPRESS: + suppressed_actions_count += 1 + + exposed_actions_count = group_action_count - suppressed_actions_count + + if not group.required: + if start in inserts: + inserts[start] += ' [' + else: + inserts[start] = '[' + if end in inserts: + inserts[end] += ']' + else: + inserts[end] = ']' + elif exposed_actions_count > 1: + if start in inserts: + inserts[start] += ' (' + else: + inserts[start] = '(' + if end in inserts: + inserts[end] += ')' + else: + inserts[end] = ')' + for i in range(start + 1, end): + inserts[i] = '|' + + # collect all actions format strings + parts = [] + for i, action in enumerate(actions): + + # suppressed arguments are marked with None + # remove | separators for suppressed arguments + if action.help is SUPPRESS: + parts.append(None) + if inserts.get(i) == '|': + inserts.pop(i) + elif inserts.get(i + 1) == '|': + inserts.pop(i + 1) + + # produce all arg strings + elif not action.option_strings: + default = self._get_default_metavar_for_positional(action) + part = self._format_args(action, default) + + # if 
it's in a group, strip the outer [] + if action in group_actions: + if part[0] == '[' and part[-1] == ']': + part = part[1:-1] + + # add the action string to the list + parts.append(part) + + # produce the first way to invoke the option in brackets + else: + option_string = action.option_strings[0] + + # if the Optional doesn't take a value, format is: + # -s or --long + if action.nargs == 0: + part = action.format_usage() + + # if the Optional takes a value, format is: + # -s ARGS or --long ARGS + else: + default = self._get_default_metavar_for_optional(action) + args_string = self._format_args(action, default) + part = '%s %s' % (option_string, args_string) + + # make it look optional if it's not required or in a group + if not action.required and action not in group_actions: + part = '[%s]' % part + + # add the action string to the list + parts.append(part) + + # insert things at the necessary indices + for i in sorted(inserts, reverse=True): + parts[i:i] = [inserts[i]] + + # join all the action items with spaces + text = ' '.join([item for item in parts if item is not None]) + + # clean up separators for mutually exclusive groups + open = r'[\[(]' + close = r'[\])]' + text = _re.sub(r'(%s) ' % open, r'\1', text) + text = _re.sub(r' (%s)' % close, r'\1', text) + text = _re.sub(r'%s *%s' % (open, close), r'', text) + text = text.strip() + + # return the text + return text + + def _format_text(self, text): + if '%(prog)' in text: + text = text % dict(prog=self._prog) + text_width = max(self._width - self._current_indent, 11) + indent = ' ' * self._current_indent + return self._fill_text(text, text_width, indent) + '\n\n' + + def _format_action(self, action): + # determine the required width and the entry label + help_position = min(self._action_max_length + 2, + self._max_help_position) + help_width = max(self._width - help_position, 11) + action_width = help_position - self._current_indent - 2 + action_header = self._format_action_invocation(action) + + # no help; start on same line and add a final newline + if not action.help: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + + # short action name; start on the same line and pad two spaces + elif len(action_header) <= action_width: + tup = self._current_indent, '', action_width, action_header + action_header = '%*s%-*s ' % tup + indent_first = 0 + + # long action name; start on the next line + else: + tup = self._current_indent, '', action_header + action_header = '%*s%s\n' % tup + indent_first = help_position + + # collect the pieces of the action help + parts = [action_header] + + # if there was help for the action, add lines of help text + if action.help and action.help.strip(): + help_text = self._expand_help(action) + if help_text: + help_lines = self._split_lines(help_text, help_width) + parts.append('%*s%s\n' % (indent_first, '', help_lines[0])) + for line in help_lines[1:]: + parts.append('%*s%s\n' % (help_position, '', line)) + + # or add a newline if the description doesn't end with one + elif not action_header.endswith('\n'): + parts.append('\n') + + # if there are any sub-actions, add their help as well + for subaction in self._iter_indented_subactions(action): + parts.append(self._format_action(subaction)) + + # return a single string + return self._join_parts(parts) + + def _format_action_invocation(self, action): + if not action.option_strings: + default = self._get_default_metavar_for_positional(action) + metavar, = self._metavar_formatter(action, default)(1) + return metavar + + else: + 
parts = [] + + # if the Optional doesn't take a value, format is: + # -s, --long + if action.nargs == 0: + parts.extend(action.option_strings) + + # if the Optional takes a value, format is: + # -s ARGS, --long ARGS + else: + default = self._get_default_metavar_for_optional(action) + args_string = self._format_args(action, default) + for option_string in action.option_strings: + parts.append('%s %s' % (option_string, args_string)) + + return ', '.join(parts) + + def _metavar_formatter(self, action, default_metavar): + if action.metavar is not None: + result = action.metavar + elif action.choices is not None: + choice_strs = [str(choice) for choice in action.choices] + result = '{%s}' % ','.join(choice_strs) + else: + result = default_metavar + + def format(tuple_size): + if isinstance(result, tuple): + return result + else: + return (result, ) * tuple_size + return format + + def _format_args(self, action, default_metavar): + get_metavar = self._metavar_formatter(action, default_metavar) + if action.nargs is None: + result = '%s' % get_metavar(1) + elif action.nargs == OPTIONAL: + result = '[%s]' % get_metavar(1) + elif action.nargs == ZERO_OR_MORE: + metavar = get_metavar(1) + if len(metavar) == 2: + result = '[%s [%s ...]]' % metavar + else: + result = '[%s ...]' % metavar + elif action.nargs == ONE_OR_MORE: + result = '%s [%s ...]' % get_metavar(2) + elif action.nargs == REMAINDER: + result = '...' + elif action.nargs == PARSER: + result = '%s ...' % get_metavar(1) + elif action.nargs == SUPPRESS: + result = '' + else: + try: + formats = ['%s' for _ in range(action.nargs)] + except TypeError: + raise ValueError("invalid nargs value") from None + result = ' '.join(formats) % get_metavar(action.nargs) + return result + + def _expand_help(self, action): + params = dict(vars(action), prog=self._prog) + for name in list(params): + if params[name] is SUPPRESS: + del params[name] + for name in list(params): + if hasattr(params[name], '__name__'): + params[name] = params[name].__name__ + if params.get('choices') is not None: + choices_str = ', '.join([str(c) for c in params['choices']]) + params['choices'] = choices_str + return self._get_help_string(action) % params + + def _iter_indented_subactions(self, action): + try: + get_subactions = action._get_subactions + except AttributeError: + pass + else: + self._indent() + yield from get_subactions() + self._dedent() + + def _split_lines(self, text, width): + text = self._whitespace_matcher.sub(' ', text).strip() + # The textwrap module is used only for formatting help. + # Delay its import for speeding up the common usage of argparse. + import textwrap + return textwrap.wrap(text, width) + + def _fill_text(self, text, width, indent): + text = self._whitespace_matcher.sub(' ', text).strip() + import textwrap + return textwrap.fill(text, width, + initial_indent=indent, + subsequent_indent=indent) + + def _get_help_string(self, action): + return action.help + + def _get_default_metavar_for_optional(self, action): + return action.dest.upper() + + def _get_default_metavar_for_positional(self, action): + return action.dest + + +class RawDescriptionHelpFormatter(HelpFormatter): + """Help message formatter which retains any formatting in descriptions. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. 
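+
+    Illustrative use (editor's sketch, not part of the upstream docstring):
+    pass the class itself as formatter_class so description and epilog text
+    are emitted verbatim instead of being re-wrapped:
+
+        parser = ArgumentParser(
+            prog='demo',
+            description='text that is already laid out by hand',
+            formatter_class=RawDescriptionHelpFormatter)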
+ """ + + def _fill_text(self, text, width, indent): + return ''.join(indent + line for line in text.splitlines(keepends=True)) + + +class RawTextHelpFormatter(RawDescriptionHelpFormatter): + """Help message formatter which retains formatting of all help text. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _split_lines(self, text, width): + return text.splitlines() + + +class ArgumentDefaultsHelpFormatter(HelpFormatter): + """Help message formatter which adds default values to argument help. + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _get_help_string(self, action): + help = action.help + if '%(default)' not in action.help: + if action.default is not SUPPRESS: + defaulting_nargs = [OPTIONAL, ZERO_OR_MORE] + if action.option_strings or action.nargs in defaulting_nargs: + help += ' (default: %(default)s)' + return help + + +class MetavarTypeHelpFormatter(HelpFormatter): + """Help message formatter which uses the argument 'type' as the default + metavar value (instead of the argument 'dest') + + Only the name of this class is considered a public API. All the methods + provided by the class are considered an implementation detail. + """ + + def _get_default_metavar_for_optional(self, action): + return action.type.__name__ + + def _get_default_metavar_for_positional(self, action): + return action.type.__name__ + + + +# ===================== +# Options and Arguments +# ===================== + +def _get_action_name(argument): + if argument is None: + return None + elif argument.option_strings: + return '/'.join(argument.option_strings) + elif argument.metavar not in (None, SUPPRESS): + return argument.metavar + elif argument.dest not in (None, SUPPRESS): + return argument.dest + elif argument.choices: + return '{' + ','.join(argument.choices) + '}' + else: + return None + + +class ArgumentError(Exception): + """An error from creating or using an argument (optional or positional). + + The string value of this exception is the message, augmented with + information about the argument that caused it. + """ + + def __init__(self, argument, message): + self.argument_name = _get_action_name(argument) + self.message = message + + def __str__(self): + if self.argument_name is None: + format = '%(message)s' + else: + format = 'argument %(argument_name)s: %(message)s' + return format % dict(message=self.message, + argument_name=self.argument_name) + + +class ArgumentTypeError(Exception): + """An error from trying to convert a command line string to a type.""" + pass + + +# ============== +# Action classes +# ============== + +class Action(_AttributeHolder): + """Information about how to convert command line strings to Python objects. + + Action objects are used by an ArgumentParser to represent the information + needed to parse a single argument from one or more strings from the + command line. The keyword arguments to the Action constructor are also + all attributes of Action instances. + + Keyword Arguments: + + - option_strings -- A list of command-line option strings which + should be associated with this action. + + - dest -- The name of the attribute to hold the created object(s) + + - nargs -- The number of command-line arguments that should be + consumed. By default, one argument will be consumed and a single + value will be produced. 
Other values include: + - N (an integer) consumes N arguments (and produces a list) + - '?' consumes zero or one arguments + - '*' consumes zero or more arguments (and produces a list) + - '+' consumes one or more arguments (and produces a list) + Note that the difference between the default and nargs=1 is that + with the default, a single value will be produced, while with + nargs=1, a list containing a single value will be produced. + + - const -- The value to be produced if the option is specified and the + option uses an action that takes no values. + + - default -- The value to be produced if the option is not specified. + + - type -- A callable that accepts a single string argument, and + returns the converted value. The standard Python types str, int, + float, and complex are useful examples of such callables. If None, + str is used. + + - choices -- A container of values that should be allowed. If not None, + after a command-line argument has been converted to the appropriate + type, an exception will be raised if it is not a member of this + collection. + + - required -- True if the action must always be specified at the + command line. This is only meaningful for optional command-line + arguments. + + - help -- The help string describing the argument. + + - metavar -- The name to be used for the option's argument with the + help string. If None, the 'dest' value will be used as the name. + """ + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + self.option_strings = option_strings + self.dest = dest + self.nargs = nargs + self.const = const + self.default = default + self.type = type + self.choices = choices + self.required = required + self.help = help + self.metavar = metavar + + def _get_kwargs(self): + names = [ + 'option_strings', + 'dest', + 'nargs', + 'const', + 'default', + 'type', + 'choices', + 'required', + 'help', + 'metavar', + ] + return [(name, getattr(self, name)) for name in names] + + def format_usage(self): + return self.option_strings[0] + + def __call__(self, parser, namespace, values, option_string=None): + raise NotImplementedError(_('.__call__() not defined')) + +class BooleanOptionalAction(Action): + def __init__(self, + option_strings, + dest, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + + _option_strings = [] + for option_string in option_strings: + _option_strings.append(option_string) + + if option_string.startswith('--'): + option_string = '--no-' + option_string[2:] + _option_strings.append(option_string) + + if help is not None and default is not None and default is not SUPPRESS: + help += " (default: %(default)s)" + + super().__init__( + option_strings=_option_strings, + dest=dest, + nargs=0, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + if option_string in self.option_strings: + setattr(namespace, self.dest, not option_string.startswith('--no-')) + + def format_usage(self): + return ' | '.join(self.option_strings) + + +class _StoreAction(Action): + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if nargs == 0: + raise ValueError('nargs for store actions must be != 0; if you ' + 'have nothing to store, actions such as store ' + 
'true or store const may be more appropriate') + if const is not None and nargs != OPTIONAL: + raise ValueError('nargs must be %r to supply const' % OPTIONAL) + super(_StoreAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, values) + + +class _StoreConstAction(Action): + + def __init__(self, + option_strings, + dest, + const, + default=None, + required=False, + help=None, + metavar=None): + super(_StoreConstAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + +class _StoreTrueAction(_StoreConstAction): + + def __init__(self, + option_strings, + dest, + default=False, + required=False, + help=None): + super(_StoreTrueAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + required=required, + help=help) + + +class _StoreFalseAction(_StoreConstAction): + + def __init__(self, + option_strings, + dest, + default=True, + required=False, + help=None): + super(_StoreFalseAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=False, + default=default, + required=required, + help=help) + + +class _AppendAction(Action): + + def __init__(self, + option_strings, + dest, + nargs=None, + const=None, + default=None, + type=None, + choices=None, + required=False, + help=None, + metavar=None): + if nargs == 0: + raise ValueError('nargs for append actions must be != 0; if arg ' + 'strings are not supplying the value to append, ' + 'the append const action may be more appropriate') + if const is not None and nargs != OPTIONAL: + raise ValueError('nargs must be %r to supply const' % OPTIONAL) + super(_AppendAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=nargs, + const=const, + default=default, + type=type, + choices=choices, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + items = getattr(namespace, self.dest, None) + items = _copy_items(items) + items.append(values) + setattr(namespace, self.dest, items) + + +class _AppendConstAction(Action): + + def __init__(self, + option_strings, + dest, + const, + default=None, + required=False, + help=None, + metavar=None): + super(_AppendConstAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + const=const, + default=default, + required=required, + help=help, + metavar=metavar) + + def __call__(self, parser, namespace, values, option_string=None): + items = getattr(namespace, self.dest, None) + items = _copy_items(items) + items.append(self.const) + setattr(namespace, self.dest, items) + + +class _CountAction(Action): + + def __init__(self, + option_strings, + dest, + default=None, + required=False, + help=None): + super(_CountAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + count = getattr(namespace, self.dest, None) + if count is None: + count = 0 + setattr(namespace, self.dest, count + 1) + + +class _HelpAction(Action): + + def __init__(self, 
+ option_strings, + dest=SUPPRESS, + default=SUPPRESS, + help=None): + super(_HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + parser.print_help() + parser.exit() + + +class _VersionAction(Action): + + def __init__(self, + option_strings, + version=None, + dest=SUPPRESS, + default=SUPPRESS, + help="show program's version number and exit"): + super(_VersionAction, self).__init__( + option_strings=option_strings, + dest=dest, + default=default, + nargs=0, + help=help) + self.version = version + + def __call__(self, parser, namespace, values, option_string=None): + version = self.version + if version is None: + version = parser.version + formatter = parser._get_formatter() + formatter.add_text(version) + parser._print_message(formatter.format_help(), _sys.stdout) + parser.exit() + + +class _SubParsersAction(Action): + + class _ChoicesPseudoAction(Action): + + def __init__(self, name, aliases, help): + metavar = dest = name + if aliases: + metavar += ' (%s)' % ', '.join(aliases) + sup = super(_SubParsersAction._ChoicesPseudoAction, self) + sup.__init__(option_strings=[], dest=dest, help=help, + metavar=metavar) + + def __init__(self, + option_strings, + prog, + parser_class, + dest=SUPPRESS, + required=False, + help=None, + metavar=None): + + self._prog_prefix = prog + self._parser_class = parser_class + self._name_parser_map = {} + self._choices_actions = [] + + super(_SubParsersAction, self).__init__( + option_strings=option_strings, + dest=dest, + nargs=PARSER, + choices=self._name_parser_map, + required=required, + help=help, + metavar=metavar) + + def add_parser(self, name, **kwargs): + # set prog from the existing prefix + if kwargs.get('prog') is None: + kwargs['prog'] = '%s %s' % (self._prog_prefix, name) + + aliases = kwargs.pop('aliases', ()) + + # create a pseudo-action to hold the choice help + if 'help' in kwargs: + help = kwargs.pop('help') + choice_action = self._ChoicesPseudoAction(name, aliases, help) + self._choices_actions.append(choice_action) + + # create the parser and add it to the map + parser = self._parser_class(**kwargs) + self._name_parser_map[name] = parser + + # make parser available under aliases also + for alias in aliases: + self._name_parser_map[alias] = parser + + return parser + + def _get_subactions(self): + return self._choices_actions + + def __call__(self, parser, namespace, values, option_string=None): + parser_name = values[0] + arg_strings = values[1:] + + # set the parser name if requested + if self.dest is not SUPPRESS: + setattr(namespace, self.dest, parser_name) + + # select the parser + try: + parser = self._name_parser_map[parser_name] + except KeyError: + args = {'parser_name': parser_name, + 'choices': ', '.join(self._name_parser_map)} + msg = _('unknown parser %(parser_name)r (choices: %(choices)s)') % args + raise ArgumentError(self, msg) + + # parse all the remaining options into the namespace + # store any unrecognized options on the object, so that the top + # level parser can decide what to do with them + + # In case this subparser defines new defaults, we parse them + # in a new namespace object and then update the original + # namespace for the relevant parts. 
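+        # Editor's illustration (not upstream code): given
+        #     sub = parser.add_subparsers().add_parser('run')
+        #     sub.set_defaults(mode='fast')
+        # a default such as 'mode' defined on the subparser is copied onto the
+        # parent namespace and wins over an attribute of the same name that
+        # was already set there.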
+ subnamespace, arg_strings = parser.parse_known_args(arg_strings, None) + for key, value in vars(subnamespace).items(): + setattr(namespace, key, value) + + if arg_strings: + vars(namespace).setdefault(_UNRECOGNIZED_ARGS_ATTR, []) + getattr(namespace, _UNRECOGNIZED_ARGS_ATTR).extend(arg_strings) + +class _ExtendAction(_AppendAction): + def __call__(self, parser, namespace, values, option_string=None): + items = getattr(namespace, self.dest, None) + items = _copy_items(items) + items.extend(values) + setattr(namespace, self.dest, items) + +# ============== +# Type classes +# ============== + +class FileType(object): + """Factory for creating file object types + + Instances of FileType are typically passed as type= arguments to the + ArgumentParser add_argument() method. + + Keyword Arguments: + - mode -- A string indicating how the file is to be opened. Accepts the + same values as the builtin open() function. + - bufsize -- The file's desired buffer size. Accepts the same values as + the builtin open() function. + - encoding -- The file's encoding. Accepts the same values as the + builtin open() function. + - errors -- A string indicating how encoding and decoding errors are to + be handled. Accepts the same value as the builtin open() function. + """ + + def __init__(self, mode='r', bufsize=-1, encoding=None, errors=None): + self._mode = mode + self._bufsize = bufsize + self._encoding = encoding + self._errors = errors + + def __call__(self, string): + # the special argument "-" means sys.std{in,out} + if string == '-': + if 'r' in self._mode: + return _sys.stdin.buffer if 'b' in self._mode else _sys.stdin + elif any(c in self._mode for c in 'wax'): + return _sys.stdout.buffer if 'b' in self._mode else _sys.stdout + else: + msg = _('argument "-" with mode %r') % self._mode + raise ValueError(msg) + + # all other arguments are used as file names + try: + return open(string, self._mode, self._bufsize, self._encoding, + self._errors) + except OSError as e: + args = {'filename': string, 'error': e} + message = _("can't open '%(filename)s': %(error)s") + raise ArgumentTypeError(message % args) + + def __repr__(self): + args = self._mode, self._bufsize + kwargs = [('encoding', self._encoding), ('errors', self._errors)] + args_str = ', '.join([repr(arg) for arg in args if arg != -1] + + ['%s=%r' % (kw, arg) for kw, arg in kwargs + if arg is not None]) + return '%s(%s)' % (type(self).__name__, args_str) + +# =========================== +# Optional and Positional Parsing +# =========================== + +class Namespace(_AttributeHolder): + """Simple object for storing attributes. + + Implements equality by attribute names and values, and provides a simple + string representation. 
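+
+    Brief illustration (editor's sketch, not part of the upstream docstring):
+
+        >>> ns = Namespace(foo=1, bar='baz')
+        >>> ns.foo
+        1
+        >>> 'bar' in ns
+        True
+        >>> ns == Namespace(foo=1, bar='baz')
+        True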
+ """ + + def __init__(self, **kwargs): + for name in kwargs: + setattr(self, name, kwargs[name]) + + def __eq__(self, other): + if not isinstance(other, Namespace): + return NotImplemented + return vars(self) == vars(other) + + def __contains__(self, key): + return key in self.__dict__ + + +class _ActionsContainer(object): + + def __init__(self, + description, + prefix_chars, + argument_default, + conflict_handler): + super(_ActionsContainer, self).__init__() + + self.description = description + self.argument_default = argument_default + self.prefix_chars = prefix_chars + self.conflict_handler = conflict_handler + + # set up registries + self._registries = {} + + # register actions + self.register('action', None, _StoreAction) + self.register('action', 'store', _StoreAction) + self.register('action', 'store_const', _StoreConstAction) + self.register('action', 'store_true', _StoreTrueAction) + self.register('action', 'store_false', _StoreFalseAction) + self.register('action', 'append', _AppendAction) + self.register('action', 'append_const', _AppendConstAction) + self.register('action', 'count', _CountAction) + self.register('action', 'help', _HelpAction) + self.register('action', 'version', _VersionAction) + self.register('action', 'parsers', _SubParsersAction) + self.register('action', 'extend', _ExtendAction) + + # raise an exception if the conflict handler is invalid + self._get_handler() + + # action storage + self._actions = [] + self._option_string_actions = {} + + # groups + self._action_groups = [] + self._mutually_exclusive_groups = [] + + # defaults storage + self._defaults = {} + + # determines whether an "option" looks like a negative number + self._negative_number_matcher = _re.compile(r'^-\d+$|^-\d*\.\d+$') + + # whether or not there are any optionals that look like negative + # numbers -- uses a list so it can be shared and edited + self._has_negative_number_optionals = [] + + # ==================== + # Registration methods + # ==================== + def register(self, registry_name, value, object): + registry = self._registries.setdefault(registry_name, {}) + registry[value] = object + + def _registry_get(self, registry_name, value, default=None): + return self._registries[registry_name].get(value, default) + + # ================================== + # Namespace default accessor methods + # ================================== + def set_defaults(self, **kwargs): + self._defaults.update(kwargs) + + # if these defaults match any existing arguments, replace + # the previous default on the object with the new one + for action in self._actions: + if action.dest in kwargs: + action.default = kwargs[action.dest] + + def get_default(self, dest): + for action in self._actions: + if action.dest == dest and action.default is not None: + return action.default + return self._defaults.get(dest, None) + + + # ======================= + # Adding argument actions + # ======================= + def add_argument(self, *args, **kwargs): + """ + add_argument(dest, ..., name=value, ...) + add_argument(option_string, option_string, ..., name=value, ...) 
+ """ + + # if no positional args are supplied or only one is supplied and + # it doesn't look like an option string, parse a positional + # argument + chars = self.prefix_chars + if not args or len(args) == 1 and args[0][0] not in chars: + if args and 'dest' in kwargs: + raise ValueError('dest supplied twice for positional argument') + kwargs = self._get_positional_kwargs(*args, **kwargs) + + # otherwise, we're adding an optional argument + else: + kwargs = self._get_optional_kwargs(*args, **kwargs) + + # if no default was supplied, use the parser-level default + if 'default' not in kwargs: + dest = kwargs['dest'] + if dest in self._defaults: + kwargs['default'] = self._defaults[dest] + elif self.argument_default is not None: + kwargs['default'] = self.argument_default + + # create the action object, and add it to the parser + action_class = self._pop_action_class(kwargs) + if not callable(action_class): + raise ValueError('unknown action "%s"' % (action_class,)) + action = action_class(**kwargs) + + # raise an error if the action type is not callable + type_func = self._registry_get('type', action.type, action.type) + if not callable(type_func): + raise ValueError('%r is not callable' % (type_func,)) + + if type_func is FileType: + raise ValueError('%r is a FileType class object, instance of it' + ' must be passed' % (type_func,)) + + # raise an error if the metavar does not match the type + if hasattr(self, "_get_formatter"): + try: + self._get_formatter()._format_args(action, None) + except TypeError: + raise ValueError("length of metavar tuple does not match nargs") + + return self._add_action(action) + + def add_argument_group(self, *args, **kwargs): + group = _ArgumentGroup(self, *args, **kwargs) + self._action_groups.append(group) + return group + + def add_mutually_exclusive_group(self, **kwargs): + group = _MutuallyExclusiveGroup(self, **kwargs) + self._mutually_exclusive_groups.append(group) + return group + + def _add_action(self, action): + # resolve any conflicts + self._check_conflict(action) + + # add to actions list + self._actions.append(action) + action.container = self + + # index the action by any option strings it has + for option_string in action.option_strings: + self._option_string_actions[option_string] = action + + # set the flag if any option strings look like negative numbers + for option_string in action.option_strings: + if self._negative_number_matcher.match(option_string): + if not self._has_negative_number_optionals: + self._has_negative_number_optionals.append(True) + + # return the created action + return action + + def _remove_action(self, action): + self._actions.remove(action) + + def _add_container_actions(self, container): + # collect groups by titles + title_group_map = {} + for group in self._action_groups: + if group.title in title_group_map: + msg = _('cannot merge actions - two groups are named %r') + raise ValueError(msg % (group.title)) + title_group_map[group.title] = group + + # map each action to its group + group_map = {} + for group in container._action_groups: + + # if a group with the title exists, use that, otherwise + # create a new group matching the container's group + if group.title not in title_group_map: + title_group_map[group.title] = self.add_argument_group( + title=group.title, + description=group.description, + conflict_handler=group.conflict_handler) + + # map the actions to their new group + for action in group._group_actions: + group_map[action] = title_group_map[group.title] + + # add container's mutually exclusive 
groups + # NOTE: if add_mutually_exclusive_group ever gains title= and + # description= then this code will need to be expanded as above + for group in container._mutually_exclusive_groups: + mutex_group = self.add_mutually_exclusive_group( + required=group.required) + + # map the actions to their new mutex group + for action in group._group_actions: + group_map[action] = mutex_group + + # add all actions to this container or their group + for action in container._actions: + group_map.get(action, self)._add_action(action) + + def _get_positional_kwargs(self, dest, **kwargs): + # make sure required is not specified + if 'required' in kwargs: + msg = _("'required' is an invalid argument for positionals") + raise TypeError(msg) + + # mark positional arguments as required if at least one is + # always required + if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]: + kwargs['required'] = True + if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs: + kwargs['required'] = True + + # return the keyword arguments with no option strings + return dict(kwargs, dest=dest, option_strings=[]) + + def _get_optional_kwargs(self, *args, **kwargs): + # determine short and long option strings + option_strings = [] + long_option_strings = [] + for option_string in args: + # error on strings that don't start with an appropriate prefix + if not option_string[0] in self.prefix_chars: + args = {'option': option_string, + 'prefix_chars': self.prefix_chars} + msg = _('invalid option string %(option)r: ' + 'must start with a character %(prefix_chars)r') + raise ValueError(msg % args) + + # strings starting with two prefix characters are long options + option_strings.append(option_string) + if len(option_string) > 1 and option_string[1] in self.prefix_chars: + long_option_strings.append(option_string) + + # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x' + dest = kwargs.pop('dest', None) + if dest is None: + if long_option_strings: + dest_option_string = long_option_strings[0] + else: + dest_option_string = option_strings[0] + dest = dest_option_string.lstrip(self.prefix_chars) + if not dest: + msg = _('dest= is required for options like %r') + raise ValueError(msg % option_string) + dest = dest.replace('-', '_') + + # return the updated keyword arguments + return dict(kwargs, dest=dest, option_strings=option_strings) + + def _pop_action_class(self, kwargs, default=None): + action = kwargs.pop('action', default) + return self._registry_get('action', action, action) + + def _get_handler(self): + # determine function from conflict handler string + handler_func_name = '_handle_conflict_%s' % self.conflict_handler + try: + return getattr(self, handler_func_name) + except AttributeError: + msg = _('invalid conflict_resolution value: %r') + raise ValueError(msg % self.conflict_handler) + + def _check_conflict(self, action): + + # find all options that conflict with this option + confl_optionals = [] + for option_string in action.option_strings: + if option_string in self._option_string_actions: + confl_optional = self._option_string_actions[option_string] + confl_optionals.append((option_string, confl_optional)) + + # resolve any conflicts + if confl_optionals: + conflict_handler = self._get_handler() + conflict_handler(action, confl_optionals) + + def _handle_conflict_error(self, action, conflicting_actions): + message = ngettext('conflicting option string: %s', + 'conflicting option strings: %s', + len(conflicting_actions)) + conflict_string = ', '.join([option_string + for option_string, 
action + in conflicting_actions]) + raise ArgumentError(action, message % conflict_string) + + def _handle_conflict_resolve(self, action, conflicting_actions): + + # remove all conflicting options + for option_string, action in conflicting_actions: + + # remove the conflicting option + action.option_strings.remove(option_string) + self._option_string_actions.pop(option_string, None) + + # if the option now has no option string, remove it from the + # container holding it + if not action.option_strings: + action.container._remove_action(action) + + +class _ArgumentGroup(_ActionsContainer): + + def __init__(self, container, title=None, description=None, **kwargs): + # add any missing keyword arguments by checking the container + update = kwargs.setdefault + update('conflict_handler', container.conflict_handler) + update('prefix_chars', container.prefix_chars) + update('argument_default', container.argument_default) + super_init = super(_ArgumentGroup, self).__init__ + super_init(description=description, **kwargs) + + # group attributes + self.title = title + self._group_actions = [] + + # share most attributes with the container + self._registries = container._registries + self._actions = container._actions + self._option_string_actions = container._option_string_actions + self._defaults = container._defaults + self._has_negative_number_optionals = \ + container._has_negative_number_optionals + self._mutually_exclusive_groups = container._mutually_exclusive_groups + + def _add_action(self, action): + action = super(_ArgumentGroup, self)._add_action(action) + self._group_actions.append(action) + return action + + def _remove_action(self, action): + super(_ArgumentGroup, self)._remove_action(action) + self._group_actions.remove(action) + + +class _MutuallyExclusiveGroup(_ArgumentGroup): + + def __init__(self, container, required=False): + super(_MutuallyExclusiveGroup, self).__init__(container) + self.required = required + self._container = container + + def _add_action(self, action): + if action.required: + msg = _('mutually exclusive arguments must be optional') + raise ValueError(msg) + action = self._container._add_action(action) + self._group_actions.append(action) + return action + + def _remove_action(self, action): + self._container._remove_action(action) + self._group_actions.remove(action) + + +class ArgumentParser(_AttributeHolder, _ActionsContainer): + """Object for parsing command line strings into Python objects. 
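+
+    Minimal illustration (editor's sketch, not part of the upstream
+    docstring):
+
+        parser = ArgumentParser(prog='demo', description='A small example')
+        parser.add_argument('--verbose', action='store_true')
+        args = parser.parse_args(['--verbose'])
+        # args.verbose is now True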
+ + Keyword Arguments: + - prog -- The name of the program (default: + ``os.path.basename(sys.argv[0])``) + - usage -- A usage message (default: auto-generated from arguments) + - description -- A description of what the program does + - epilog -- Text following the argument descriptions + - parents -- Parsers whose arguments should be copied into this one + - formatter_class -- HelpFormatter class for printing help messages + - prefix_chars -- Characters that prefix optional arguments + - fromfile_prefix_chars -- Characters that prefix files containing + additional arguments + - argument_default -- The default value for all arguments + - conflict_handler -- String indicating how to handle conflicts + - add_help -- Add a -h/-help option + - allow_abbrev -- Allow long options to be abbreviated unambiguously + - exit_on_error -- Determines whether or not ArgumentParser exits with + error info when an error occurs + """ + + def __init__(self, + prog=None, + usage=None, + description=None, + epilog=None, + parents=[], + formatter_class=HelpFormatter, + prefix_chars='-', + fromfile_prefix_chars=None, + argument_default=None, + conflict_handler='error', + add_help=True, + allow_abbrev=True, + exit_on_error=True): + + superinit = super(ArgumentParser, self).__init__ + superinit(description=description, + prefix_chars=prefix_chars, + argument_default=argument_default, + conflict_handler=conflict_handler) + + # default setting for prog + if prog is None: + prog = _os.path.basename(_sys.argv[0]) + + self.prog = prog + self.usage = usage + self.epilog = epilog + self.formatter_class = formatter_class + self.fromfile_prefix_chars = fromfile_prefix_chars + self.add_help = add_help + self.allow_abbrev = allow_abbrev + self.exit_on_error = exit_on_error + + add_group = self.add_argument_group + self._positionals = add_group(_('positional arguments')) + self._optionals = add_group(_('options')) + self._subparsers = None + + # register types + def identity(string): + return string + self.register('type', None, identity) + + # add help argument if necessary + # (using explicit default to override global argument_default) + default_prefix = '-' if '-' in prefix_chars else prefix_chars[0] + if self.add_help: + self.add_argument( + default_prefix+'h', default_prefix*2+'help', + action='help', default=SUPPRESS, + help=_('show this help message and exit')) + + # add parent arguments and defaults + for parent in parents: + self._add_container_actions(parent) + try: + defaults = parent._defaults + except AttributeError: + pass + else: + self._defaults.update(defaults) + + # ======================= + # Pretty __repr__ methods + # ======================= + def _get_kwargs(self): + names = [ + 'prog', + 'usage', + 'description', + 'formatter_class', + 'conflict_handler', + 'add_help', + ] + return [(name, getattr(self, name)) for name in names] + + # ================================== + # Optional/Positional adding methods + # ================================== + def add_subparsers(self, **kwargs): + if self._subparsers is not None: + self.error(_('cannot have multiple subparser arguments')) + + # add the parser class to the arguments if it's not present + kwargs.setdefault('parser_class', type(self)) + + if 'title' in kwargs or 'description' in kwargs: + title = _(kwargs.pop('title', 'subcommands')) + description = _(kwargs.pop('description', None)) + self._subparsers = self.add_argument_group(title, description) + else: + self._subparsers = self._positionals + + # prog defaults to the usage message of this parser, 
skipping + # optional arguments and with no "usage:" prefix + if kwargs.get('prog') is None: + formatter = self._get_formatter() + positionals = self._get_positional_actions() + groups = self._mutually_exclusive_groups + formatter.add_usage(self.usage, positionals, groups, '') + kwargs['prog'] = formatter.format_help().strip() + + # create the parsers action and add it to the positionals list + parsers_class = self._pop_action_class(kwargs, 'parsers') + action = parsers_class(option_strings=[], **kwargs) + self._subparsers._add_action(action) + + # return the created parsers action + return action + + def _add_action(self, action): + if action.option_strings: + self._optionals._add_action(action) + else: + self._positionals._add_action(action) + return action + + def _get_optional_actions(self): + return [action + for action in self._actions + if action.option_strings] + + def _get_positional_actions(self): + return [action + for action in self._actions + if not action.option_strings] + + # ===================================== + # Command line argument parsing methods + # ===================================== + def parse_args(self, args=None, namespace=None): + args, argv = self.parse_known_args(args, namespace) + if argv: + msg = _('unrecognized arguments: %s') + self.error(msg % ' '.join(argv)) + return args + + def parse_known_args(self, args=None, namespace=None): + if args is None: + # args default to the system args + args = _sys.argv[1:] + else: + # make sure that args are mutable + args = list(args) + + # default Namespace built from parser defaults + if namespace is None: + namespace = Namespace() + + # add any action defaults that aren't present + for action in self._actions: + if action.dest is not SUPPRESS: + if not hasattr(namespace, action.dest): + if action.default is not SUPPRESS: + setattr(namespace, action.dest, action.default) + + # add any parser defaults that aren't present + for dest in self._defaults: + if not hasattr(namespace, dest): + setattr(namespace, dest, self._defaults[dest]) + + # parse the arguments and exit if there are any errors + if self.exit_on_error: + try: + namespace, args = self._parse_known_args(args, namespace) + except ArgumentError: + err = _sys.exc_info()[1] + self.error(str(err)) + else: + namespace, args = self._parse_known_args(args, namespace) + + if hasattr(namespace, _UNRECOGNIZED_ARGS_ATTR): + args.extend(getattr(namespace, _UNRECOGNIZED_ARGS_ATTR)) + delattr(namespace, _UNRECOGNIZED_ARGS_ATTR) + return namespace, args + + def _parse_known_args(self, arg_strings, namespace): + # replace arg strings that are file references + if self.fromfile_prefix_chars is not None: + arg_strings = self._read_args_from_files(arg_strings) + + # map all mutually exclusive arguments to the other arguments + # they can't occur with + action_conflicts = {} + for mutex_group in self._mutually_exclusive_groups: + group_actions = mutex_group._group_actions + for i, mutex_action in enumerate(mutex_group._group_actions): + conflicts = action_conflicts.setdefault(mutex_action, []) + conflicts.extend(group_actions[:i]) + conflicts.extend(group_actions[i + 1:]) + + # find all option indices, and determine the arg_string_pattern + # which has an 'O' if there is an option at an index, + # an 'A' if there is an argument, or a '-' if there is a '--' + option_string_indices = {} + arg_string_pattern_parts = [] + arg_strings_iter = iter(arg_strings) + for i, arg_string in enumerate(arg_strings_iter): + + # all args after -- are non-options + if arg_string == '--': + 
arg_string_pattern_parts.append('-') + for arg_string in arg_strings_iter: + arg_string_pattern_parts.append('A') + + # otherwise, add the arg to the arg strings + # and note the index if it was an option + else: + option_tuple = self._parse_optional(arg_string) + if option_tuple is None: + pattern = 'A' + else: + option_string_indices[i] = option_tuple + pattern = 'O' + arg_string_pattern_parts.append(pattern) + + # join the pieces together to form the pattern + arg_strings_pattern = ''.join(arg_string_pattern_parts) + + # converts arg strings to the appropriate and then takes the action + seen_actions = set() + seen_non_default_actions = set() + + def take_action(action, argument_strings, option_string=None): + seen_actions.add(action) + argument_values = self._get_values(action, argument_strings) + + # error if this argument is not allowed with other previously + # seen arguments, assuming that actions that use the default + # value don't really count as "present" + if argument_values is not action.default: + seen_non_default_actions.add(action) + for conflict_action in action_conflicts.get(action, []): + if conflict_action in seen_non_default_actions: + msg = _('not allowed with argument %s') + action_name = _get_action_name(conflict_action) + raise ArgumentError(action, msg % action_name) + + # take the action if we didn't receive a SUPPRESS value + # (e.g. from a default) + if argument_values is not SUPPRESS: + action(self, namespace, argument_values, option_string) + + # function to convert arg_strings into an optional action + def consume_optional(start_index): + + # get the optional identified at this index + option_tuple = option_string_indices[start_index] + action, option_string, explicit_arg = option_tuple + + # identify additional optionals in the same arg string + # (e.g. 
-xyz is the same as -x -y -z if no args are required) + match_argument = self._match_argument + action_tuples = [] + while True: + + # if we found no optional action, skip it + if action is None: + extras.append(arg_strings[start_index]) + return start_index + 1 + + # if there is an explicit argument, try to match the + # optional's string arguments to only this + if explicit_arg is not None: + arg_count = match_argument(action, 'A') + + # if the action is a single-dash option and takes no + # arguments, try to parse more single-dash options out + # of the tail of the option string + chars = self.prefix_chars + if ( + arg_count == 0 + and option_string[1] not in chars + and explicit_arg != '' + ): + action_tuples.append((action, [], option_string)) + char = option_string[0] + option_string = char + explicit_arg[0] + new_explicit_arg = explicit_arg[1:] or None + optionals_map = self._option_string_actions + if option_string in optionals_map: + action = optionals_map[option_string] + explicit_arg = new_explicit_arg + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if the action expect exactly one argument, we've + # successfully matched the option; exit the loop + elif arg_count == 1: + stop = start_index + 1 + args = [explicit_arg] + action_tuples.append((action, args, option_string)) + break + + # error if a double-dash option did not use the + # explicit argument + else: + msg = _('ignored explicit argument %r') + raise ArgumentError(action, msg % explicit_arg) + + # if there is no explicit argument, try to match the + # optional's string arguments with the following strings + # if successful, exit the loop + else: + start = start_index + 1 + selected_patterns = arg_strings_pattern[start:] + arg_count = match_argument(action, selected_patterns) + stop = start + arg_count + args = arg_strings[start:stop] + action_tuples.append((action, args, option_string)) + break + + # add the Optional to the list and return the index at which + # the Optional's string args stopped + assert action_tuples + for action, args, option_string in action_tuples: + take_action(action, args, option_string) + return stop + + # the list of Positionals left to be parsed; this is modified + # by consume_positionals() + positionals = self._get_positional_actions() + + # function to convert arg_strings into positional actions + def consume_positionals(start_index): + # match as many Positionals as possible + match_partial = self._match_arguments_partial + selected_pattern = arg_strings_pattern[start_index:] + arg_counts = match_partial(positionals, selected_pattern) + + # slice off the appropriate arg strings for each Positional + # and add the Positional and its args to the list + for action, arg_count in zip(positionals, arg_counts): + args = arg_strings[start_index: start_index + arg_count] + start_index += arg_count + take_action(action, args) + + # slice off the Positionals that we just parsed and return the + # index at which the Positionals' string args stopped + positionals[:] = positionals[len(arg_counts):] + return start_index + + # consume Positionals and Optionals alternately, until we have + # passed the last option string + extras = [] + start_index = 0 + if option_string_indices: + max_option_string_index = max(option_string_indices) + else: + max_option_string_index = -1 + while start_index <= max_option_string_index: + + # consume any Positionals preceding the next option + next_option_string_index = min([ + index + for index in 
option_string_indices + if index >= start_index]) + if start_index != next_option_string_index: + positionals_end_index = consume_positionals(start_index) + + # only try to parse the next optional if we didn't consume + # the option string during the positionals parsing + if positionals_end_index > start_index: + start_index = positionals_end_index + continue + else: + start_index = positionals_end_index + + # if we consumed all the positionals we could and we're not + # at the index of an option string, there were extra arguments + if start_index not in option_string_indices: + strings = arg_strings[start_index:next_option_string_index] + extras.extend(strings) + start_index = next_option_string_index + + # consume the next optional and any arguments for it + start_index = consume_optional(start_index) + + # consume any positionals following the last Optional + stop_index = consume_positionals(start_index) + + # if we didn't consume all the argument strings, there were extras + extras.extend(arg_strings[stop_index:]) + + # make sure all required actions were present and also convert + # action defaults which were not given as arguments + required_actions = [] + for action in self._actions: + if action not in seen_actions: + if action.required: + required_actions.append(_get_action_name(action)) + else: + # Convert action default now instead of doing it before + # parsing arguments to avoid calling convert functions + # twice (which may fail) if the argument was given, but + # only if it was defined already in the namespace + if (action.default is not None and + isinstance(action.default, str) and + hasattr(namespace, action.dest) and + action.default is getattr(namespace, action.dest)): + setattr(namespace, action.dest, + self._get_value(action, action.default)) + + if required_actions: + self.error(_('the following arguments are required: %s') % + ', '.join(required_actions)) + + # make sure all required groups had one option present + for group in self._mutually_exclusive_groups: + if group.required: + for action in group._group_actions: + if action in seen_non_default_actions: + break + + # if no actions were used, report the error + else: + names = [_get_action_name(action) + for action in group._group_actions + if action.help is not SUPPRESS] + msg = _('one of the arguments %s is required') + self.error(msg % ' '.join(names)) + + # return the updated namespace and the extra arguments + return namespace, extras + + def _read_args_from_files(self, arg_strings): + # expand arguments referencing files + new_arg_strings = [] + for arg_string in arg_strings: + + # for regular arguments, just add them back into the list + if not arg_string or arg_string[0] not in self.fromfile_prefix_chars: + new_arg_strings.append(arg_string) + + # replace arguments referencing files with the file content + else: + try: + with open(arg_string[1:]) as args_file: + arg_strings = [] + for arg_line in args_file.read().splitlines(): + for arg in self.convert_arg_line_to_args(arg_line): + arg_strings.append(arg) + arg_strings = self._read_args_from_files(arg_strings) + new_arg_strings.extend(arg_strings) + except OSError: + err = _sys.exc_info()[1] + self.error(str(err)) + + # return the modified argument list + return new_arg_strings + + def convert_arg_line_to_args(self, arg_line): + return [arg_line] + + def _match_argument(self, action, arg_strings_pattern): + # match the pattern for this action to the arg strings + nargs_pattern = self._get_nargs_pattern(action) + match = _re.match(nargs_pattern, 
arg_strings_pattern) + + # raise an exception if we weren't able to find a match + if match is None: + nargs_errors = { + None: _('expected one argument'), + OPTIONAL: _('expected at most one argument'), + ONE_OR_MORE: _('expected at least one argument'), + } + msg = nargs_errors.get(action.nargs) + if msg is None: + msg = ngettext('expected %s argument', + 'expected %s arguments', + action.nargs) % action.nargs + raise ArgumentError(action, msg) + + # return the number of arguments matched + return len(match.group(1)) + + def _match_arguments_partial(self, actions, arg_strings_pattern): + # progressively shorten the actions list by slicing off the + # final actions until we find a match + result = [] + for i in range(len(actions), 0, -1): + actions_slice = actions[:i] + pattern = ''.join([self._get_nargs_pattern(action) + for action in actions_slice]) + match = _re.match(pattern, arg_strings_pattern) + if match is not None: + result.extend([len(string) for string in match.groups()]) + break + + # return the list of arg string counts + return result + + def _parse_optional(self, arg_string): + # if it's an empty string, it was meant to be a positional + if not arg_string: + return None + + # if it doesn't start with a prefix, it was meant to be positional + if not arg_string[0] in self.prefix_chars: + return None + + # if the option string is present in the parser, return the action + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + + # if it's just a single character, it was meant to be positional + if len(arg_string) == 1: + return None + + # if the option string before the "=" is present, return the action + if '=' in arg_string: + option_string, explicit_arg = arg_string.split('=', 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + + # search through all possible prefixes of the option string + # and all actions in the parser for possible interpretations + option_tuples = self._get_option_tuples(arg_string) + + # if multiple actions match, the option string was ambiguous + if len(option_tuples) > 1: + options = ', '.join([option_string + for action, option_string, explicit_arg in option_tuples]) + args = {'option': arg_string, 'matches': options} + msg = _('ambiguous option: %(option)s could match %(matches)s') + self.error(msg % args) + + # if exactly one action matched, this segmentation is good, + # so return the parsed action + elif len(option_tuples) == 1: + option_tuple, = option_tuples + return option_tuple + + # if it was not found as an option, but it looks like a negative + # number, it was meant to be positional + # unless there are negative-number-like options + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + + # if it contains a space, it was meant to be a positional + if ' ' in arg_string: + return None + + # it was meant to be an optional but there is no such option + # in this parser (though it might be a valid option in a subparser) + return None, arg_string, None + + def _get_option_tuples(self, option_string): + result = [] + + # option strings starting with two prefix characters are only + # split at the '=' + chars = self.prefix_chars + if option_string[0] in chars and option_string[1] in chars: + if self.allow_abbrev: + if '=' in option_string: + option_prefix, explicit_arg = option_string.split('=', 1) + else: + 
option_prefix = option_string + explicit_arg = None + for option_string in self._option_string_actions: + if option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # single character options can be concatenated with their arguments + # but multiple character options always have to have their argument + # separate + elif option_string[0] in chars and option_string[1] not in chars: + option_prefix = option_string + explicit_arg = None + short_option_prefix = option_string[:2] + short_explicit_arg = option_string[2:] + + for option_string in self._option_string_actions: + if option_string == short_option_prefix: + action = self._option_string_actions[option_string] + tup = action, option_string, short_explicit_arg + result.append(tup) + elif option_string.startswith(option_prefix): + action = self._option_string_actions[option_string] + tup = action, option_string, explicit_arg + result.append(tup) + + # shouldn't ever get here + else: + self.error(_('unexpected option string: %s') % option_string) + + # return the collected option tuples + return result + + def _get_nargs_pattern(self, action): + # in all examples below, we have to allow for '--' args + # which are represented as '-' in the pattern + nargs = action.nargs + + # the default (None) is assumed to be a single argument + if nargs is None: + nargs_pattern = '(-*A-*)' + + # allow zero or one arguments + elif nargs == OPTIONAL: + nargs_pattern = '(-*A?-*)' + + # allow zero or more arguments + elif nargs == ZERO_OR_MORE: + nargs_pattern = '(-*[A-]*)' + + # allow one or more arguments + elif nargs == ONE_OR_MORE: + nargs_pattern = '(-*A[A-]*)' + + # allow any number of options or arguments + elif nargs == REMAINDER: + nargs_pattern = '([-AO]*)' + + # allow one argument followed by any number of options or arguments + elif nargs == PARSER: + nargs_pattern = '(-*A[-AO]*)' + + # suppress action, like nargs=0 + elif nargs == SUPPRESS: + nargs_pattern = '(-*-*)' + + # all others should be integers + else: + nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs) + + # if this is an optional action, -- is not allowed + if action.option_strings: + nargs_pattern = nargs_pattern.replace('-*', '') + nargs_pattern = nargs_pattern.replace('-', '') + + # return the pattern + return nargs_pattern + + # ======================== + # Alt command line argument parsing, allowing free intermix + # ======================== + + def parse_intermixed_args(self, args=None, namespace=None): + args, argv = self.parse_known_intermixed_args(args, namespace) + if argv: + msg = _('unrecognized arguments: %s') + self.error(msg % ' '.join(argv)) + return args + + def parse_known_intermixed_args(self, args=None, namespace=None): + # returns a namespace and list of extras + # + # positional can be freely intermixed with optionals. optionals are + # first parsed with all positional arguments deactivated. The 'extras' + # are then parsed. If the parser definition is incompatible with the + # intermixed assumptions (e.g. use of REMAINDER, subparsers) a + # TypeError is raised. + # + # positionals are 'deactivated' by setting nargs and default to + # SUPPRESS. 
This blocks the addition of that positional to the + # namespace + + positionals = self._get_positional_actions() + a = [action for action in positionals + if action.nargs in [PARSER, REMAINDER]] + if a: + raise TypeError('parse_intermixed_args: positional arg' + ' with nargs=%s'%a[0].nargs) + + if [action.dest for group in self._mutually_exclusive_groups + for action in group._group_actions if action in positionals]: + raise TypeError('parse_intermixed_args: positional in' + ' mutuallyExclusiveGroup') + + try: + save_usage = self.usage + try: + if self.usage is None: + # capture the full usage for use in error messages + self.usage = self.format_usage()[7:] + for action in positionals: + # deactivate positionals + action.save_nargs = action.nargs + # action.nargs = 0 + action.nargs = SUPPRESS + action.save_default = action.default + action.default = SUPPRESS + namespace, remaining_args = self.parse_known_args(args, + namespace) + for action in positionals: + # remove the empty positional values from namespace + if (hasattr(namespace, action.dest) + and getattr(namespace, action.dest)==[]): + from warnings import warn + warn('Do not expect %s in %s' % (action.dest, namespace)) + delattr(namespace, action.dest) + finally: + # restore nargs and usage before exiting + for action in positionals: + action.nargs = action.save_nargs + action.default = action.save_default + optionals = self._get_optional_actions() + try: + # parse positionals. optionals aren't normally required, but + # they could be, so make sure they aren't. + for action in optionals: + action.save_required = action.required + action.required = False + for group in self._mutually_exclusive_groups: + group.save_required = group.required + group.required = False + namespace, extras = self.parse_known_args(remaining_args, + namespace) + finally: + # restore parser values before exiting + for action in optionals: + action.required = action.save_required + for group in self._mutually_exclusive_groups: + group.required = group.save_required + finally: + self.usage = save_usage + return namespace, extras + + # ======================== + # Value conversion methods + # ======================== + def _get_values(self, action, arg_strings): + # for everything but PARSER, REMAINDER args, strip out first '--' + if action.nargs not in [PARSER, REMAINDER]: + try: + arg_strings.remove('--') + except ValueError: + pass + + # optional argument produces a default when not present + if not arg_strings and action.nargs == OPTIONAL: + if action.option_strings: + value = action.const + else: + value = action.default + if isinstance(value, str): + value = self._get_value(action, value) + self._check_value(action, value) + + # when nargs='*' on a positional, if there were no command-line + # args, use the default if it is anything other than None + elif (not arg_strings and action.nargs == ZERO_OR_MORE and + not action.option_strings): + if action.default is not None: + value = action.default + else: + value = arg_strings + self._check_value(action, value) + + # single argument or optional argument produces a single value + elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]: + arg_string, = arg_strings + value = self._get_value(action, arg_string) + self._check_value(action, value) + + # REMAINDER arguments convert all values, checking none + elif action.nargs == REMAINDER: + value = [self._get_value(action, v) for v in arg_strings] + + # PARSER arguments convert all values, but check only the first + elif action.nargs == PARSER: + value = 
[self._get_value(action, v) for v in arg_strings] + self._check_value(action, value[0]) + + # SUPPRESS argument does not put anything in the namespace + elif action.nargs == SUPPRESS: + value = SUPPRESS + + # all other types of nargs produce a list + else: + value = [self._get_value(action, v) for v in arg_strings] + for v in value: + self._check_value(action, v) + + # return the converted value + return value + + def _get_value(self, action, arg_string): + type_func = self._registry_get('type', action.type, action.type) + if not callable(type_func): + msg = _('%r is not callable') + raise ArgumentError(action, msg % type_func) + + # convert the value to the appropriate type + try: + result = type_func(arg_string) + + # ArgumentTypeErrors indicate errors + except ArgumentTypeError: + name = getattr(action.type, '__name__', repr(action.type)) + msg = str(_sys.exc_info()[1]) + raise ArgumentError(action, msg) + + # TypeErrors or ValueErrors also indicate errors + except (TypeError, ValueError): + name = getattr(action.type, '__name__', repr(action.type)) + args = {'type': name, 'value': arg_string} + msg = _('invalid %(type)s value: %(value)r') + raise ArgumentError(action, msg % args) + + # return the converted value + return result + + def _check_value(self, action, value): + # converted value must be one of the choices (if specified) + if action.choices is not None and value not in action.choices: + args = {'value': value, + 'choices': ', '.join(map(repr, action.choices))} + msg = _('invalid choice: %(value)r (choose from %(choices)s)') + raise ArgumentError(action, msg % args) + + # ======================= + # Help-formatting methods + # ======================= + def format_usage(self): + formatter = self._get_formatter() + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + return formatter.format_help() + + def format_help(self): + formatter = self._get_formatter() + + # usage + formatter.add_usage(self.usage, self._actions, + self._mutually_exclusive_groups) + + # description + formatter.add_text(self.description) + + # positionals, optionals and user-defined groups + for action_group in self._action_groups: + formatter.start_section(action_group.title) + formatter.add_text(action_group.description) + formatter.add_arguments(action_group._group_actions) + formatter.end_section() + + # epilog + formatter.add_text(self.epilog) + + # determine help from format above + return formatter.format_help() + + def _get_formatter(self): + return self.formatter_class(prog=self.prog) + + # ===================== + # Help-printing methods + # ===================== + def print_usage(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_usage(), file) + + def print_help(self, file=None): + if file is None: + file = _sys.stdout + self._print_message(self.format_help(), file) + + def _print_message(self, message, file=None): + if message: + if file is None: + file = _sys.stderr + file.write(message) + + # =============== + # Exiting methods + # =============== + def exit(self, status=0, message=None): + if message: + self._print_message(message, _sys.stderr) + _sys.exit(status) + + def error(self, message): + """error(message: string) + + Prints a usage message incorporating the message to stderr and + exits. + + If you override this in a subclass, it should not return -- it + should either exit or raise an exception. 
+ """ + self.print_usage(_sys.stderr) + args = {'prog': self.prog, 'message': message} + self.exit(2, _('%(prog)s: error: %(message)s\n') % args) diff --git a/python310/ast.py b/python310/ast.py new file mode 100644 index 0000000000000000000000000000000000000000..4f5f9827146dfd8731196121970b21724102c372 --- /dev/null +++ b/python310/ast.py @@ -0,0 +1,1709 @@ +""" + ast + ~~~ + + The `ast` module helps Python applications to process trees of the Python + abstract syntax grammar. The abstract syntax itself might change with + each Python release; this module helps to find out programmatically what + the current grammar looks like and allows modifications of it. + + An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as + a flag to the `compile()` builtin function or by using the `parse()` + function from this module. The result will be a tree of objects whose + classes all inherit from `ast.AST`. + + A modified abstract syntax tree can be compiled into a Python code object + using the built-in `compile()` function. + + Additionally various helper functions are provided that make working with + the trees simpler. The main intention of the helper functions and this + module in general is to provide an easy to use interface for libraries + that work tightly with the python syntax (template engines for example). + + + :copyright: Copyright 2008 by Armin Ronacher. + :license: Python License. +""" +import sys +from _ast import * +from contextlib import contextmanager, nullcontext +from enum import IntEnum, auto + + +def parse(source, filename='', mode='exec', *, + type_comments=False, feature_version=None): + """ + Parse the source into an AST node. + Equivalent to compile(source, filename, mode, PyCF_ONLY_AST). + Pass type_comments=True to get back type comments where the syntax allows. + """ + flags = PyCF_ONLY_AST + if type_comments: + flags |= PyCF_TYPE_COMMENTS + if isinstance(feature_version, tuple): + major, minor = feature_version # Should be a 2-tuple. + assert major == 3 + feature_version = minor + elif feature_version is None: + feature_version = -1 + # Else it should be an int giving the minor version for 3.x. + return compile(source, filename, mode, flags, + _feature_version=feature_version) + + +def literal_eval(node_or_string): + """ + Evaluate an expression node or a string containing only a Python + expression. The string or node provided may only consist of the following + Python literal structures: strings, bytes, numbers, tuples, lists, dicts, + sets, booleans, and None. + + Caution: A complex expression can overflow the C stack and cause a crash. 
+ """ + if isinstance(node_or_string, str): + node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval') + if isinstance(node_or_string, Expression): + node_or_string = node_or_string.body + def _raise_malformed_node(node): + msg = "malformed node or string" + if lno := getattr(node, 'lineno', None): + msg += f' on line {lno}' + raise ValueError(msg + f': {node!r}') + def _convert_num(node): + if not isinstance(node, Constant) or type(node.value) not in (int, float, complex): + _raise_malformed_node(node) + return node.value + def _convert_signed_num(node): + if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)): + operand = _convert_num(node.operand) + if isinstance(node.op, UAdd): + return + operand + else: + return - operand + return _convert_num(node) + def _convert(node): + if isinstance(node, Constant): + return node.value + elif isinstance(node, Tuple): + return tuple(map(_convert, node.elts)) + elif isinstance(node, List): + return list(map(_convert, node.elts)) + elif isinstance(node, Set): + return set(map(_convert, node.elts)) + elif (isinstance(node, Call) and isinstance(node.func, Name) and + node.func.id == 'set' and node.args == node.keywords == []): + return set() + elif isinstance(node, Dict): + if len(node.keys) != len(node.values): + _raise_malformed_node(node) + return dict(zip(map(_convert, node.keys), + map(_convert, node.values))) + elif isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)): + left = _convert_signed_num(node.left) + right = _convert_num(node.right) + if isinstance(left, (int, float)) and isinstance(right, complex): + if isinstance(node.op, Add): + return left + right + else: + return left - right + return _convert_signed_num(node) + return _convert(node_or_string) + + +def dump(node, annotate_fields=True, include_attributes=False, *, indent=None): + """ + Return a formatted dump of the tree in node. This is mainly useful for + debugging purposes. If annotate_fields is true (by default), + the returned string will show the names and the values for fields. + If annotate_fields is false, the result string will be more compact by + omitting unambiguous field names. Attributes such as line + numbers and column offsets are not dumped by default. If this is wanted, + include_attributes can be set to true. If indent is a non-negative + integer or string, then the tree will be pretty-printed with that indent + level. None (the default) selects the single line representation. + """ + def _format(node, level=0): + if indent is not None: + level += 1 + prefix = '\n' + indent * level + sep = ',\n' + indent * level + else: + prefix = '' + sep = ', ' + if isinstance(node, AST): + cls = type(node) + args = [] + allsimple = True + keywords = annotate_fields + for name in node._fields: + try: + value = getattr(node, name) + except AttributeError: + keywords = True + continue + if value is None and getattr(cls, name, ...) is None: + keywords = True + continue + value, simple = _format(value, level) + allsimple = allsimple and simple + if keywords: + args.append('%s=%s' % (name, value)) + else: + args.append(value) + if include_attributes and node._attributes: + for name in node._attributes: + try: + value = getattr(node, name) + except AttributeError: + continue + if value is None and getattr(cls, name, ...) 
is None: + continue + value, simple = _format(value, level) + allsimple = allsimple and simple + args.append('%s=%s' % (name, value)) + if allsimple and len(args) <= 3: + return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args + return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False + elif isinstance(node, list): + if not node: + return '[]', True + return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False + return repr(node), True + + if not isinstance(node, AST): + raise TypeError('expected AST, got %r' % node.__class__.__name__) + if indent is not None and not isinstance(indent, str): + indent = ' ' * indent + return _format(node)[0] + + +def copy_location(new_node, old_node): + """ + Copy source location (`lineno`, `col_offset`, `end_lineno`, and `end_col_offset` + attributes) from *old_node* to *new_node* if possible, and return *new_node*. + """ + for attr in 'lineno', 'col_offset', 'end_lineno', 'end_col_offset': + if attr in old_node._attributes and attr in new_node._attributes: + value = getattr(old_node, attr, None) + # end_lineno and end_col_offset are optional attributes, and they + # should be copied whether the value is None or not. + if value is not None or ( + hasattr(old_node, attr) and attr.startswith("end_") + ): + setattr(new_node, attr, value) + return new_node + + +def fix_missing_locations(node): + """ + When you compile a node tree with compile(), the compiler expects lineno and + col_offset attributes for every node that supports them. This is rather + tedious to fill in for generated nodes, so this helper adds these attributes + recursively where not already set, by setting them to the values of the + parent node. It works recursively starting at *node*. + """ + def _fix(node, lineno, col_offset, end_lineno, end_col_offset): + if 'lineno' in node._attributes: + if not hasattr(node, 'lineno'): + node.lineno = lineno + else: + lineno = node.lineno + if 'end_lineno' in node._attributes: + if getattr(node, 'end_lineno', None) is None: + node.end_lineno = end_lineno + else: + end_lineno = node.end_lineno + if 'col_offset' in node._attributes: + if not hasattr(node, 'col_offset'): + node.col_offset = col_offset + else: + col_offset = node.col_offset + if 'end_col_offset' in node._attributes: + if getattr(node, 'end_col_offset', None) is None: + node.end_col_offset = end_col_offset + else: + end_col_offset = node.end_col_offset + for child in iter_child_nodes(node): + _fix(child, lineno, col_offset, end_lineno, end_col_offset) + _fix(node, 1, 0, 1, 0) + return node + + +def increment_lineno(node, n=1): + """ + Increment the line number and end line number of each node in the tree + starting at *node* by *n*. This is useful to "move code" to a different + location in a file. + """ + for child in walk(node): + # TypeIgnore is a special case where lineno is not an attribute + # but rather a field of the node itself. + if isinstance(child, TypeIgnore): + child.lineno = getattr(child, 'lineno', 0) + n + continue + + if 'lineno' in child._attributes: + child.lineno = getattr(child, 'lineno', 0) + n + if ( + "end_lineno" in child._attributes + and (end_lineno := getattr(child, "end_lineno", 0)) is not None + ): + child.end_lineno = end_lineno + n + return node + + +def iter_fields(node): + """ + Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields`` + that is present on *node*. 
+ """ + for field in node._fields: + try: + yield field, getattr(node, field) + except AttributeError: + pass + + +def iter_child_nodes(node): + """ + Yield all direct child nodes of *node*, that is, all fields that are nodes + and all items of fields that are lists of nodes. + """ + for name, field in iter_fields(node): + if isinstance(field, AST): + yield field + elif isinstance(field, list): + for item in field: + if isinstance(item, AST): + yield item + + +def get_docstring(node, clean=True): + """ + Return the docstring for the given node or None if no docstring can + be found. If the node provided does not have docstrings a TypeError + will be raised. + + If *clean* is `True`, all tabs are expanded to spaces and any whitespace + that can be uniformly removed from the second line onwards is removed. + """ + if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)): + raise TypeError("%r can't have docstrings" % node.__class__.__name__) + if not(node.body and isinstance(node.body[0], Expr)): + return None + node = node.body[0].value + if isinstance(node, Str): + text = node.s + elif isinstance(node, Constant) and isinstance(node.value, str): + text = node.value + else: + return None + if clean: + import inspect + text = inspect.cleandoc(text) + return text + + +def _splitlines_no_ff(source): + """Split a string into lines ignoring form feed and other chars. + + This mimics how the Python parser splits source code. + """ + idx = 0 + lines = [] + next_line = '' + while idx < len(source): + c = source[idx] + next_line += c + idx += 1 + # Keep \r\n together + if c == '\r' and idx < len(source) and source[idx] == '\n': + next_line += '\n' + idx += 1 + if c in '\r\n': + lines.append(next_line) + next_line = '' + + if next_line: + lines.append(next_line) + return lines + + +def _pad_whitespace(source): + r"""Replace all chars except '\f\t' in a line with spaces.""" + result = '' + for c in source: + if c in '\f\t': + result += c + else: + result += ' ' + return result + + +def get_source_segment(source, node, *, padded=False): + """Get source code segment of the *source* that generated *node*. + + If some location information (`lineno`, `end_lineno`, `col_offset`, + or `end_col_offset`) is missing, return None. + + If *padded* is `True`, the first line of a multi-line statement will + be padded with spaces to match its original position. + """ + try: + if node.end_lineno is None or node.end_col_offset is None: + return None + lineno = node.lineno - 1 + end_lineno = node.end_lineno - 1 + col_offset = node.col_offset + end_col_offset = node.end_col_offset + except AttributeError: + return None + + lines = _splitlines_no_ff(source) + if end_lineno == lineno: + return lines[lineno].encode()[col_offset:end_col_offset].decode() + + if padded: + padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode()) + else: + padding = '' + + first = padding + lines[lineno].encode()[col_offset:].decode() + last = lines[end_lineno].encode()[:end_col_offset].decode() + lines = lines[lineno+1:end_lineno] + + lines.insert(0, first) + lines.append(last) + return ''.join(lines) + + +def walk(node): + """ + Recursively yield all descendant nodes in the tree starting at *node* + (including *node* itself), in no specified order. This is useful if you + only want to modify nodes in place and don't care about the context. 
+ """ + from collections import deque + todo = deque([node]) + while todo: + node = todo.popleft() + todo.extend(iter_child_nodes(node)) + yield node + + +class NodeVisitor(object): + """ + A node visitor base class that walks the abstract syntax tree and calls a + visitor function for every node found. This function may return a value + which is forwarded by the `visit` method. + + This class is meant to be subclassed, with the subclass adding visitor + methods. + + Per default the visitor functions for the nodes are ``'visit_'`` + + class name of the node. So a `TryFinally` node visit function would + be `visit_TryFinally`. This behavior can be changed by overriding + the `visit` method. If no visitor function exists for a node + (return value `None`) the `generic_visit` visitor is used instead. + + Don't use the `NodeVisitor` if you want to apply changes to nodes during + traversing. For this a special visitor exists (`NodeTransformer`) that + allows modifications. + """ + + def visit(self, node): + """Visit a node.""" + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node) + + def generic_visit(self, node): + """Called if no explicit visitor function exists for a node.""" + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item) + elif isinstance(value, AST): + self.visit(value) + + def visit_Constant(self, node): + value = node.value + type_name = _const_node_type_names.get(type(value)) + if type_name is None: + for cls, name in _const_node_type_names.items(): + if isinstance(value, cls): + type_name = name + break + if type_name is not None: + method = 'visit_' + type_name + try: + visitor = getattr(self, method) + except AttributeError: + pass + else: + import warnings + warnings.warn(f"{method} is deprecated; add visit_Constant", + DeprecationWarning, 2) + return visitor(node) + return self.generic_visit(node) + + +class NodeTransformer(NodeVisitor): + """ + A :class:`NodeVisitor` subclass that walks the abstract syntax tree and + allows modification of nodes. + + The `NodeTransformer` will walk the AST and use the return value of the + visitor methods to replace or remove the old node. If the return value of + the visitor method is ``None``, the node will be removed from its location, + otherwise it is replaced with the return value. The return value may be the + original node in which case no replacement takes place. + + Here is an example transformer that rewrites all occurrences of name lookups + (``foo``) to ``data['foo']``:: + + class RewriteName(NodeTransformer): + + def visit_Name(self, node): + return Subscript( + value=Name(id='data', ctx=Load()), + slice=Constant(value=node.id), + ctx=node.ctx + ) + + Keep in mind that if the node you're operating on has child nodes you must + either transform the child nodes yourself or call the :meth:`generic_visit` + method for the node first. + + For nodes that were part of a collection of statements (that applies to all + statement nodes), the visitor may also return a list of nodes rather than + just a single node. 
+ + Usually you use the transformer like this:: + + node = YourTransformer().visit(node) + """ + + def generic_visit(self, node): + for field, old_value in iter_fields(node): + if isinstance(old_value, list): + new_values = [] + for value in old_value: + if isinstance(value, AST): + value = self.visit(value) + if value is None: + continue + elif not isinstance(value, AST): + new_values.extend(value) + continue + new_values.append(value) + old_value[:] = new_values + elif isinstance(old_value, AST): + new_node = self.visit(old_value) + if new_node is None: + delattr(node, field) + else: + setattr(node, field, new_node) + return node + + +# If the ast module is loaded more than once, only add deprecated methods once +if not hasattr(Constant, 'n'): + # The following code is for backward compatibility. + # It will be removed in future. + + def _getter(self): + """Deprecated. Use value instead.""" + return self.value + + def _setter(self, value): + self.value = value + + Constant.n = property(_getter, _setter) + Constant.s = property(_getter, _setter) + +class _ABC(type): + + def __init__(cls, *args): + cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead""" + + def __instancecheck__(cls, inst): + if not isinstance(inst, Constant): + return False + if cls in _const_types: + try: + value = inst.value + except AttributeError: + return False + else: + return ( + isinstance(value, _const_types[cls]) and + not isinstance(value, _const_types_not.get(cls, ())) + ) + return type.__instancecheck__(cls, inst) + +def _new(cls, *args, **kwargs): + for key in kwargs: + if key not in cls._fields: + # arbitrary keyword arguments are accepted + continue + pos = cls._fields.index(key) + if pos < len(args): + raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}") + if cls in _const_types: + return Constant(*args, **kwargs) + return Constant.__new__(cls, *args, **kwargs) + +class Num(Constant, metaclass=_ABC): + _fields = ('n',) + __new__ = _new + +class Str(Constant, metaclass=_ABC): + _fields = ('s',) + __new__ = _new + +class Bytes(Constant, metaclass=_ABC): + _fields = ('s',) + __new__ = _new + +class NameConstant(Constant, metaclass=_ABC): + __new__ = _new + +class Ellipsis(Constant, metaclass=_ABC): + _fields = () + + def __new__(cls, *args, **kwargs): + if cls is Ellipsis: + return Constant(..., *args, **kwargs) + return Constant.__new__(cls, *args, **kwargs) + +_const_types = { + Num: (int, float, complex), + Str: (str,), + Bytes: (bytes,), + NameConstant: (type(None), bool), + Ellipsis: (type(...),), +} +_const_types_not = { + Num: (bool,), +} + +_const_node_type_names = { + bool: 'NameConstant', # should be before int + type(None): 'NameConstant', + int: 'Num', + float: 'Num', + complex: 'Num', + str: 'Str', + bytes: 'Bytes', + type(...): 'Ellipsis', +} + +class slice(AST): + """Deprecated AST node class.""" + +class Index(slice): + """Deprecated AST node class. Use the index value directly instead.""" + def __new__(cls, value, **kwargs): + return value + +class ExtSlice(slice): + """Deprecated AST node class. Use ast.Tuple instead.""" + def __new__(cls, dims=(), **kwargs): + return Tuple(list(dims), Load(), **kwargs) + +# If the ast module is loaded more than once, only add deprecated methods once +if not hasattr(Tuple, 'dims'): + # The following code is for backward compatibility. + # It will be removed in future. + + def _dims_getter(self): + """Deprecated. 
Use elts instead.""" + return self.elts + + def _dims_setter(self, value): + self.elts = value + + Tuple.dims = property(_dims_getter, _dims_setter) + +class Suite(mod): + """Deprecated AST node class. Unused in Python 3.""" + +class AugLoad(expr_context): + """Deprecated AST node class. Unused in Python 3.""" + +class AugStore(expr_context): + """Deprecated AST node class. Unused in Python 3.""" + +class Param(expr_context): + """Deprecated AST node class. Unused in Python 3.""" + + +# Large float and imaginary literals get turned into infinities in the AST. +# We unparse those infinities to INFSTR. +_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1) + +class _Precedence(IntEnum): + """Precedence table that originated from python grammar.""" + + TUPLE = auto() + YIELD = auto() # 'yield', 'yield from' + TEST = auto() # 'if'-'else', 'lambda' + OR = auto() # 'or' + AND = auto() # 'and' + NOT = auto() # 'not' + CMP = auto() # '<', '>', '==', '>=', '<=', '!=', + # 'in', 'not in', 'is', 'is not' + EXPR = auto() + BOR = EXPR # '|' + BXOR = auto() # '^' + BAND = auto() # '&' + SHIFT = auto() # '<<', '>>' + ARITH = auto() # '+', '-' + TERM = auto() # '*', '@', '/', '%', '//' + FACTOR = auto() # unary '+', '-', '~' + POWER = auto() # '**' + AWAIT = auto() # 'await' + ATOM = auto() + + def next(self): + try: + return self.__class__(self + 1) + except ValueError: + return self + + +_SINGLE_QUOTES = ("'", '"') +_MULTI_QUOTES = ('"""', "'''") +_ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES) + +class _Unparser(NodeVisitor): + """Methods in this class recursively traverse an AST and + output source code for the abstract syntax; original formatting + is disregarded.""" + + def __init__(self, *, _avoid_backslashes=False): + self._source = [] + self._buffer = [] + self._precedences = {} + self._type_ignores = {} + self._indent = 0 + self._avoid_backslashes = _avoid_backslashes + + def interleave(self, inter, f, seq): + """Call f on each item in seq, calling inter() in between.""" + seq = iter(seq) + try: + f(next(seq)) + except StopIteration: + pass + else: + for x in seq: + inter() + f(x) + + def items_view(self, traverser, items): + """Traverse and separate the given *items* with a comma and append it to + the buffer. If *items* is a single item sequence, a trailing comma + will be added.""" + if len(items) == 1: + traverser(items[0]) + self.write(",") + else: + self.interleave(lambda: self.write(", "), traverser, items) + + def maybe_newline(self): + """Adds a newline if it isn't the start of generated source""" + if self._source: + self.write("\n") + + def fill(self, text=""): + """Indent a piece of text and append it, according to the current + indentation level""" + self.maybe_newline() + self.write(" " * self._indent + text) + + def write(self, text): + """Append a piece of text""" + self._source.append(text) + + def buffer_writer(self, text): + self._buffer.append(text) + + @property + def buffer(self): + value = "".join(self._buffer) + self._buffer.clear() + return value + + @contextmanager + def block(self, *, extra = None): + """A context manager for preparing the source for blocks. It adds + the character':', increases the indentation on enter and decreases + the indentation on exit. If *extra* is given, it will be directly + appended after the colon character. + """ + self.write(":") + if extra: + self.write(extra) + self._indent += 1 + yield + self._indent -= 1 + + @contextmanager + def delimit(self, start, end): + """A context manager for preparing the source for expressions. 
It adds + *start* to the buffer and enters, after exit it adds *end*.""" + + self.write(start) + yield + self.write(end) + + def delimit_if(self, start, end, condition): + if condition: + return self.delimit(start, end) + else: + return nullcontext() + + def require_parens(self, precedence, node): + """Shortcut to adding precedence related parens""" + return self.delimit_if("(", ")", self.get_precedence(node) > precedence) + + def get_precedence(self, node): + return self._precedences.get(node, _Precedence.TEST) + + def set_precedence(self, precedence, *nodes): + for node in nodes: + self._precedences[node] = precedence + + def get_raw_docstring(self, node): + """If a docstring node is found in the body of the *node* parameter, + return that docstring node, None otherwise. + + Logic mirrored from ``_PyAST_GetDocString``.""" + if not isinstance( + node, (AsyncFunctionDef, FunctionDef, ClassDef, Module) + ) or len(node.body) < 1: + return None + node = node.body[0] + if not isinstance(node, Expr): + return None + node = node.value + if isinstance(node, Constant) and isinstance(node.value, str): + return node + + def get_type_comment(self, node): + comment = self._type_ignores.get(node.lineno) or node.type_comment + if comment is not None: + return f" # type: {comment}" + + def traverse(self, node): + if isinstance(node, list): + for item in node: + self.traverse(item) + else: + super().visit(node) + + # Note: as visit() resets the output text, do NOT rely on + # NodeVisitor.generic_visit to handle any nodes (as it calls back in to + # the subclass visit() method, which resets self._source to an empty list) + def visit(self, node): + """Outputs a source code string that, if converted back to an ast + (using ast.parse) will generate an AST equivalent to *node*""" + self._source = [] + self.traverse(node) + return "".join(self._source) + + def _write_docstring_and_traverse_body(self, node): + if (docstring := self.get_raw_docstring(node)): + self._write_docstring(docstring) + self.traverse(node.body[1:]) + else: + self.traverse(node.body) + + def visit_Module(self, node): + self._type_ignores = { + ignore.lineno: f"ignore{ignore.tag}" + for ignore in node.type_ignores + } + self._write_docstring_and_traverse_body(node) + self._type_ignores.clear() + + def visit_FunctionType(self, node): + with self.delimit("(", ")"): + self.interleave( + lambda: self.write(", "), self.traverse, node.argtypes + ) + + self.write(" -> ") + self.traverse(node.returns) + + def visit_Expr(self, node): + self.fill() + self.set_precedence(_Precedence.YIELD, node.value) + self.traverse(node.value) + + def visit_NamedExpr(self, node): + with self.require_parens(_Precedence.TUPLE, node): + self.set_precedence(_Precedence.ATOM, node.target, node.value) + self.traverse(node.target) + self.write(" := ") + self.traverse(node.value) + + def visit_Import(self, node): + self.fill("import ") + self.interleave(lambda: self.write(", "), self.traverse, node.names) + + def visit_ImportFrom(self, node): + self.fill("from ") + self.write("." 
* (node.level or 0)) + if node.module: + self.write(node.module) + self.write(" import ") + self.interleave(lambda: self.write(", "), self.traverse, node.names) + + def visit_Assign(self, node): + self.fill() + for target in node.targets: + self.traverse(target) + self.write(" = ") + self.traverse(node.value) + if type_comment := self.get_type_comment(node): + self.write(type_comment) + + def visit_AugAssign(self, node): + self.fill() + self.traverse(node.target) + self.write(" " + self.binop[node.op.__class__.__name__] + "= ") + self.traverse(node.value) + + def visit_AnnAssign(self, node): + self.fill() + with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)): + self.traverse(node.target) + self.write(": ") + self.traverse(node.annotation) + if node.value: + self.write(" = ") + self.traverse(node.value) + + def visit_Return(self, node): + self.fill("return") + if node.value: + self.write(" ") + self.traverse(node.value) + + def visit_Pass(self, node): + self.fill("pass") + + def visit_Break(self, node): + self.fill("break") + + def visit_Continue(self, node): + self.fill("continue") + + def visit_Delete(self, node): + self.fill("del ") + self.interleave(lambda: self.write(", "), self.traverse, node.targets) + + def visit_Assert(self, node): + self.fill("assert ") + self.traverse(node.test) + if node.msg: + self.write(", ") + self.traverse(node.msg) + + def visit_Global(self, node): + self.fill("global ") + self.interleave(lambda: self.write(", "), self.write, node.names) + + def visit_Nonlocal(self, node): + self.fill("nonlocal ") + self.interleave(lambda: self.write(", "), self.write, node.names) + + def visit_Await(self, node): + with self.require_parens(_Precedence.AWAIT, node): + self.write("await") + if node.value: + self.write(" ") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_Yield(self, node): + with self.require_parens(_Precedence.YIELD, node): + self.write("yield") + if node.value: + self.write(" ") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_YieldFrom(self, node): + with self.require_parens(_Precedence.YIELD, node): + self.write("yield from ") + if not node.value: + raise ValueError("Node can't be used without a value attribute.") + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + + def visit_Raise(self, node): + self.fill("raise") + if not node.exc: + if node.cause: + raise ValueError(f"Node can't use cause without an exception.") + return + self.write(" ") + self.traverse(node.exc) + if node.cause: + self.write(" from ") + self.traverse(node.cause) + + def visit_Try(self, node): + self.fill("try") + with self.block(): + self.traverse(node.body) + for ex in node.handlers: + self.traverse(ex) + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + if node.finalbody: + self.fill("finally") + with self.block(): + self.traverse(node.finalbody) + + def visit_ExceptHandler(self, node): + self.fill("except") + if node.type: + self.write(" ") + self.traverse(node.type) + if node.name: + self.write(" as ") + self.write(node.name) + with self.block(): + self.traverse(node.body) + + def visit_ClassDef(self, node): + self.maybe_newline() + for deco in node.decorator_list: + self.fill("@") + self.traverse(deco) + self.fill("class " + node.name) + with self.delimit_if("(", ")", condition = node.bases or node.keywords): + comma = False + for e in node.bases: + if comma: + self.write(", ") + else: + comma 
= True + self.traverse(e) + for e in node.keywords: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + + with self.block(): + self._write_docstring_and_traverse_body(node) + + def visit_FunctionDef(self, node): + self._function_helper(node, "def") + + def visit_AsyncFunctionDef(self, node): + self._function_helper(node, "async def") + + def _function_helper(self, node, fill_suffix): + self.maybe_newline() + for deco in node.decorator_list: + self.fill("@") + self.traverse(deco) + def_str = fill_suffix + " " + node.name + self.fill(def_str) + with self.delimit("(", ")"): + self.traverse(node.args) + if node.returns: + self.write(" -> ") + self.traverse(node.returns) + with self.block(extra=self.get_type_comment(node)): + self._write_docstring_and_traverse_body(node) + + def visit_For(self, node): + self._for_helper("for ", node) + + def visit_AsyncFor(self, node): + self._for_helper("async for ", node) + + def _for_helper(self, fill, node): + self.fill(fill) + self.traverse(node.target) + self.write(" in ") + self.traverse(node.iter) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + + def visit_If(self, node): + self.fill("if ") + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + # collapse nested ifs into equivalent elifs. + while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If): + node = node.orelse[0] + self.fill("elif ") + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + # final else + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + + def visit_While(self, node): + self.fill("while ") + self.traverse(node.test) + with self.block(): + self.traverse(node.body) + if node.orelse: + self.fill("else") + with self.block(): + self.traverse(node.orelse) + + def visit_With(self, node): + self.fill("with ") + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + + def visit_AsyncWith(self, node): + self.fill("async with ") + self.interleave(lambda: self.write(", "), self.traverse, node.items) + with self.block(extra=self.get_type_comment(node)): + self.traverse(node.body) + + def _str_literal_helper( + self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False + ): + """Helper for writing string literals, minimizing escapes. + Returns the tuple (string literal to write, possible quote types). + """ + def escape_char(c): + # \n and \t are non-printable, but we only escape them if + # escape_special_whitespace is True + if not escape_special_whitespace and c in "\n\t": + return c + # Always escape backslashes and other non-printable characters + if c == "\\" or not c.isprintable(): + return c.encode("unicode_escape").decode("ascii") + return c + + escaped_string = "".join(map(escape_char, string)) + possible_quotes = quote_types + if "\n" in escaped_string: + possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES] + possible_quotes = [q for q in possible_quotes if q not in escaped_string] + if not possible_quotes: + # If there aren't any possible_quotes, fallback to using repr + # on the original string. Try to use a quote from quote_types, + # e.g., so that we use triple quotes for docstrings. 
+ string = repr(string) + quote = next((q for q in quote_types if string[0] in q), string[0]) + return string[1:-1], [quote] + if escaped_string: + # Sort so that we prefer '''"''' over """\"""" + possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1]) + # If we're using triple quotes and we'd need to escape a final + # quote, escape it + if possible_quotes[0][0] == escaped_string[-1]: + assert len(possible_quotes[0]) == 3 + escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1] + return escaped_string, possible_quotes + + def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES): + """Write string literal value with a best effort attempt to avoid backslashes.""" + string, quote_types = self._str_literal_helper(string, quote_types=quote_types) + quote_type = quote_types[0] + self.write(f"{quote_type}{string}{quote_type}") + + def visit_JoinedStr(self, node): + self.write("f") + if self._avoid_backslashes: + self._fstring_JoinedStr(node, self.buffer_writer) + self._write_str_avoiding_backslashes(self.buffer) + return + + # If we don't need to avoid backslashes globally (i.e., we only need + # to avoid them inside FormattedValues), it's cosmetically preferred + # to use escaped whitespace. That is, it's preferred to use backslashes + # for cases like: f"{x}\n". To accomplish this, we keep track of what + # in our buffer corresponds to FormattedValues and what corresponds to + # Constant parts of the f-string, and allow escapes accordingly. + buffer = [] + for value in node.values: + meth = getattr(self, "_fstring_" + type(value).__name__) + meth(value, self.buffer_writer) + buffer.append((self.buffer, isinstance(value, Constant))) + new_buffer = [] + quote_types = _ALL_QUOTES + for value, is_constant in buffer: + # Repeatedly narrow down the list of possible quote_types + value, quote_types = self._str_literal_helper( + value, quote_types=quote_types, + escape_special_whitespace=is_constant + ) + new_buffer.append(value) + value = "".join(new_buffer) + quote_type = quote_types[0] + self.write(f"{quote_type}{value}{quote_type}") + + def visit_FormattedValue(self, node): + self.write("f") + self._fstring_FormattedValue(node, self.buffer_writer) + self._write_str_avoiding_backslashes(self.buffer) + + def _fstring_JoinedStr(self, node, write): + for value in node.values: + meth = getattr(self, "_fstring_" + type(value).__name__) + meth(value, write) + + def _fstring_Constant(self, node, write): + if not isinstance(node.value, str): + raise ValueError("Constants inside JoinedStr should be a string.") + value = node.value.replace("{", "{{").replace("}", "}}") + write(value) + + def _fstring_FormattedValue(self, node, write): + write("{") + unparser = type(self)(_avoid_backslashes=True) + unparser.set_precedence(_Precedence.TEST.next(), node.value) + expr = unparser.visit(node.value) + if expr.startswith("{"): + write(" ") # Separate pair of opening brackets as "{ {" + if "\\" in expr: + raise ValueError("Unable to avoid backslash in f-string expression part") + write(expr) + if node.conversion != -1: + conversion = chr(node.conversion) + if conversion not in "sra": + raise ValueError("Unknown f-string conversion.") + write(f"!{conversion}") + if node.format_spec: + write(":") + meth = getattr(self, "_fstring_" + type(node.format_spec).__name__) + meth(node.format_spec, write) + write("}") + + def visit_Name(self, node): + self.write(node.id) + + def _write_docstring(self, node): + self.fill() + if node.kind == "u": + self.write("u") + 
self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES) + + def _write_constant(self, value): + if isinstance(value, (float, complex)): + # Substitute overflowing decimal literal for AST infinities, + # and inf - inf for NaNs. + self.write( + repr(value) + .replace("inf", _INFSTR) + .replace("nan", f"({_INFSTR}-{_INFSTR})") + ) + elif self._avoid_backslashes and isinstance(value, str): + self._write_str_avoiding_backslashes(value) + else: + self.write(repr(value)) + + def visit_Constant(self, node): + value = node.value + if isinstance(value, tuple): + with self.delimit("(", ")"): + self.items_view(self._write_constant, value) + elif value is ...: + self.write("...") + else: + if node.kind == "u": + self.write("u") + self._write_constant(node.value) + + def visit_List(self, node): + with self.delimit("[", "]"): + self.interleave(lambda: self.write(", "), self.traverse, node.elts) + + def visit_ListComp(self, node): + with self.delimit("[", "]"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_GeneratorExp(self, node): + with self.delimit("(", ")"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_SetComp(self, node): + with self.delimit("{", "}"): + self.traverse(node.elt) + for gen in node.generators: + self.traverse(gen) + + def visit_DictComp(self, node): + with self.delimit("{", "}"): + self.traverse(node.key) + self.write(": ") + self.traverse(node.value) + for gen in node.generators: + self.traverse(gen) + + def visit_comprehension(self, node): + if node.is_async: + self.write(" async for ") + else: + self.write(" for ") + self.set_precedence(_Precedence.TUPLE, node.target) + self.traverse(node.target) + self.write(" in ") + self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs) + self.traverse(node.iter) + for if_clause in node.ifs: + self.write(" if ") + self.traverse(if_clause) + + def visit_IfExp(self, node): + with self.require_parens(_Precedence.TEST, node): + self.set_precedence(_Precedence.TEST.next(), node.body, node.test) + self.traverse(node.body) + self.write(" if ") + self.traverse(node.test) + self.write(" else ") + self.set_precedence(_Precedence.TEST, node.orelse) + self.traverse(node.orelse) + + def visit_Set(self, node): + if node.elts: + with self.delimit("{", "}"): + self.interleave(lambda: self.write(", "), self.traverse, node.elts) + else: + # `{}` would be interpreted as a dictionary literal, and + # `set` might be shadowed. 
Thus: + self.write('{*()}') + + def visit_Dict(self, node): + def write_key_value_pair(k, v): + self.traverse(k) + self.write(": ") + self.traverse(v) + + def write_item(item): + k, v = item + if k is None: + # for dictionary unpacking operator in dicts {**{'y': 2}} + # see PEP 448 for details + self.write("**") + self.set_precedence(_Precedence.EXPR, v) + self.traverse(v) + else: + write_key_value_pair(k, v) + + with self.delimit("{", "}"): + self.interleave( + lambda: self.write(", "), write_item, zip(node.keys, node.values) + ) + + def visit_Tuple(self, node): + with self.delimit("(", ")"): + self.items_view(self.traverse, node.elts) + + unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"} + unop_precedence = { + "not": _Precedence.NOT, + "~": _Precedence.FACTOR, + "+": _Precedence.FACTOR, + "-": _Precedence.FACTOR, + } + + def visit_UnaryOp(self, node): + operator = self.unop[node.op.__class__.__name__] + operator_precedence = self.unop_precedence[operator] + with self.require_parens(operator_precedence, node): + self.write(operator) + # factor prefixes (+, -, ~) shouldn't be seperated + # from the value they belong, (e.g: +1 instead of + 1) + if operator_precedence is not _Precedence.FACTOR: + self.write(" ") + self.set_precedence(operator_precedence, node.operand) + self.traverse(node.operand) + + binop = { + "Add": "+", + "Sub": "-", + "Mult": "*", + "MatMult": "@", + "Div": "/", + "Mod": "%", + "LShift": "<<", + "RShift": ">>", + "BitOr": "|", + "BitXor": "^", + "BitAnd": "&", + "FloorDiv": "//", + "Pow": "**", + } + + binop_precedence = { + "+": _Precedence.ARITH, + "-": _Precedence.ARITH, + "*": _Precedence.TERM, + "@": _Precedence.TERM, + "/": _Precedence.TERM, + "%": _Precedence.TERM, + "<<": _Precedence.SHIFT, + ">>": _Precedence.SHIFT, + "|": _Precedence.BOR, + "^": _Precedence.BXOR, + "&": _Precedence.BAND, + "//": _Precedence.TERM, + "**": _Precedence.POWER, + } + + binop_rassoc = frozenset(("**",)) + def visit_BinOp(self, node): + operator = self.binop[node.op.__class__.__name__] + operator_precedence = self.binop_precedence[operator] + with self.require_parens(operator_precedence, node): + if operator in self.binop_rassoc: + left_precedence = operator_precedence.next() + right_precedence = operator_precedence + else: + left_precedence = operator_precedence + right_precedence = operator_precedence.next() + + self.set_precedence(left_precedence, node.left) + self.traverse(node.left) + self.write(f" {operator} ") + self.set_precedence(right_precedence, node.right) + self.traverse(node.right) + + cmpops = { + "Eq": "==", + "NotEq": "!=", + "Lt": "<", + "LtE": "<=", + "Gt": ">", + "GtE": ">=", + "Is": "is", + "IsNot": "is not", + "In": "in", + "NotIn": "not in", + } + + def visit_Compare(self, node): + with self.require_parens(_Precedence.CMP, node): + self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators) + self.traverse(node.left) + for o, e in zip(node.ops, node.comparators): + self.write(" " + self.cmpops[o.__class__.__name__] + " ") + self.traverse(e) + + boolops = {"And": "and", "Or": "or"} + boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR} + + def visit_BoolOp(self, node): + operator = self.boolops[node.op.__class__.__name__] + operator_precedence = self.boolop_precedence[operator] + + def increasing_level_traverse(node): + nonlocal operator_precedence + operator_precedence = operator_precedence.next() + self.set_precedence(operator_precedence, node) + self.traverse(node) + + with self.require_parens(operator_precedence, 
node): + s = f" {operator} " + self.interleave(lambda: self.write(s), increasing_level_traverse, node.values) + + def visit_Attribute(self, node): + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + # Special case: 3.__abs__() is a syntax error, so if node.value + # is an integer literal then we need to either parenthesize + # it or add an extra space to get 3 .__abs__(). + if isinstance(node.value, Constant) and isinstance(node.value.value, int): + self.write(" ") + self.write(".") + self.write(node.attr) + + def visit_Call(self, node): + self.set_precedence(_Precedence.ATOM, node.func) + self.traverse(node.func) + with self.delimit("(", ")"): + comma = False + for e in node.args: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + for e in node.keywords: + if comma: + self.write(", ") + else: + comma = True + self.traverse(e) + + def visit_Subscript(self, node): + def is_simple_tuple(slice_value): + # when unparsing a non-empty tuple, the parentheses can be safely + # omitted if there aren't any elements that explicitly requires + # parentheses (such as starred expressions). + return ( + isinstance(slice_value, Tuple) + and slice_value.elts + and not any(isinstance(elt, Starred) for elt in slice_value.elts) + ) + + self.set_precedence(_Precedence.ATOM, node.value) + self.traverse(node.value) + with self.delimit("[", "]"): + if is_simple_tuple(node.slice): + self.items_view(self.traverse, node.slice.elts) + else: + self.traverse(node.slice) + + def visit_Starred(self, node): + self.write("*") + self.set_precedence(_Precedence.EXPR, node.value) + self.traverse(node.value) + + def visit_Ellipsis(self, node): + self.write("...") + + def visit_Slice(self, node): + if node.lower: + self.traverse(node.lower) + self.write(":") + if node.upper: + self.traverse(node.upper) + if node.step: + self.write(":") + self.traverse(node.step) + + def visit_Match(self, node): + self.fill("match ") + self.traverse(node.subject) + with self.block(): + for case in node.cases: + self.traverse(case) + + def visit_arg(self, node): + self.write(node.arg) + if node.annotation: + self.write(": ") + self.traverse(node.annotation) + + def visit_arguments(self, node): + first = True + # normal arguments + all_args = node.posonlyargs + node.args + defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults + for index, elements in enumerate(zip(all_args, defaults), 1): + a, d = elements + if first: + first = False + else: + self.write(", ") + self.traverse(a) + if d: + self.write("=") + self.traverse(d) + if index == len(node.posonlyargs): + self.write(", /") + + # varargs, or bare '*' if no varargs but keyword-only arguments present + if node.vararg or node.kwonlyargs: + if first: + first = False + else: + self.write(", ") + self.write("*") + if node.vararg: + self.write(node.vararg.arg) + if node.vararg.annotation: + self.write(": ") + self.traverse(node.vararg.annotation) + + # keyword-only arguments + if node.kwonlyargs: + for a, d in zip(node.kwonlyargs, node.kw_defaults): + self.write(", ") + self.traverse(a) + if d: + self.write("=") + self.traverse(d) + + # kwargs + if node.kwarg: + if first: + first = False + else: + self.write(", ") + self.write("**" + node.kwarg.arg) + if node.kwarg.annotation: + self.write(": ") + self.traverse(node.kwarg.annotation) + + def visit_keyword(self, node): + if node.arg is None: + self.write("**") + else: + self.write(node.arg) + self.write("=") + self.traverse(node.value) + + def visit_Lambda(self, node): + with 
self.require_parens(_Precedence.TEST, node): + self.write("lambda ") + self.traverse(node.args) + self.write(": ") + self.set_precedence(_Precedence.TEST, node.body) + self.traverse(node.body) + + def visit_alias(self, node): + self.write(node.name) + if node.asname: + self.write(" as " + node.asname) + + def visit_withitem(self, node): + self.traverse(node.context_expr) + if node.optional_vars: + self.write(" as ") + self.traverse(node.optional_vars) + + def visit_match_case(self, node): + self.fill("case ") + self.traverse(node.pattern) + if node.guard: + self.write(" if ") + self.traverse(node.guard) + with self.block(): + self.traverse(node.body) + + def visit_MatchValue(self, node): + self.traverse(node.value) + + def visit_MatchSingleton(self, node): + self._write_constant(node.value) + + def visit_MatchSequence(self, node): + with self.delimit("[", "]"): + self.interleave( + lambda: self.write(", "), self.traverse, node.patterns + ) + + def visit_MatchStar(self, node): + name = node.name + if name is None: + name = "_" + self.write(f"*{name}") + + def visit_MatchMapping(self, node): + def write_key_pattern_pair(pair): + k, p = pair + self.traverse(k) + self.write(": ") + self.traverse(p) + + with self.delimit("{", "}"): + keys = node.keys + self.interleave( + lambda: self.write(", "), + write_key_pattern_pair, + zip(keys, node.patterns, strict=True), + ) + rest = node.rest + if rest is not None: + if keys: + self.write(", ") + self.write(f"**{rest}") + + def visit_MatchClass(self, node): + self.set_precedence(_Precedence.ATOM, node.cls) + self.traverse(node.cls) + with self.delimit("(", ")"): + patterns = node.patterns + self.interleave( + lambda: self.write(", "), self.traverse, patterns + ) + attrs = node.kwd_attrs + if attrs: + def write_attr_pattern(pair): + attr, pattern = pair + self.write(f"{attr}=") + self.traverse(pattern) + + if patterns: + self.write(", ") + self.interleave( + lambda: self.write(", "), + write_attr_pattern, + zip(attrs, node.kwd_patterns, strict=True), + ) + + def visit_MatchAs(self, node): + name = node.name + pattern = node.pattern + if name is None: + self.write("_") + elif pattern is None: + self.write(node.name) + else: + with self.require_parens(_Precedence.TEST, node): + self.set_precedence(_Precedence.BOR, node.pattern) + self.traverse(node.pattern) + self.write(f" as {node.name}") + + def visit_MatchOr(self, node): + with self.require_parens(_Precedence.BOR, node): + self.set_precedence(_Precedence.BOR.next(), *node.patterns) + self.interleave(lambda: self.write(" | "), self.traverse, node.patterns) + +def unparse(ast_obj): + unparser = _Unparser() + return unparser.visit(ast_obj) + + +def main(): + import argparse + + parser = argparse.ArgumentParser(prog='python -m ast') + parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?', + default='-', + help='the file to parse; defaults to stdin') + parser.add_argument('-m', '--mode', default='exec', + choices=('exec', 'single', 'eval', 'func_type'), + help='specify what kind of code must be parsed') + parser.add_argument('--no-type-comments', default=True, action='store_false', + help="don't add information about type comments") + parser.add_argument('-a', '--include-attributes', action='store_true', + help='include attributes such as line numbers and ' + 'column offsets') + parser.add_argument('-i', '--indent', type=int, default=3, + help='indentation of nodes (number of spaces)') + args = parser.parse_args() + + with args.infile as infile: + source = infile.read() + tree = 
parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments) + print(dump(tree, include_attributes=args.include_attributes, indent=args.indent)) + +if __name__ == '__main__': + main() diff --git a/python310/asynchat.py b/python310/asynchat.py new file mode 100644 index 0000000000000000000000000000000000000000..e081e67c75acb01332bee52dcc05e81eac34d436 --- /dev/null +++ b/python310/asynchat.py @@ -0,0 +1,315 @@ +# -*- Mode: Python; tab-width: 4 -*- +# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp +# Author: Sam Rushing + +# ====================================================================== +# Copyright 1996 by Sam Rushing +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of Sam +# Rushing not be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior +# permission. +# +# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN +# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# ====================================================================== + +r"""A class supporting chat-style (command/response) protocols. + +This class adds support for 'chat' style protocols - where one side +sends a 'command', and the other sends a response (examples would be +the common internet protocols - smtp, nntp, ftp, etc..). + +The handle_read() method looks at the input stream for the current +'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n' +for multi-line output), calling self.found_terminator() on its +receipt. + +for example: +Say you build an async nntp client using this class. At the start +of the connection, you'll have self.terminator set to '\r\n', in +order to process the single-line greeting. Just before issuing a +'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST +command will be accumulated (using your own 'collect_incoming_data' +method) up to the terminator, and then control will be returned to +you - by calling your self.found_terminator() method. +""" +import asyncore +from collections import deque + +from warnings import warn +warn( + 'The asynchat module is deprecated and will be removed in Python 3.12. ' + 'The recommended replacement is asyncio', + DeprecationWarning, + stacklevel=2) + + + +class async_chat(asyncore.dispatcher): + """This is an abstract class. 
You must derive from this class, and add + the two methods collect_incoming_data() and found_terminator()""" + + # these are overridable defaults + + ac_in_buffer_size = 65536 + ac_out_buffer_size = 65536 + + # we don't want to enable the use of encoding by default, because that is a + # sign of an application bug that we don't want to pass silently + + use_encoding = 0 + encoding = 'latin-1' + + def __init__(self, sock=None, map=None): + # for string terminator matching + self.ac_in_buffer = b'' + + # we use a list here rather than io.BytesIO for a few reasons... + # del lst[:] is faster than bio.truncate(0) + # lst = [] is faster than bio.truncate(0) + self.incoming = [] + + # we toss the use of the "simple producer" and replace it with + # a pure deque, which the original fifo was a wrapping of + self.producer_fifo = deque() + asyncore.dispatcher.__init__(self, sock, map) + + def collect_incoming_data(self, data): + raise NotImplementedError("must be implemented in subclass") + + def _collect_incoming_data(self, data): + self.incoming.append(data) + + def _get_data(self): + d = b''.join(self.incoming) + del self.incoming[:] + return d + + def found_terminator(self): + raise NotImplementedError("must be implemented in subclass") + + def set_terminator(self, term): + """Set the input delimiter. + + Can be a fixed string of any length, an integer, or None. + """ + if isinstance(term, str) and self.use_encoding: + term = bytes(term, self.encoding) + elif isinstance(term, int) and term < 0: + raise ValueError('the number of received bytes must be positive') + self.terminator = term + + def get_terminator(self): + return self.terminator + + # grab some more data from the socket, + # throw it to the collector method, + # check for the terminator, + # if found, transition to the next state. + + def handle_read(self): + + try: + data = self.recv(self.ac_in_buffer_size) + except BlockingIOError: + return + except OSError: + self.handle_error() + return + + if isinstance(data, str) and self.use_encoding: + data = bytes(str, self.encoding) + self.ac_in_buffer = self.ac_in_buffer + data + + # Continue to search for self.terminator in self.ac_in_buffer, + # while calling self.collect_incoming_data. The while loop + # is necessary because we might read several data+terminator + # combos with a single recv(4096). 
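For illustration, a minimal sketch of the subclass contract this read loop drives, assuming a simple '\r\n'-terminated line protocol; handle_line() is a hypothetical application hook, not part of asynchat:

    import asynchat

    class LineChannel(asynchat.async_chat):
        def __init__(self, sock=None):
            super().__init__(sock)
            self._buffer = []
            self.set_terminator(b'\r\n')       # fixed-string terminator

        def collect_incoming_data(self, data):
            self._buffer.append(data)          # data up to (not including) the terminator

        def found_terminator(self):
            line = b''.join(self._buffer)      # one complete "command" has arrived
            self._buffer = []
            self.handle_line(line)

        def handle_line(self, line):           # hypothetical application hook
            print('received:', line)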
+ + while self.ac_in_buffer: + lb = len(self.ac_in_buffer) + terminator = self.get_terminator() + if not terminator: + # no terminator, collect it all + self.collect_incoming_data(self.ac_in_buffer) + self.ac_in_buffer = b'' + elif isinstance(terminator, int): + # numeric terminator + n = terminator + if lb < n: + self.collect_incoming_data(self.ac_in_buffer) + self.ac_in_buffer = b'' + self.terminator = self.terminator - lb + else: + self.collect_incoming_data(self.ac_in_buffer[:n]) + self.ac_in_buffer = self.ac_in_buffer[n:] + self.terminator = 0 + self.found_terminator() + else: + # 3 cases: + # 1) end of buffer matches terminator exactly: + # collect data, transition + # 2) end of buffer matches some prefix: + # collect data to the prefix + # 3) end of buffer does not match any prefix: + # collect data + terminator_len = len(terminator) + index = self.ac_in_buffer.find(terminator) + if index != -1: + # we found the terminator + if index > 0: + # don't bother reporting the empty string + # (source of subtle bugs) + self.collect_incoming_data(self.ac_in_buffer[:index]) + self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:] + # This does the Right Thing if the terminator + # is changed here. + self.found_terminator() + else: + # check for a prefix of the terminator + index = find_prefix_at_end(self.ac_in_buffer, terminator) + if index: + if index != lb: + # we found a prefix, collect up to the prefix + self.collect_incoming_data(self.ac_in_buffer[:-index]) + self.ac_in_buffer = self.ac_in_buffer[-index:] + break + else: + # no prefix, collect it all + self.collect_incoming_data(self.ac_in_buffer) + self.ac_in_buffer = b'' + + def handle_write(self): + self.initiate_send() + + def handle_close(self): + self.close() + + def push(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be byte-ish (%r)', + type(data)) + sabs = self.ac_out_buffer_size + if len(data) > sabs: + for i in range(0, len(data), sabs): + self.producer_fifo.append(data[i:i+sabs]) + else: + self.producer_fifo.append(data) + self.initiate_send() + + def push_with_producer(self, producer): + self.producer_fifo.append(producer) + self.initiate_send() + + def readable(self): + "predicate for inclusion in the readable for select()" + # cannot use the old predicate, it violates the claim of the + # set_terminator method. 
+ + # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size) + return 1 + + def writable(self): + "predicate for inclusion in the writable for select()" + return self.producer_fifo or (not self.connected) + + def close_when_done(self): + "automatically close this channel once the outgoing queue is empty" + self.producer_fifo.append(None) + + def initiate_send(self): + while self.producer_fifo and self.connected: + first = self.producer_fifo[0] + # handle empty string/buffer or None entry + if not first: + del self.producer_fifo[0] + if first is None: + self.handle_close() + return + + # handle classic producer behavior + obs = self.ac_out_buffer_size + try: + data = first[:obs] + except TypeError: + data = first.more() + if data: + self.producer_fifo.appendleft(data) + else: + del self.producer_fifo[0] + continue + + if isinstance(data, str) and self.use_encoding: + data = bytes(data, self.encoding) + + # send the data + try: + num_sent = self.send(data) + except OSError: + self.handle_error() + return + + if num_sent: + if num_sent < len(data) or obs < len(first): + self.producer_fifo[0] = first[num_sent:] + else: + del self.producer_fifo[0] + # we tried to send some actual data + return + + def discard_buffers(self): + # Emergencies only! + self.ac_in_buffer = b'' + del self.incoming[:] + self.producer_fifo.clear() + + +class simple_producer: + + def __init__(self, data, buffer_size=512): + self.data = data + self.buffer_size = buffer_size + + def more(self): + if len(self.data) > self.buffer_size: + result = self.data[:self.buffer_size] + self.data = self.data[self.buffer_size:] + return result + else: + result = self.data + self.data = b'' + return result + + +# Given 'haystack', see if any prefix of 'needle' is at its end. This +# assumes an exact match has already been checked. Return the number of +# characters matched. +# for example: +# f_p_a_e("qwerty\r", "\r\n") => 1 +# f_p_a_e("qwertydkjf", "\r\n") => 0 +# f_p_a_e("qwerty\r\n", "\r\n") => + +# this could maybe be made faster with a computed regex? +# [answer: no; circa Python-2.0, Jan 2001] +# new python: 28961/s +# old python: 18307/s +# re: 12820/s +# regex: 14035/s + +def find_prefix_at_end(haystack, needle): + l = len(needle) - 1 + while l and not haystack.endswith(needle[:l]): + l -= 1 + return l diff --git a/python310/asyncio/__init__.py b/python310/asyncio/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..200b14c2a3f21eb04bdb164896112038fba247f8 --- /dev/null +++ b/python310/asyncio/__init__.py @@ -0,0 +1,43 @@ +"""The asyncio package, tracking PEP 3156.""" + +# flake8: noqa + +import sys + +# This relies on each of the submodules having an __all__ variable. 
+from .base_events import * +from .coroutines import * +from .events import * +from .exceptions import * +from .futures import * +from .locks import * +from .protocols import * +from .runners import * +from .queues import * +from .streams import * +from .subprocess import * +from .tasks import * +from .threads import * +from .transports import * + +__all__ = (base_events.__all__ + + coroutines.__all__ + + events.__all__ + + exceptions.__all__ + + futures.__all__ + + locks.__all__ + + protocols.__all__ + + runners.__all__ + + queues.__all__ + + streams.__all__ + + subprocess.__all__ + + tasks.__all__ + + threads.__all__ + + transports.__all__) + +if sys.platform == 'win32': # pragma: no cover + from .windows_events import * + __all__ += windows_events.__all__ +else: + from .unix_events import * # pragma: no cover + __all__ += unix_events.__all__ diff --git a/python310/asyncio/__main__.py b/python310/asyncio/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..18bb87a5bc4ffd89fb24ef193ee88af64ee5dcbc --- /dev/null +++ b/python310/asyncio/__main__.py @@ -0,0 +1,125 @@ +import ast +import asyncio +import code +import concurrent.futures +import inspect +import sys +import threading +import types +import warnings + +from . import futures + + +class AsyncIOInteractiveConsole(code.InteractiveConsole): + + def __init__(self, locals, loop): + super().__init__(locals) + self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT + + self.loop = loop + + def runcode(self, code): + future = concurrent.futures.Future() + + def callback(): + global repl_future + global repl_future_interrupted + + repl_future = None + repl_future_interrupted = False + + func = types.FunctionType(code, self.locals) + try: + coro = func() + except SystemExit: + raise + except KeyboardInterrupt as ex: + repl_future_interrupted = True + future.set_exception(ex) + return + except BaseException as ex: + future.set_exception(ex) + return + + if not inspect.iscoroutine(coro): + future.set_result(coro) + return + + try: + repl_future = self.loop.create_task(coro) + futures._chain_future(repl_future, future) + except BaseException as exc: + future.set_exception(exc) + + loop.call_soon_threadsafe(callback) + + try: + return future.result() + except SystemExit: + raise + except BaseException: + if repl_future_interrupted: + self.write("\nKeyboardInterrupt\n") + else: + self.showtraceback() + + +class REPLThread(threading.Thread): + + def run(self): + try: + banner = ( + f'asyncio REPL {sys.version} on {sys.platform}\n' + f'Use "await" directly instead of "asyncio.run()".\n' + f'Type "help", "copyright", "credits" or "license" ' + f'for more information.\n' + f'{getattr(sys, "ps1", ">>> ")}import asyncio' + ) + + console.interact( + banner=banner, + exitmsg='exiting asyncio REPL...') + finally: + warnings.filterwarnings( + 'ignore', + message=r'^coroutine .* was never awaited$', + category=RuntimeWarning) + + loop.call_soon_threadsafe(loop.stop) + + +if __name__ == '__main__': + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + repl_locals = {'asyncio': asyncio} + for key in {'__name__', '__package__', + '__loader__', '__spec__', + '__builtins__', '__file__'}: + repl_locals[key] = locals()[key] + + console = AsyncIOInteractiveConsole(repl_locals, loop) + + repl_future = None + repl_future_interrupted = False + + try: + import readline # NoQA + except ImportError: + pass + + repl_thread = REPLThread() + repl_thread.daemon = True + repl_thread.start() + + while True: + try: + 
loop.run_forever() + except KeyboardInterrupt: + if repl_future and not repl_future.done(): + repl_future.cancel() + repl_future_interrupted = True + continue + else: + break diff --git a/python310/asyncio/__pycache__/__init__.cpython-310.pyc b/python310/asyncio/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..236e66ee55ba7c4ab494750cff08746f43fe4714 Binary files /dev/null and b/python310/asyncio/__pycache__/__init__.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/__main__.cpython-310.pyc b/python310/asyncio/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de62a61361dc750b0019209741e5e062426728a0 Binary files /dev/null and b/python310/asyncio/__pycache__/__main__.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/base_events.cpython-310.pyc b/python310/asyncio/__pycache__/base_events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb6a3d20204b667b15aa33c1cb560e85164ed46b Binary files /dev/null and b/python310/asyncio/__pycache__/base_events.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/base_futures.cpython-310.pyc b/python310/asyncio/__pycache__/base_futures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e9d80ca919f24352fabc2dbc7e8de98985e48061 Binary files /dev/null and b/python310/asyncio/__pycache__/base_futures.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/base_subprocess.cpython-310.pyc b/python310/asyncio/__pycache__/base_subprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ee0ed820b5e53398da8a626f308115a433b63f5 Binary files /dev/null and b/python310/asyncio/__pycache__/base_subprocess.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/base_tasks.cpython-310.pyc b/python310/asyncio/__pycache__/base_tasks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c89236bf88f8dd2e67618f6027478e61869c3017 Binary files /dev/null and b/python310/asyncio/__pycache__/base_tasks.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/constants.cpython-310.pyc b/python310/asyncio/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38f49451cf86320fb23e7bb7bb18983a1d1e7666 Binary files /dev/null and b/python310/asyncio/__pycache__/constants.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/coroutines.cpython-310.pyc b/python310/asyncio/__pycache__/coroutines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd4f5a66814e56a328f378a1a0b812d8983ecda Binary files /dev/null and b/python310/asyncio/__pycache__/coroutines.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/events.cpython-310.pyc b/python310/asyncio/__pycache__/events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c33963d52ac21289f3727999d1c473e1116ae20 Binary files /dev/null and b/python310/asyncio/__pycache__/events.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/exceptions.cpython-310.pyc b/python310/asyncio/__pycache__/exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cf43aea00ce9f40027837b85ba2b83a4bac139d7 Binary files /dev/null and b/python310/asyncio/__pycache__/exceptions.cpython-310.pyc differ diff --git 
a/python310/asyncio/__pycache__/format_helpers.cpython-310.pyc b/python310/asyncio/__pycache__/format_helpers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..558db5d9abc6c7d5c011a52874c9250bb3ce2734 Binary files /dev/null and b/python310/asyncio/__pycache__/format_helpers.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/futures.cpython-310.pyc b/python310/asyncio/__pycache__/futures.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c20861083ebaa046e11f82773c2a6e61c4c2ca7 Binary files /dev/null and b/python310/asyncio/__pycache__/futures.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/locks.cpython-310.pyc b/python310/asyncio/__pycache__/locks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be8a363d9289aa54d32681680d0422de187b0ea0 Binary files /dev/null and b/python310/asyncio/__pycache__/locks.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/log.cpython-310.pyc b/python310/asyncio/__pycache__/log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fda339e0f4d0c6bf5898b3a23a96167602ddffb5 Binary files /dev/null and b/python310/asyncio/__pycache__/log.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/mixins.cpython-310.pyc b/python310/asyncio/__pycache__/mixins.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53375bd4f4933659e0ea1903f59d3e1db41b78f2 Binary files /dev/null and b/python310/asyncio/__pycache__/mixins.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/proactor_events.cpython-310.pyc b/python310/asyncio/__pycache__/proactor_events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed6cc9a7a2b04a7f591951b41478493e00e53933 Binary files /dev/null and b/python310/asyncio/__pycache__/proactor_events.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/protocols.cpython-310.pyc b/python310/asyncio/__pycache__/protocols.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ee9945d05ec31d48d2e9ab1c305dc8c5070e9a0b Binary files /dev/null and b/python310/asyncio/__pycache__/protocols.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/queues.cpython-310.pyc b/python310/asyncio/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb3eb4f212a2f035c4eccc8baa18b944a81a8bb5 Binary files /dev/null and b/python310/asyncio/__pycache__/queues.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/runners.cpython-310.pyc b/python310/asyncio/__pycache__/runners.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9e28940eee638946b46c6f4e8a55ea781c8b4a17 Binary files /dev/null and b/python310/asyncio/__pycache__/runners.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/selector_events.cpython-310.pyc b/python310/asyncio/__pycache__/selector_events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ec6758d2a8b0853ff014c7800d5a5ab2721cf74d Binary files /dev/null and b/python310/asyncio/__pycache__/selector_events.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/sslproto.cpython-310.pyc b/python310/asyncio/__pycache__/sslproto.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9c06cc0610904363dde035598bc763df95acf9d0 Binary files /dev/null and 
b/python310/asyncio/__pycache__/sslproto.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/staggered.cpython-310.pyc b/python310/asyncio/__pycache__/staggered.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e932b8e2d4a232a16969dce66bd084d70ac7dad3 Binary files /dev/null and b/python310/asyncio/__pycache__/staggered.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/streams.cpython-310.pyc b/python310/asyncio/__pycache__/streams.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3ab5668ba1afe00c5d98bd50f96d98601160ad35 Binary files /dev/null and b/python310/asyncio/__pycache__/streams.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/subprocess.cpython-310.pyc b/python310/asyncio/__pycache__/subprocess.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1248d19820fd8274b0681d71c828a5c564e59c7e Binary files /dev/null and b/python310/asyncio/__pycache__/subprocess.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/tasks.cpython-310.pyc b/python310/asyncio/__pycache__/tasks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12d8b88011e65eda96cb4e46718a4e10944d2f00 Binary files /dev/null and b/python310/asyncio/__pycache__/tasks.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/threads.cpython-310.pyc b/python310/asyncio/__pycache__/threads.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..095439c8b33b4bb5398d728aaa73364fca71a0d0 Binary files /dev/null and b/python310/asyncio/__pycache__/threads.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/transports.cpython-310.pyc b/python310/asyncio/__pycache__/transports.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f9980ea3fdf64b2c08a321db1a17f5b5bfb12ea Binary files /dev/null and b/python310/asyncio/__pycache__/transports.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/trsock.cpython-310.pyc b/python310/asyncio/__pycache__/trsock.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b300f674ecaabbdc71af6098d1467916e0e714c Binary files /dev/null and b/python310/asyncio/__pycache__/trsock.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/unix_events.cpython-310.pyc b/python310/asyncio/__pycache__/unix_events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53f66dd19d7ac9ec89526c946e3ef1969e474e65 Binary files /dev/null and b/python310/asyncio/__pycache__/unix_events.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/windows_events.cpython-310.pyc b/python310/asyncio/__pycache__/windows_events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98350cd8e6a18001730ca41d5f9d87fd3272e55a Binary files /dev/null and b/python310/asyncio/__pycache__/windows_events.cpython-310.pyc differ diff --git a/python310/asyncio/__pycache__/windows_utils.cpython-310.pyc b/python310/asyncio/__pycache__/windows_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8621b0e1bf3c4f66037298dfc9510cd6c1495ca4 Binary files /dev/null and b/python310/asyncio/__pycache__/windows_utils.cpython-310.pyc differ diff --git a/python310/asyncio/base_events.py b/python310/asyncio/base_events.py new file mode 100644 index 0000000000000000000000000000000000000000..0890e9e1da6978857ece75359020ebd40bc70b75 --- 
/dev/null +++ b/python310/asyncio/base_events.py @@ -0,0 +1,1934 @@ +"""Base implementation of event loop. + +The event loop can be broken up into a multiplexer (the part +responsible for notifying us of I/O events) and the event loop proper, +which wraps a multiplexer with functionality for scheduling callbacks, +immediately or at a given time in the future. + +Whenever a public API takes a callback, subsequent positional +arguments will be passed to the callback if/when it is called. This +avoids the proliferation of trivial lambdas implementing closures. +Keyword arguments for the callback are not supported; this is a +conscious design decision, leaving the door open for keyword arguments +to modify the meaning of the API call itself. +""" + +import collections +import collections.abc +import concurrent.futures +import functools +import heapq +import itertools +import os +import socket +import stat +import subprocess +import threading +import time +import traceback +import sys +import warnings +import weakref + +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import constants +from . import coroutines +from . import events +from . import exceptions +from . import futures +from . import protocols +from . import sslproto +from . import staggered +from . import tasks +from . import transports +from . import trsock +from .log import logger + + +__all__ = 'BaseEventLoop','Server', + + +# Minimum number of _scheduled timer handles before cleanup of +# cancelled handles is performed. +_MIN_SCHEDULED_TIMER_HANDLES = 100 + +# Minimum fraction of _scheduled timer handles that are cancelled +# before cleanup of cancelled handles is performed. +_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5 + + +_HAS_IPv6 = hasattr(socket, 'AF_INET6') + +# Maximum timeout passed to select to avoid OS limitations +MAXIMUM_SELECT_TIMEOUT = 24 * 3600 + +# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s +# *reuse_address* parameter +_unset = object() + + +def _format_handle(handle): + cb = handle._callback + if isinstance(getattr(cb, '__self__', None), tasks.Task): + # format the task + return repr(cb.__self__) + else: + return str(handle) + + +def _format_pipe(fd): + if fd == subprocess.PIPE: + return '' + elif fd == subprocess.STDOUT: + return '' + else: + return repr(fd) + + +def _set_reuseport(sock): + if not hasattr(socket, 'SO_REUSEPORT'): + raise ValueError('reuse_port not supported by socket module') + else: + try: + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) + except OSError: + raise ValueError('reuse_port not supported by socket module, ' + 'SO_REUSEPORT defined but not implemented.') + + +def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0): + # Try to skip getaddrinfo if "host" is already an IP. Users might have + # handled name resolution in their own code and pass in resolved IPs. + if not hasattr(socket, 'inet_pton'): + return + + if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \ + host is None: + return None + + if type == socket.SOCK_STREAM: + proto = socket.IPPROTO_TCP + elif type == socket.SOCK_DGRAM: + proto = socket.IPPROTO_UDP + else: + return None + + if port is None: + port = 0 + elif isinstance(port, bytes) and port == b'': + port = 0 + elif isinstance(port, str) and port == '': + port = 0 + else: + # If port's a service name like "http", don't skip getaddrinfo. 
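For illustration, a small sketch exercising the fast path above; _ipaddr_info is a private helper of asyncio.base_events, so this is for understanding only, not a supported API:

    import socket
    from asyncio.base_events import _ipaddr_info

    # A literal address is classified without ever calling getaddrinfo():
    print(_ipaddr_info('127.0.0.1', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0))
    # -> roughly (AF_INET, SOCK_STREAM, IPPROTO_TCP, '', ('127.0.0.1', 80))

    # A hostname cannot be parsed by inet_pton(), so None is returned and the
    # caller falls back to a real getaddrinfo() running in an executor:
    print(_ipaddr_info('example.com', 80, socket.AF_UNSPEC, socket.SOCK_STREAM, 0))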
+ try: + port = int(port) + except (TypeError, ValueError): + return None + + if family == socket.AF_UNSPEC: + afs = [socket.AF_INET] + if _HAS_IPv6: + afs.append(socket.AF_INET6) + else: + afs = [family] + + if isinstance(host, bytes): + host = host.decode('idna') + if '%' in host: + # Linux's inet_pton doesn't accept an IPv6 zone index after host, + # like '::1%lo0'. + return None + + for af in afs: + try: + socket.inet_pton(af, host) + # The host has already been resolved. + if _HAS_IPv6 and af == socket.AF_INET6: + return af, type, proto, '', (host, port, flowinfo, scopeid) + else: + return af, type, proto, '', (host, port) + except OSError: + pass + + # "host" is not an IP address. + return None + + +def _interleave_addrinfos(addrinfos, first_address_family_count=1): + """Interleave list of addrinfo tuples by family.""" + # Group addresses by family + addrinfos_by_family = collections.OrderedDict() + for addr in addrinfos: + family = addr[0] + if family not in addrinfos_by_family: + addrinfos_by_family[family] = [] + addrinfos_by_family[family].append(addr) + addrinfos_lists = list(addrinfos_by_family.values()) + + reordered = [] + if first_address_family_count > 1: + reordered.extend(addrinfos_lists[0][:first_address_family_count - 1]) + del addrinfos_lists[0][:first_address_family_count - 1] + reordered.extend( + a for a in itertools.chain.from_iterable( + itertools.zip_longest(*addrinfos_lists) + ) if a is not None) + return reordered + + +def _run_until_complete_cb(fut): + if not fut.cancelled(): + exc = fut.exception() + if isinstance(exc, (SystemExit, KeyboardInterrupt)): + # Issue #22429: run_forever() already finished, no need to + # stop it. + return + futures._get_loop(fut).stop() + + +if hasattr(socket, 'TCP_NODELAY'): + def _set_nodelay(sock): + if (sock.family in {socket.AF_INET, socket.AF_INET6} and + sock.type == socket.SOCK_STREAM and + sock.proto == socket.IPPROTO_TCP): + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) +else: + def _set_nodelay(sock): + pass + + +def _check_ssl_socket(sock): + if ssl is not None and isinstance(sock, ssl.SSLSocket): + raise TypeError("Socket cannot be of type SSLSocket") + + +class _SendfileFallbackProtocol(protocols.Protocol): + def __init__(self, transp): + if not isinstance(transp, transports._FlowControlMixin): + raise TypeError("transport should be _FlowControlMixin instance") + self._transport = transp + self._proto = transp.get_protocol() + self._should_resume_reading = transp.is_reading() + self._should_resume_writing = transp._protocol_paused + transp.pause_reading() + transp.set_protocol(self) + if self._should_resume_writing: + self._write_ready_fut = self._transport._loop.create_future() + else: + self._write_ready_fut = None + + async def drain(self): + if self._transport.is_closing(): + raise ConnectionError("Connection closed by peer") + fut = self._write_ready_fut + if fut is None: + return + await fut + + def connection_made(self, transport): + raise RuntimeError("Invalid state: " + "connection should have been established already.") + + def connection_lost(self, exc): + if self._write_ready_fut is not None: + # Never happens if peer disconnects after sending the whole content + # Thus disconnection is always an exception from user perspective + if exc is None: + self._write_ready_fut.set_exception( + ConnectionError("Connection is closed by peer")) + else: + self._write_ready_fut.set_exception(exc) + self._proto.connection_lost(exc) + + def pause_writing(self): + if self._write_ready_fut is not None: + 
return + self._write_ready_fut = self._transport._loop.create_future() + + def resume_writing(self): + if self._write_ready_fut is None: + return + self._write_ready_fut.set_result(False) + self._write_ready_fut = None + + def data_received(self, data): + raise RuntimeError("Invalid state: reading should be paused") + + def eof_received(self): + raise RuntimeError("Invalid state: reading should be paused") + + async def restore(self): + self._transport.set_protocol(self._proto) + if self._should_resume_reading: + self._transport.resume_reading() + if self._write_ready_fut is not None: + # Cancel the future. + # Basically it has no effect because protocol is switched back, + # no code should wait for it anymore. + self._write_ready_fut.cancel() + if self._should_resume_writing: + self._proto.resume_writing() + + +class Server(events.AbstractServer): + + def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog, + ssl_handshake_timeout): + self._loop = loop + self._sockets = sockets + self._active_count = 0 + self._waiters = [] + self._protocol_factory = protocol_factory + self._backlog = backlog + self._ssl_context = ssl_context + self._ssl_handshake_timeout = ssl_handshake_timeout + self._serving = False + self._serving_forever_fut = None + + def __repr__(self): + return f'<{self.__class__.__name__} sockets={self.sockets!r}>' + + def _attach(self): + assert self._sockets is not None + self._active_count += 1 + + def _detach(self): + assert self._active_count > 0 + self._active_count -= 1 + if self._active_count == 0 and self._sockets is None: + self._wakeup() + + def _wakeup(self): + waiters = self._waiters + self._waiters = None + for waiter in waiters: + if not waiter.done(): + waiter.set_result(waiter) + + def _start_serving(self): + if self._serving: + return + self._serving = True + for sock in self._sockets: + sock.listen(self._backlog) + self._loop._start_serving( + self._protocol_factory, sock, self._ssl_context, + self, self._backlog, self._ssl_handshake_timeout) + + def get_loop(self): + return self._loop + + def is_serving(self): + return self._serving + + @property + def sockets(self): + if self._sockets is None: + return () + return tuple(trsock.TransportSocket(s) for s in self._sockets) + + def close(self): + sockets = self._sockets + if sockets is None: + return + self._sockets = None + + for sock in sockets: + self._loop._stop_serving(sock) + + self._serving = False + + if (self._serving_forever_fut is not None and + not self._serving_forever_fut.done()): + self._serving_forever_fut.cancel() + self._serving_forever_fut = None + + if self._active_count == 0: + self._wakeup() + + async def start_serving(self): + self._start_serving() + # Skip one loop iteration so that all 'loop.add_reader' + # go through. 
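The await tasks.sleep(0) that follows yields one loop iteration so those reader callbacks are registered before start_serving() returns. A minimal usage sketch against the public API that constructs this Server (port 0 picks a free port):

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()
        server = await loop.create_server(
            asyncio.Protocol, '127.0.0.1', 0, start_serving=False)
        await server.start_serving()          # begins accepting on all listening sockets
        print('listening on', server.sockets[0].getsockname())
        server.close()
        await server.wait_closed()

    asyncio.run(main())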
+ await tasks.sleep(0) + + async def serve_forever(self): + if self._serving_forever_fut is not None: + raise RuntimeError( + f'server {self!r} is already being awaited on serve_forever()') + if self._sockets is None: + raise RuntimeError(f'server {self!r} is closed') + + self._start_serving() + self._serving_forever_fut = self._loop.create_future() + + try: + await self._serving_forever_fut + except exceptions.CancelledError: + try: + self.close() + await self.wait_closed() + finally: + raise + finally: + self._serving_forever_fut = None + + async def wait_closed(self): + if self._sockets is None or self._waiters is None: + return + waiter = self._loop.create_future() + self._waiters.append(waiter) + await waiter + + +class BaseEventLoop(events.AbstractEventLoop): + + def __init__(self): + self._timer_cancelled_count = 0 + self._closed = False + self._stopping = False + self._ready = collections.deque() + self._scheduled = [] + self._default_executor = None + self._internal_fds = 0 + # Identifier of the thread running the event loop, or None if the + # event loop is not running + self._thread_id = None + self._clock_resolution = time.get_clock_info('monotonic').resolution + self._exception_handler = None + self.set_debug(coroutines._is_debug_mode()) + # In debug mode, if the execution of a callback or a step of a task + # exceed this duration in seconds, the slow callback/task is logged. + self.slow_callback_duration = 0.1 + self._current_handle = None + self._task_factory = None + self._coroutine_origin_tracking_enabled = False + self._coroutine_origin_tracking_saved_depth = None + + # A weak set of all asynchronous generators that are + # being iterated by the loop. + self._asyncgens = weakref.WeakSet() + # Set to True when `loop.shutdown_asyncgens` is called. + self._asyncgens_shutdown_called = False + # Set to True when `loop.shutdown_default_executor` is called. + self._executor_shutdown_called = False + + def __repr__(self): + return ( + f'<{self.__class__.__name__} running={self.is_running()} ' + f'closed={self.is_closed()} debug={self.get_debug()}>' + ) + + def create_future(self): + """Create a Future object attached to the loop.""" + return futures.Future(loop=self) + + def create_task(self, coro, *, name=None): + """Schedule a coroutine object. + + Return a task object. + """ + self._check_closed() + if self._task_factory is None: + task = tasks.Task(coro, loop=self, name=name) + if task._source_traceback: + del task._source_traceback[-1] + else: + task = self._task_factory(self, coro) + tasks._set_task_name(task, name) + + return task + + def set_task_factory(self, factory): + """Set a task factory that will be used by loop.create_task(). + + If factory is None the default task factory will be set. + + If factory is a callable, it should have a signature matching + '(loop, coro)', where 'loop' will be a reference to the active + event loop, 'coro' will be a coroutine object. The callable + must return a Future. 
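A sketch of a conforming factory: it accepts (loop, coro) and returns a Future-compatible object, here a plain Task created with a fixed, purely illustrative name:

    import asyncio

    def naming_factory(loop, coro):
        # Must return a Future; a Task satisfies that contract.
        return asyncio.Task(coro, loop=loop, name='via-factory')

    async def job():
        return 42

    loop = asyncio.new_event_loop()
    loop.set_task_factory(naming_factory)
    print(loop.run_until_complete(job()))     # the wrapping Task comes from the factory
    loop.close()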
+ """ + if factory is not None and not callable(factory): + raise TypeError('task factory must be a callable or None') + self._task_factory = factory + + def get_task_factory(self): + """Return a task factory, or None if the default one is in use.""" + return self._task_factory + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + """Create socket transport.""" + raise NotImplementedError + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=None, + call_connection_made=True): + """Create SSL transport.""" + raise NotImplementedError + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + """Create datagram transport.""" + raise NotImplementedError + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create read pipe transport.""" + raise NotImplementedError + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + """Create write pipe transport.""" + raise NotImplementedError + + async def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + """Create subprocess transport.""" + raise NotImplementedError + + def _write_to_self(self): + """Write a byte to self-pipe, to wake up the event loop. + + This may be called from a different thread. + + The subclass is responsible for implementing the self-pipe. + """ + raise NotImplementedError + + def _process_events(self, event_list): + """Process selector events.""" + raise NotImplementedError + + def _check_closed(self): + if self._closed: + raise RuntimeError('Event loop is closed') + + def _check_default_executor(self): + if self._executor_shutdown_called: + raise RuntimeError('Executor shutdown has been called') + + def _asyncgen_finalizer_hook(self, agen): + self._asyncgens.discard(agen) + if not self.is_closed(): + self.call_soon_threadsafe(self.create_task, agen.aclose()) + + def _asyncgen_firstiter_hook(self, agen): + if self._asyncgens_shutdown_called: + warnings.warn( + f"asynchronous generator {agen!r} was scheduled after " + f"loop.shutdown_asyncgens() call", + ResourceWarning, source=self) + + self._asyncgens.add(agen) + + async def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + self._asyncgens_shutdown_called = True + + if not len(self._asyncgens): + # If Python version is <3.6 or we don't have any asynchronous + # generators alive. 
+ return + + closing_agens = list(self._asyncgens) + self._asyncgens.clear() + + results = await tasks.gather( + *[ag.aclose() for ag in closing_agens], + return_exceptions=True) + + for result, agen in zip(results, closing_agens): + if isinstance(result, Exception): + self.call_exception_handler({ + 'message': f'an error occurred during closing of ' + f'asynchronous generator {agen!r}', + 'exception': result, + 'asyncgen': agen + }) + + async def shutdown_default_executor(self): + """Schedule the shutdown of the default executor.""" + self._executor_shutdown_called = True + if self._default_executor is None: + return + future = self.create_future() + thread = threading.Thread(target=self._do_shutdown, args=(future,)) + thread.start() + try: + await future + finally: + thread.join() + + def _do_shutdown(self, future): + try: + self._default_executor.shutdown(wait=True) + if not self.is_closed(): + self.call_soon_threadsafe(future.set_result, None) + except Exception as ex: + if not self.is_closed(): + self.call_soon_threadsafe(future.set_exception, ex) + + def _check_running(self): + if self.is_running(): + raise RuntimeError('This event loop is already running') + if events._get_running_loop() is not None: + raise RuntimeError( + 'Cannot run the event loop while another loop is running') + + def run_forever(self): + """Run until stop() is called.""" + self._check_closed() + self._check_running() + self._set_coroutine_origin_tracking(self._debug) + + old_agen_hooks = sys.get_asyncgen_hooks() + try: + self._thread_id = threading.get_ident() + sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook, + finalizer=self._asyncgen_finalizer_hook) + + events._set_running_loop(self) + while True: + self._run_once() + if self._stopping: + break + finally: + self._stopping = False + self._thread_id = None + events._set_running_loop(None) + self._set_coroutine_origin_tracking(False) + sys.set_asyncgen_hooks(*old_agen_hooks) + + def run_until_complete(self, future): + """Run until the Future is done. + + If the argument is a coroutine, it is wrapped in a Task. + + WARNING: It would be disastrous to call run_until_complete() + with the same coroutine twice -- it would wrap it in two + different Tasks and that can't be good. + + Return the Future's result, or raise its exception. + """ + self._check_closed() + self._check_running() + + new_task = not futures.isfuture(future) + future = tasks.ensure_future(future, loop=self) + if new_task: + # An exception is raised if the future didn't complete, so there + # is no need to log the "destroy pending task" message + future._log_destroy_pending = False + + future.add_done_callback(_run_until_complete_cb) + try: + self.run_forever() + except: + if new_task and future.done() and not future.cancelled(): + # The coroutine raised a BaseException. Consume the exception + # to not log a warning, the caller doesn't have access to the + # local task. + future.exception() + raise + finally: + future.remove_done_callback(_run_until_complete_cb) + if not future.done(): + raise RuntimeError('Event loop stopped before Future completed.') + + return future.result() + + def stop(self): + """Stop running the event loop. + + Every callback already scheduled will still run. This simply informs + run_forever to stop looping after a complete iteration. + """ + self._stopping = True + + def close(self): + """Close the event loop. + + This clears the queues and shuts down the executor, + but does not wait for the executor to finish. + + The event loop must not be running. 
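A sketch of the manual lifecycle this implies (asyncio.run() performs essentially the same steps): run to completion, shut down async generators and the default executor, and only then call close(), once the loop is no longer running:

    import asyncio

    async def main():
        await asyncio.sleep(0)
        return 'done'

    loop = asyncio.new_event_loop()
    try:
        print(loop.run_until_complete(main()))
    finally:
        loop.run_until_complete(loop.shutdown_asyncgens())
        loop.run_until_complete(loop.shutdown_default_executor())
        loop.close()              # only valid once the loop has stopped running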
+ """ + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self._closed: + return + if self._debug: + logger.debug("Close %r", self) + self._closed = True + self._ready.clear() + self._scheduled.clear() + self._executor_shutdown_called = True + executor = self._default_executor + if executor is not None: + self._default_executor = None + executor.shutdown(wait=False) + + def is_closed(self): + """Returns True if the event loop was closed.""" + return self._closed + + def __del__(self, _warn=warnings.warn): + if not self.is_closed(): + _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self) + if not self.is_running(): + self.close() + + def is_running(self): + """Returns True if the event loop is running.""" + return (self._thread_id is not None) + + def time(self): + """Return the time according to the event loop's clock. + + This is a float expressed in seconds since an epoch, but the + epoch, precision, accuracy and drift are unspecified and may + differ per event loop. + """ + return time.monotonic() + + def call_later(self, delay, callback, *args, context=None): + """Arrange for a callback to be called at a given time. + + Return a Handle: an opaque object with a cancel() method that + can be used to cancel the call. + + The delay can be an int or float, expressed in seconds. It is + always relative to the current time. + + Each callback will be called exactly once. If two callbacks + are scheduled for exactly the same time, it undefined which + will be called first. + + Any positional arguments after the callback will be passed to + the callback when it is called. + """ + timer = self.call_at(self.time() + delay, callback, *args, + context=context) + if timer._source_traceback: + del timer._source_traceback[-1] + return timer + + def call_at(self, when, callback, *args, context=None): + """Like call_later(), but uses an absolute time. + + Absolute time corresponds to the event loop's time() method. + """ + self._check_closed() + if self._debug: + self._check_thread() + self._check_callback(callback, 'call_at') + timer = events.TimerHandle(when, callback, args, self, context) + if timer._source_traceback: + del timer._source_traceback[-1] + heapq.heappush(self._scheduled, timer) + timer._scheduled = True + return timer + + def call_soon(self, callback, *args, context=None): + """Arrange for a callback to be called as soon as possible. + + This operates as a FIFO queue: callbacks are called in the + order in which they are registered. Each callback will be + called exactly once. + + Any positional arguments after the callback will be passed to + the callback when it is called. 
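For illustration, the three scheduling primitives side by side, run from inside a coroutine so the running loop is used:

    import asyncio

    async def main():
        loop = asyncio.get_running_loop()
        loop.call_soon(print, 'call_soon: FIFO, runs on the next loop iteration')
        handle = loop.call_later(0.05, print, 'call_later: runs about 50 ms from now')
        loop.call_at(loop.time() + 0.1, print, 'call_at: absolute deadline on loop.time()')
        # handle.cancel() here would prevent the call_later callback from running.
        await asyncio.sleep(0.2)

    asyncio.run(main())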
+ """ + self._check_closed() + if self._debug: + self._check_thread() + self._check_callback(callback, 'call_soon') + handle = self._call_soon(callback, args, context) + if handle._source_traceback: + del handle._source_traceback[-1] + return handle + + def _check_callback(self, callback, method): + if (coroutines.iscoroutine(callback) or + coroutines.iscoroutinefunction(callback)): + raise TypeError( + f"coroutines cannot be used with {method}()") + if not callable(callback): + raise TypeError( + f'a callable object was expected by {method}(), ' + f'got {callback!r}') + + def _call_soon(self, callback, args, context): + handle = events.Handle(callback, args, self, context) + if handle._source_traceback: + del handle._source_traceback[-1] + self._ready.append(handle) + return handle + + def _check_thread(self): + """Check that the current thread is the thread running the event loop. + + Non-thread-safe methods of this class make this assumption and will + likely behave incorrectly when the assumption is violated. + + Should only be called when (self._debug == True). The caller is + responsible for checking this condition for performance reasons. + """ + if self._thread_id is None: + return + thread_id = threading.get_ident() + if thread_id != self._thread_id: + raise RuntimeError( + "Non-thread-safe operation invoked on an event loop other " + "than the current one") + + def call_soon_threadsafe(self, callback, *args, context=None): + """Like call_soon(), but thread-safe.""" + self._check_closed() + if self._debug: + self._check_callback(callback, 'call_soon_threadsafe') + handle = self._call_soon(callback, args, context) + if handle._source_traceback: + del handle._source_traceback[-1] + self._write_to_self() + return handle + + def run_in_executor(self, executor, func, *args): + self._check_closed() + if self._debug: + self._check_callback(func, 'run_in_executor') + if executor is None: + executor = self._default_executor + # Only check when the default executor is being used + self._check_default_executor() + if executor is None: + executor = concurrent.futures.ThreadPoolExecutor( + thread_name_prefix='asyncio' + ) + self._default_executor = executor + return futures.wrap_future( + executor.submit(func, *args), loop=self) + + def set_default_executor(self, executor): + if not isinstance(executor, concurrent.futures.ThreadPoolExecutor): + warnings.warn( + 'Using the default executor that is not an instance of ' + 'ThreadPoolExecutor is deprecated and will be prohibited ' + 'in Python 3.9', + DeprecationWarning, 2) + self._default_executor = executor + + def _getaddrinfo_debug(self, host, port, family, type, proto, flags): + msg = [f"{host}:{port!r}"] + if family: + msg.append(f'family={family!r}') + if type: + msg.append(f'type={type!r}') + if proto: + msg.append(f'proto={proto!r}') + if flags: + msg.append(f'flags={flags!r}') + msg = ', '.join(msg) + logger.debug('Get address info %s', msg) + + t0 = self.time() + addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags) + dt = self.time() - t0 + + msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}' + if dt >= self.slow_callback_duration: + logger.info(msg) + else: + logger.debug(msg) + return addrinfo + + async def getaddrinfo(self, host, port, *, + family=0, type=0, proto=0, flags=0): + if self._debug: + getaddr_func = self._getaddrinfo_debug + else: + getaddr_func = socket.getaddrinfo + + return await self.run_in_executor( + None, getaddr_func, host, port, family, type, proto, flags) + + async def 
getnameinfo(self, sockaddr, flags=0): + return await self.run_in_executor( + None, socket.getnameinfo, sockaddr, flags) + + async def sock_sendfile(self, sock, file, offset=0, count=None, + *, fallback=True): + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + _check_ssl_socket(sock) + self._check_sendfile_params(sock, file, offset, count) + try: + return await self._sock_sendfile_native(sock, file, + offset, count) + except exceptions.SendfileNotAvailableError as exc: + if not fallback: + raise + return await self._sock_sendfile_fallback(sock, file, + offset, count) + + async def _sock_sendfile_native(self, sock, file, offset, count): + # NB: sendfile syscall is not supported for SSL sockets and + # non-mmap files even if sendfile is supported by OS + raise exceptions.SendfileNotAvailableError( + f"syscall sendfile is not available for socket {sock!r} " + f"and file {file!r} combination") + + async def _sock_sendfile_fallback(self, sock, file, offset, count): + if offset: + file.seek(offset) + blocksize = ( + min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE) + if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE + ) + buf = bytearray(blocksize) + total_sent = 0 + try: + while True: + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + break + view = memoryview(buf)[:blocksize] + read = await self.run_in_executor(None, file.readinto, view) + if not read: + break # EOF + await self.sock_sendall(sock, view[:read]) + total_sent += read + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset + total_sent) + + def _check_sendfile_params(self, sock, file, offset, count): + if 'b' not in getattr(file, 'mode', 'b'): + raise ValueError("file should be opened in binary mode") + if not sock.type == socket.SOCK_STREAM: + raise ValueError("only SOCK_STREAM type sockets are supported") + if count is not None: + if not isinstance(count, int): + raise TypeError( + "count must be a positive integer (got {!r})".format(count)) + if count <= 0: + raise ValueError( + "count must be a positive integer (got {!r})".format(count)) + if not isinstance(offset, int): + raise TypeError( + "offset must be a non-negative integer (got {!r})".format( + offset)) + if offset < 0: + raise ValueError( + "offset must be a non-negative integer (got {!r})".format( + offset)) + + async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None): + """Create, bind and connect one socket.""" + my_exceptions = [] + exceptions.append(my_exceptions) + family, type_, proto, _, address = addr_info + sock = None + try: + sock = socket.socket(family=family, type=type_, proto=proto) + sock.setblocking(False) + if local_addr_infos is not None: + for lfamily, _, _, _, laddr in local_addr_infos: + # skip local addresses of different family + if lfamily != family: + continue + try: + sock.bind(laddr) + break + except OSError as exc: + msg = ( + f'error while attempting to bind on ' + f'address {laddr!r}: ' + f'{exc.strerror.lower()}' + ) + exc = OSError(exc.errno, msg) + my_exceptions.append(exc) + else: # all bind attempts failed + if my_exceptions: + raise my_exceptions.pop() + else: + raise OSError(f"no matching local address with {family=} found") + await self.sock_connect(sock, address) + return sock + except OSError as exc: + my_exceptions.append(exc) + if sock is not None: + sock.close() + raise + except: + if sock is not None: + sock.close() + raise + finally: + exceptions = my_exceptions = None + 
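create_connection() below drives the helper above once per resolved candidate address. A minimal client sketch against the public API; the host and port are placeholders:

    import asyncio

    async def connect_once(host, port):
        loop = asyncio.get_running_loop()
        transport, protocol = await loop.create_connection(
            asyncio.Protocol, host, port, happy_eyeballs_delay=0.25)
        try:
            return transport.get_extra_info('peername')
        finally:
            transport.close()

    # e.g. asyncio.run(connect_once('example.org', 80))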
+ async def create_connection( + self, protocol_factory, host=None, port=None, + *, ssl=None, family=0, + proto=0, flags=0, sock=None, + local_addr=None, server_hostname=None, + ssl_handshake_timeout=None, + happy_eyeballs_delay=None, interleave=None): + """Connect to a TCP server. + + Create a streaming transport connection to a given internet host and + port: socket family AF_INET or socket.AF_INET6 depending on host (or + family if specified), socket type SOCK_STREAM. protocol_factory must be + a callable returning a protocol instance. + + This method is a coroutine which will try to establish the connection + in the background. When successful, the coroutine returns a + (transport, protocol) pair. + """ + if server_hostname is not None and not ssl: + raise ValueError('server_hostname is only meaningful with ssl') + + if server_hostname is None and ssl: + # Use host as default for server_hostname. It is an error + # if host is empty or not set, e.g. when an + # already-connected socket was passed or when only a port + # is given. To avoid this error, you can pass + # server_hostname='' -- this will bypass the hostname + # check. (This also means that if host is a numeric + # IP/IPv6 address, we will attempt to verify that exact + # address; this will probably fail, but it is possible to + # create a certificate for a specific IP address, so we + # don't judge it here.) + if not host: + raise ValueError('You must set server_hostname ' + 'when using ssl without a host') + server_hostname = host + + if ssl_handshake_timeout is not None and not ssl: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if sock is not None: + _check_ssl_socket(sock) + + if happy_eyeballs_delay is not None and interleave is None: + # If using happy eyeballs, default to interleave addresses by family + interleave = 1 + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + infos = await self._ensure_resolved( + (host, port), family=family, + type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self) + if not infos: + raise OSError('getaddrinfo() returned empty list') + + if local_addr is not None: + laddr_infos = await self._ensure_resolved( + local_addr, family=family, + type=socket.SOCK_STREAM, proto=proto, + flags=flags, loop=self) + if not laddr_infos: + raise OSError('getaddrinfo() returned empty list') + else: + laddr_infos = None + + if interleave: + infos = _interleave_addrinfos(infos, interleave) + + exceptions = [] + if happy_eyeballs_delay is None: + # not using happy eyeballs + for addrinfo in infos: + try: + sock = await self._connect_sock( + exceptions, addrinfo, laddr_infos) + break + except OSError: + continue + else: # using happy eyeballs + sock, _, _ = await staggered.staggered_race( + (functools.partial(self._connect_sock, + exceptions, addrinfo, laddr_infos) + for addrinfo in infos), + happy_eyeballs_delay, loop=self) + + if sock is None: + exceptions = [exc for sub in exceptions for exc in sub] + try: + if len(exceptions) == 1: + raise exceptions[0] + else: + # If they all have the same str(), raise one. + model = str(exceptions[0]) + if all(str(exc) == model for exc in exceptions): + raise exceptions[0] + # Raise a combined exception so the user can see all + # the various error messages. 
+ raise OSError('Multiple exceptions: {}'.format( + ', '.join(str(exc) for exc in exceptions))) + finally: + exceptions = None + + else: + if sock is None: + raise ValueError( + 'host and port was not specified and no sock specified') + if sock.type != socket.SOCK_STREAM: + # We allow AF_INET, AF_INET6, AF_UNIX as long as they + # are SOCK_STREAM. + # We support passing AF_UNIX sockets even though we have + # a dedicated API for that: create_unix_connection. + # Disallowing AF_UNIX in this method, breaks backwards + # compatibility. + raise ValueError( + f'A Stream Socket was expected, got {sock!r}') + + transport, protocol = await self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r connected to %s:%r: (%r, %r)", + sock, host, port, transport, protocol) + return transport, protocol + + async def _create_connection_transport( + self, sock, protocol_factory, ssl, + server_hostname, server_side=False, + ssl_handshake_timeout=None): + + sock.setblocking(False) + + protocol = protocol_factory() + waiter = self.create_future() + if ssl: + sslcontext = None if isinstance(ssl, bool) else ssl + transport = self._make_ssl_transport( + sock, protocol, sslcontext, waiter, + server_side=server_side, server_hostname=server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + else: + transport = self._make_socket_transport(sock, protocol, waiter) + + try: + await waiter + except: + transport.close() + raise + + return transport, protocol + + async def sendfile(self, transport, file, offset=0, count=None, + *, fallback=True): + """Send a file to transport. + + Return the total number of bytes which were sent. + + The method uses high-performance os.sendfile if available. + + file must be a regular file object opened in binary mode. + + offset tells from where to start reading the file. If specified, + count is the total number of bytes to transmit as opposed to + sending the file until EOF is reached. File position is updated on + return or also in case of error in which case file.tell() + can be used to figure out the number of bytes + which were sent. + + fallback set to True makes asyncio to manually read and send + the file when the platform does not support the sendfile syscall + (e.g. Windows or SSL socket on Unix). + + Raise SendfileNotAvailableError if the system does not support + sendfile syscall and fallback is False. 
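In use, given a transport obtained from create_connection(), the call reads as in this sketch; the file must be opened in binary mode, as the docstring requires:

    async def upload(loop, transport, path):
        with open(path, 'rb') as f:
            sent = await loop.sendfile(transport, f, fallback=True)
        return sent               # total bytes handed to the transport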
+ """ + if transport.is_closing(): + raise RuntimeError("Transport is closing") + mode = getattr(transport, '_sendfile_compatible', + constants._SendfileMode.UNSUPPORTED) + if mode is constants._SendfileMode.UNSUPPORTED: + raise RuntimeError( + f"sendfile is not supported for transport {transport!r}") + if mode is constants._SendfileMode.TRY_NATIVE: + try: + return await self._sendfile_native(transport, file, + offset, count) + except exceptions.SendfileNotAvailableError as exc: + if not fallback: + raise + + if not fallback: + raise RuntimeError( + f"fallback is disabled and native sendfile is not " + f"supported for transport {transport!r}") + + return await self._sendfile_fallback(transport, file, + offset, count) + + async def _sendfile_native(self, transp, file, offset, count): + raise exceptions.SendfileNotAvailableError( + "sendfile syscall is not supported") + + async def _sendfile_fallback(self, transp, file, offset, count): + if offset: + file.seek(offset) + blocksize = min(count, 16384) if count else 16384 + buf = bytearray(blocksize) + total_sent = 0 + proto = _SendfileFallbackProtocol(transp) + try: + while True: + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + return total_sent + view = memoryview(buf)[:blocksize] + read = await self.run_in_executor(None, file.readinto, view) + if not read: + return total_sent # EOF + await proto.drain() + transp.write(view[:read]) + total_sent += read + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset + total_sent) + await proto.restore() + + async def start_tls(self, transport, protocol, sslcontext, *, + server_side=False, + server_hostname=None, + ssl_handshake_timeout=None): + """Upgrade transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + """ + if ssl is None: + raise RuntimeError('Python ssl module is not available') + + if not isinstance(sslcontext, ssl.SSLContext): + raise TypeError( + f'sslcontext is expected to be an instance of ssl.SSLContext, ' + f'got {sslcontext!r}') + + if not getattr(transport, '_start_tls_compatible', False): + raise TypeError( + f'transport {transport!r} is not supported by start_tls()') + + waiter = self.create_future() + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout, + call_connection_made=False) + + # Pause early so that "ssl_protocol.data_received()" doesn't + # have a chance to get called before "ssl_protocol.connection_made()". 
+ transport.pause_reading() + + transport.set_protocol(ssl_protocol) + conmade_cb = self.call_soon(ssl_protocol.connection_made, transport) + resume_cb = self.call_soon(transport.resume_reading) + + try: + await waiter + except BaseException: + transport.close() + conmade_cb.cancel() + resume_cb.cancel() + raise + + return ssl_protocol._app_transport + + async def create_datagram_endpoint(self, protocol_factory, + local_addr=None, remote_addr=None, *, + family=0, proto=0, flags=0, + reuse_address=_unset, reuse_port=None, + allow_broadcast=None, sock=None): + """Create datagram connection.""" + if sock is not None: + if sock.type != socket.SOCK_DGRAM: + raise ValueError( + f'A UDP Socket was expected, got {sock!r}') + if (local_addr or remote_addr or + family or proto or flags or + reuse_port or allow_broadcast): + # show the problematic kwargs in exception msg + opts = dict(local_addr=local_addr, remote_addr=remote_addr, + family=family, proto=proto, flags=flags, + reuse_address=reuse_address, reuse_port=reuse_port, + allow_broadcast=allow_broadcast) + problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v) + raise ValueError( + f'socket modifier keyword arguments can not be used ' + f'when sock is specified. ({problems})') + sock.setblocking(False) + r_addr = None + else: + if not (local_addr or remote_addr): + if family == 0: + raise ValueError('unexpected address family') + addr_pairs_info = (((family, proto), (None, None)),) + elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX: + for addr in (local_addr, remote_addr): + if addr is not None and not isinstance(addr, str): + raise TypeError('string is expected') + + if local_addr and local_addr[0] not in (0, '\x00'): + try: + if stat.S_ISSOCK(os.stat(local_addr).st_mode): + os.remove(local_addr) + except FileNotFoundError: + pass + except OSError as err: + # Directory may have permissions only to create socket. 
+ logger.error('Unable to check or remove stale UNIX ' + 'socket %r: %r', + local_addr, err) + + addr_pairs_info = (((family, proto), + (local_addr, remote_addr)), ) + else: + # join address by (family, protocol) + addr_infos = {} # Using order preserving dict + for idx, addr in ((0, local_addr), (1, remote_addr)): + if addr is not None: + assert isinstance(addr, tuple) and len(addr) == 2, ( + '2-tuple is expected') + + infos = await self._ensure_resolved( + addr, family=family, type=socket.SOCK_DGRAM, + proto=proto, flags=flags, loop=self) + if not infos: + raise OSError('getaddrinfo() returned empty list') + + for fam, _, pro, _, address in infos: + key = (fam, pro) + if key not in addr_infos: + addr_infos[key] = [None, None] + addr_infos[key][idx] = address + + # each addr has to have info for each (family, proto) pair + addr_pairs_info = [ + (key, addr_pair) for key, addr_pair in addr_infos.items() + if not ((local_addr and addr_pair[0] is None) or + (remote_addr and addr_pair[1] is None))] + + if not addr_pairs_info: + raise ValueError('can not get address information') + + exceptions = [] + + # bpo-37228 + if reuse_address is not _unset: + if reuse_address: + raise ValueError("Passing `reuse_address=True` is no " + "longer supported, as the usage of " + "SO_REUSEPORT in UDP poses a significant " + "security concern.") + else: + warnings.warn("The *reuse_address* parameter has been " + "deprecated as of 3.5.10 and is scheduled " + "for removal in 3.11.", DeprecationWarning, + stacklevel=2) + + for ((family, proto), + (local_address, remote_address)) in addr_pairs_info: + sock = None + r_addr = None + try: + sock = socket.socket( + family=family, type=socket.SOCK_DGRAM, proto=proto) + if reuse_port: + _set_reuseport(sock) + if allow_broadcast: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_BROADCAST, 1) + sock.setblocking(False) + + if local_addr: + sock.bind(local_address) + if remote_addr: + if not allow_broadcast: + await self.sock_connect(sock, remote_address) + r_addr = remote_address + except OSError as exc: + if sock is not None: + sock.close() + exceptions.append(exc) + except: + if sock is not None: + sock.close() + raise + else: + break + else: + raise exceptions[0] + + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_datagram_transport( + sock, protocol, r_addr, waiter) + if self._debug: + if local_addr: + logger.info("Datagram endpoint local_addr=%r remote_addr=%r " + "created: (%r, %r)", + local_addr, remote_addr, transport, protocol) + else: + logger.debug("Datagram endpoint remote_addr=%r created: " + "(%r, %r)", + remote_addr, transport, protocol) + + try: + await waiter + except: + transport.close() + raise + + return transport, protocol + + async def _ensure_resolved(self, address, *, + family=0, type=socket.SOCK_STREAM, + proto=0, flags=0, loop): + host, port = address[:2] + info = _ipaddr_info(host, port, family, type, proto, *address[2:]) + if info is not None: + # "host" is already a resolved IP. 
+ return [info] + else: + return await loop.getaddrinfo(host, port, family=family, type=type, + proto=proto, flags=flags) + + async def _create_server_getaddrinfo(self, host, port, family, flags): + infos = await self._ensure_resolved((host, port), family=family, + type=socket.SOCK_STREAM, + flags=flags, loop=self) + if not infos: + raise OSError(f'getaddrinfo({host!r}) returned empty list') + return infos + + async def create_server( + self, protocol_factory, host=None, port=None, + *, + family=socket.AF_UNSPEC, + flags=socket.AI_PASSIVE, + sock=None, + backlog=100, + ssl=None, + reuse_address=None, + reuse_port=None, + ssl_handshake_timeout=None, + start_serving=True): + """Create a TCP server. + + The host parameter can be a string, in that case the TCP server is + bound to host and port. + + The host parameter can also be a sequence of strings and in that case + the TCP server is bound to all hosts of the sequence. If a host + appears multiple times (possibly indirectly e.g. when hostnames + resolve to the same IP address), the server is only bound once to that + host. + + Return a Server object which can be used to stop the service. + + This method is a coroutine. + """ + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + + if ssl_handshake_timeout is not None and ssl is None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if sock is not None: + _check_ssl_socket(sock) + + if host is not None or port is not None: + if sock is not None: + raise ValueError( + 'host/port and sock can not be specified at the same time') + + if reuse_address is None: + reuse_address = os.name == 'posix' and sys.platform != 'cygwin' + sockets = [] + if host == '': + hosts = [None] + elif (isinstance(host, str) or + not isinstance(host, collections.abc.Iterable)): + hosts = [host] + else: + hosts = host + + fs = [self._create_server_getaddrinfo(host, port, family=family, + flags=flags) + for host in hosts] + infos = await tasks.gather(*fs) + infos = set(itertools.chain.from_iterable(infos)) + + completed = False + try: + for res in infos: + af, socktype, proto, canonname, sa = res + try: + sock = socket.socket(af, socktype, proto) + except socket.error: + # Assume it's a bad family/type/protocol combination. + if self._debug: + logger.warning('create_server() failed to create ' + 'socket.socket(%r, %r, %r)', + af, socktype, proto, exc_info=True) + continue + sockets.append(sock) + if reuse_address: + sock.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + if reuse_port: + _set_reuseport(sock) + # Disable IPv4/IPv6 dual stack support (enabled by + # default on Linux) which makes a single socket + # listen on both address families. 
+ if (_HAS_IPv6 and + af == socket.AF_INET6 and + hasattr(socket, 'IPPROTO_IPV6')): + sock.setsockopt(socket.IPPROTO_IPV6, + socket.IPV6_V6ONLY, + True) + try: + sock.bind(sa) + except OSError as err: + raise OSError(err.errno, 'error while attempting ' + 'to bind on address %r: %s' + % (sa, err.strerror.lower())) from None + completed = True + finally: + if not completed: + for sock in sockets: + sock.close() + else: + if sock is None: + raise ValueError('Neither host/port nor sock were specified') + if sock.type != socket.SOCK_STREAM: + raise ValueError(f'A Stream Socket was expected, got {sock!r}') + sockets = [sock] + + for sock in sockets: + sock.setblocking(False) + + server = Server(self, sockets, protocol_factory, + ssl, backlog, ssl_handshake_timeout) + if start_serving: + server._start_serving() + # Skip one loop iteration so that all 'loop.add_reader' + # go through. + await tasks.sleep(0) + + if self._debug: + logger.info("%r is serving", server) + return server + + async def connect_accepted_socket( + self, protocol_factory, sock, + *, ssl=None, + ssl_handshake_timeout=None): + if sock.type != socket.SOCK_STREAM: + raise ValueError(f'A Stream Socket was expected, got {sock!r}') + + if ssl_handshake_timeout is not None and not ssl: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if sock is not None: + _check_ssl_socket(sock) + + transport, protocol = await self._create_connection_transport( + sock, protocol_factory, ssl, '', server_side=True, + ssl_handshake_timeout=ssl_handshake_timeout) + if self._debug: + # Get the socket from the transport because SSL transport closes + # the old socket and creates a new SSL socket + sock = transport.get_extra_info('socket') + logger.debug("%r handled: (%r, %r)", sock, transport, protocol) + return transport, protocol + + async def connect_read_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_read_pipe_transport(pipe, protocol, waiter) + + try: + await waiter + except: + transport.close() + raise + + if self._debug: + logger.debug('Read pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + return transport, protocol + + async def connect_write_pipe(self, protocol_factory, pipe): + protocol = protocol_factory() + waiter = self.create_future() + transport = self._make_write_pipe_transport(pipe, protocol, waiter) + + try: + await waiter + except: + transport.close() + raise + + if self._debug: + logger.debug('Write pipe %r connected: (%r, %r)', + pipe.fileno(), transport, protocol) + return transport, protocol + + def _log_subprocess(self, msg, stdin, stdout, stderr): + info = [msg] + if stdin is not None: + info.append(f'stdin={_format_pipe(stdin)}') + if stdout is not None and stderr == subprocess.STDOUT: + info.append(f'stdout=stderr={_format_pipe(stdout)}') + else: + if stdout is not None: + info.append(f'stdout={_format_pipe(stdout)}') + if stderr is not None: + info.append(f'stderr={_format_pipe(stderr)}') + logger.debug(' '.join(info)) + + async def subprocess_shell(self, protocol_factory, cmd, *, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=False, + shell=True, bufsize=0, + encoding=None, errors=None, text=None, + **kwargs): + if not isinstance(cmd, (bytes, str)): + raise ValueError("cmd must be a string") + if universal_newlines: + raise ValueError("universal_newlines must be False") + if not shell: + raise ValueError("shell must be True") + if bufsize != 0: + 
raise ValueError("bufsize must be 0") + if text: + raise ValueError("text must be False") + if encoding is not None: + raise ValueError("encoding must be None") + if errors is not None: + raise ValueError("errors must be None") + + protocol = protocol_factory() + debug_log = None + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = 'run shell command %r' % cmd + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = await self._make_subprocess_transport( + protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs) + if self._debug and debug_log is not None: + logger.info('%s: %r', debug_log, transport) + return transport, protocol + + async def subprocess_exec(self, protocol_factory, program, *args, + stdin=subprocess.PIPE, stdout=subprocess.PIPE, + stderr=subprocess.PIPE, universal_newlines=False, + shell=False, bufsize=0, + encoding=None, errors=None, text=None, + **kwargs): + if universal_newlines: + raise ValueError("universal_newlines must be False") + if shell: + raise ValueError("shell must be False") + if bufsize != 0: + raise ValueError("bufsize must be 0") + if text: + raise ValueError("text must be False") + if encoding is not None: + raise ValueError("encoding must be None") + if errors is not None: + raise ValueError("errors must be None") + + popen_args = (program,) + args + protocol = protocol_factory() + debug_log = None + if self._debug: + # don't log parameters: they may contain sensitive information + # (password) and may be too long + debug_log = f'execute program {program!r}' + self._log_subprocess(debug_log, stdin, stdout, stderr) + transport = await self._make_subprocess_transport( + protocol, popen_args, False, stdin, stdout, stderr, + bufsize, **kwargs) + if self._debug and debug_log is not None: + logger.info('%s: %r', debug_log, transport) + return transport, protocol + + def get_exception_handler(self): + """Return an exception handler, or None if the default one is in use. + """ + return self._exception_handler + + def set_exception_handler(self, handler): + """Set handler as the new event loop exception handler. + + If handler is None, the default exception handler will + be set. + + If handler is a callable object, it should have a + signature matching '(loop, context)', where 'loop' + will be a reference to the active event loop, 'context' + will be a dict object (see `call_exception_handler()` + documentation for details about context). + """ + if handler is not None and not callable(handler): + raise TypeError(f'A callable object or None is expected, ' + f'got {handler!r}') + self._exception_handler = handler + + def default_exception_handler(self, context): + """Default exception handler. + + This is called when an exception occurs and no exception + handler is set, and can be called by a custom exception + handler that wants to defer to the default behavior. + + This default handler logs the error message and other + context-dependent information. In debug mode, a truncated + stack trace is also appended showing where the given object + (e.g. a handle or future or task) was created, if any. + + The context parameter has the same meaning as in + `call_exception_handler()`. 
+ """ + message = context.get('message') + if not message: + message = 'Unhandled exception in event loop' + + exception = context.get('exception') + if exception is not None: + exc_info = (type(exception), exception, exception.__traceback__) + else: + exc_info = False + + if ('source_traceback' not in context and + self._current_handle is not None and + self._current_handle._source_traceback): + context['handle_traceback'] = \ + self._current_handle._source_traceback + + log_lines = [message] + for key in sorted(context): + if key in {'message', 'exception'}: + continue + value = context[key] + if key == 'source_traceback': + tb = ''.join(traceback.format_list(value)) + value = 'Object created at (most recent call last):\n' + value += tb.rstrip() + elif key == 'handle_traceback': + tb = ''.join(traceback.format_list(value)) + value = 'Handle created at (most recent call last):\n' + value += tb.rstrip() + else: + value = repr(value) + log_lines.append(f'{key}: {value}') + + logger.error('\n'.join(log_lines), exc_info=exc_info) + + def call_exception_handler(self, context): + """Call the current event loop's exception handler. + + The context argument is a dict containing the following keys: + + - 'message': Error message; + - 'exception' (optional): Exception object; + - 'future' (optional): Future instance; + - 'task' (optional): Task instance; + - 'handle' (optional): Handle instance; + - 'protocol' (optional): Protocol instance; + - 'transport' (optional): Transport instance; + - 'socket' (optional): Socket instance; + - 'asyncgen' (optional): Asynchronous generator that caused + the exception. + + New keys maybe introduced in the future. + + Note: do not overload this method in an event loop subclass. + For custom exception handling, use the + `set_exception_handler()` method. + """ + if self._exception_handler is None: + try: + self.default_exception_handler(context) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + # Second protection layer for unexpected errors + # in the default implementation, as well as for subclassed + # event loops with overloaded "default_exception_handler". + logger.error('Exception in default exception handler', + exc_info=True) + else: + try: + self._exception_handler(self, context) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + # Exception in the user set custom exception handler. + try: + # Let's try default handler. + self.default_exception_handler({ + 'message': 'Unhandled error in exception handler', + 'exception': exc, + 'context': context, + }) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + # Guard 'default_exception_handler' in case it is + # overloaded. + logger.error('Exception in default exception handler ' + 'while handling an unexpected error ' + 'in custom exception handler', + exc_info=True) + + def _add_callback(self, handle): + """Add a Handle to _ready.""" + if not handle._cancelled: + self._ready.append(handle) + + def _add_callback_signalsafe(self, handle): + """Like _add_callback() but called from a signal handler.""" + self._add_callback(handle) + self._write_to_self() + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + if handle._scheduled: + self._timer_cancelled_count += 1 + + def _run_once(self): + """Run one full iteration of the event loop. + + This calls all currently ready callbacks, polls for I/O, + schedules the resulting callbacks, and finally schedules + 'call_later' callbacks. 
+ """ + + sched_count = len(self._scheduled) + if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and + self._timer_cancelled_count / sched_count > + _MIN_CANCELLED_TIMER_HANDLES_FRACTION): + # Remove delayed calls that were cancelled if their number + # is too high + new_scheduled = [] + for handle in self._scheduled: + if handle._cancelled: + handle._scheduled = False + else: + new_scheduled.append(handle) + + heapq.heapify(new_scheduled) + self._scheduled = new_scheduled + self._timer_cancelled_count = 0 + else: + # Remove delayed calls that were cancelled from head of queue. + while self._scheduled and self._scheduled[0]._cancelled: + self._timer_cancelled_count -= 1 + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + + timeout = None + if self._ready or self._stopping: + timeout = 0 + elif self._scheduled: + # Compute the desired timeout. + when = self._scheduled[0]._when + timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT) + + event_list = self._selector.select(timeout) + self._process_events(event_list) + # Needed to break cycles when an exception occurs. + event_list = None + + # Handle 'later' callbacks that are ready. + end_time = self.time() + self._clock_resolution + while self._scheduled: + handle = self._scheduled[0] + if handle._when >= end_time: + break + handle = heapq.heappop(self._scheduled) + handle._scheduled = False + self._ready.append(handle) + + # This is the only place where callbacks are actually *called*. + # All other places just add them to ready. + # Note: We run all currently scheduled callbacks, but not any + # callbacks scheduled by callbacks run this time around -- + # they will be run the next time (after another I/O poll). + # Use an idiom that is thread-safe without using locks. + ntodo = len(self._ready) + for i in range(ntodo): + handle = self._ready.popleft() + if handle._cancelled: + continue + if self._debug: + try: + self._current_handle = handle + t0 = self.time() + handle._run() + dt = self.time() - t0 + if dt >= self.slow_callback_duration: + logger.warning('Executing %s took %.3f seconds', + _format_handle(handle), dt) + finally: + self._current_handle = None + else: + handle._run() + handle = None # Needed to break cycles when an exception occurs. + + def _set_coroutine_origin_tracking(self, enabled): + if bool(enabled) == bool(self._coroutine_origin_tracking_enabled): + return + + if enabled: + self._coroutine_origin_tracking_saved_depth = ( + sys.get_coroutine_origin_tracking_depth()) + sys.set_coroutine_origin_tracking_depth( + constants.DEBUG_STACK_DEPTH) + else: + sys.set_coroutine_origin_tracking_depth( + self._coroutine_origin_tracking_saved_depth) + + self._coroutine_origin_tracking_enabled = enabled + + def get_debug(self): + return self._debug + + def set_debug(self, enabled): + self._debug = enabled + + if self.is_running(): + self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled) diff --git a/python310/asyncio/base_futures.py b/python310/asyncio/base_futures.py new file mode 100644 index 0000000000000000000000000000000000000000..2c01ac98e10fcaa4ac0b88a2f196dc6450769bf7 --- /dev/null +++ b/python310/asyncio/base_futures.py @@ -0,0 +1,80 @@ +__all__ = () + +import reprlib +from _thread import get_ident + +from . import format_helpers + +# States for Future. +_PENDING = 'PENDING' +_CANCELLED = 'CANCELLED' +_FINISHED = 'FINISHED' + + +def isfuture(obj): + """Check for a Future. 
+ + This returns True when obj is a Future instance or is advertising + itself as duck-type compatible by setting _asyncio_future_blocking. + See comment in Future for more details. + """ + return (hasattr(obj.__class__, '_asyncio_future_blocking') and + obj._asyncio_future_blocking is not None) + + +def _format_callbacks(cb): + """helper function for Future.__repr__""" + size = len(cb) + if not size: + cb = '' + + def format_cb(callback): + return format_helpers._format_callback_source(callback, ()) + + if size == 1: + cb = format_cb(cb[0][0]) + elif size == 2: + cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0])) + elif size > 2: + cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]), + size - 2, + format_cb(cb[-1][0])) + return f'cb=[{cb}]' + + +# bpo-42183: _repr_running is needed for repr protection +# when a Future or Task result contains itself directly or indirectly. +# The logic is borrowed from @reprlib.recursive_repr decorator. +# Unfortunately, the direct decorator usage is impossible because of +# AttributeError: '_asyncio.Task' object has no attribute '__module__' error. +# +# After fixing this thing we can return to the decorator based approach. +_repr_running = set() + + +def _future_repr_info(future): + # (Future) -> str + """helper function for Future.__repr__""" + info = [future._state.lower()] + if future._state == _FINISHED: + if future._exception is not None: + info.append(f'exception={future._exception!r}') + else: + key = id(future), get_ident() + if key in _repr_running: + result = '...' + else: + _repr_running.add(key) + try: + # use reprlib to limit the length of the output, especially + # for very long strings + result = reprlib.repr(future._result) + finally: + _repr_running.discard(key) + info.append(f'result={result}') + if future._callbacks: + info.append(_format_callbacks(future._callbacks)) + if future._source_traceback: + frame = future._source_traceback[-1] + info.append(f'created at {frame[0]}:{frame[1]}') + return info diff --git a/python310/asyncio/base_subprocess.py b/python310/asyncio/base_subprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..14d505192288142616cb28e865dd53850df2d7d5 --- /dev/null +++ b/python310/asyncio/base_subprocess.py @@ -0,0 +1,285 @@ +import collections +import subprocess +import warnings + +from . import protocols +from . 
import transports +from .log import logger + + +class BaseSubprocessTransport(transports.SubprocessTransport): + + def __init__(self, loop, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=None, extra=None, **kwargs): + super().__init__(extra) + self._closed = False + self._protocol = protocol + self._loop = loop + self._proc = None + self._pid = None + self._returncode = None + self._exit_waiters = [] + self._pending_calls = collections.deque() + self._pipes = {} + self._finished = False + + if stdin == subprocess.PIPE: + self._pipes[0] = None + if stdout == subprocess.PIPE: + self._pipes[1] = None + if stderr == subprocess.PIPE: + self._pipes[2] = None + + # Create the child process: set the _proc attribute + try: + self._start(args=args, shell=shell, stdin=stdin, stdout=stdout, + stderr=stderr, bufsize=bufsize, **kwargs) + except: + self.close() + raise + + self._pid = self._proc.pid + self._extra['subprocess'] = self._proc + + if self._loop.get_debug(): + if isinstance(args, (bytes, str)): + program = args + else: + program = args[0] + logger.debug('process %r created: pid %s', + program, self._pid) + + self._loop.create_task(self._connect_pipes(waiter)) + + def __repr__(self): + info = [self.__class__.__name__] + if self._closed: + info.append('closed') + if self._pid is not None: + info.append(f'pid={self._pid}') + if self._returncode is not None: + info.append(f'returncode={self._returncode}') + elif self._pid is not None: + info.append('running') + else: + info.append('not started') + + stdin = self._pipes.get(0) + if stdin is not None: + info.append(f'stdin={stdin.pipe}') + + stdout = self._pipes.get(1) + stderr = self._pipes.get(2) + if stdout is not None and stderr is stdout: + info.append(f'stdout=stderr={stdout.pipe}') + else: + if stdout is not None: + info.append(f'stdout={stdout.pipe}') + if stderr is not None: + info.append(f'stderr={stderr.pipe}') + + return '<{}>'.format(' '.join(info)) + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + raise NotImplementedError + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closed + + def close(self): + if self._closed: + return + self._closed = True + + for proto in self._pipes.values(): + if proto is None: + continue + proto.pipe.close() + + if (self._proc is not None and + # has the child process finished? + self._returncode is None and + # the child process has finished, but the + # transport hasn't been notified yet? 
+ self._proc.poll() is None): + + if self._loop.get_debug(): + logger.warning('Close running child process: kill %r', self) + + try: + self._proc.kill() + except ProcessLookupError: + pass + + # Don't clear the _proc reference yet: _post_init() may still run + + def __del__(self, _warn=warnings.warn): + if not self._closed: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self.close() + + def get_pid(self): + return self._pid + + def get_returncode(self): + return self._returncode + + def get_pipe_transport(self, fd): + if fd in self._pipes: + return self._pipes[fd].pipe + else: + return None + + def _check_proc(self): + if self._proc is None: + raise ProcessLookupError() + + def send_signal(self, signal): + self._check_proc() + self._proc.send_signal(signal) + + def terminate(self): + self._check_proc() + self._proc.terminate() + + def kill(self): + self._check_proc() + self._proc.kill() + + async def _connect_pipes(self, waiter): + try: + proc = self._proc + loop = self._loop + + if proc.stdin is not None: + _, pipe = await loop.connect_write_pipe( + lambda: WriteSubprocessPipeProto(self, 0), + proc.stdin) + self._pipes[0] = pipe + + if proc.stdout is not None: + _, pipe = await loop.connect_read_pipe( + lambda: ReadSubprocessPipeProto(self, 1), + proc.stdout) + self._pipes[1] = pipe + + if proc.stderr is not None: + _, pipe = await loop.connect_read_pipe( + lambda: ReadSubprocessPipeProto(self, 2), + proc.stderr) + self._pipes[2] = pipe + + assert self._pending_calls is not None + + loop.call_soon(self._protocol.connection_made, self) + for callback, data in self._pending_calls: + loop.call_soon(callback, *data) + self._pending_calls = None + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if waiter is not None and not waiter.cancelled(): + waiter.set_exception(exc) + else: + if waiter is not None and not waiter.cancelled(): + waiter.set_result(None) + + def _call(self, cb, *data): + if self._pending_calls is not None: + self._pending_calls.append((cb, data)) + else: + self._loop.call_soon(cb, *data) + + def _pipe_connection_lost(self, fd, exc): + self._call(self._protocol.pipe_connection_lost, fd, exc) + self._try_finish() + + def _pipe_data_received(self, fd, data): + self._call(self._protocol.pipe_data_received, fd, data) + + def _process_exited(self, returncode): + assert returncode is not None, returncode + assert self._returncode is None, self._returncode + if self._loop.get_debug(): + logger.info('%r exited with return code %r', self, returncode) + self._returncode = returncode + if self._proc.returncode is None: + # asyncio uses a child watcher: copy the status into the Popen + # object. On Python 3.6, it is required to avoid a ResourceWarning. + self._proc.returncode = returncode + self._call(self._protocol.process_exited) + self._try_finish() + + # wake up futures waiting for wait() + for waiter in self._exit_waiters: + if not waiter.cancelled(): + waiter.set_result(returncode) + self._exit_waiters = None + + async def _wait(self): + """Wait until the process exit and return the process return code. 
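# --- Illustrative usage sketch (editor's addition, not part of the vendored file) ---
# The high-level asyncio.create_subprocess_exec() helper builds on the subprocess
# transport above: Process.pid and Process.returncode come from get_pid() and
# get_returncode(), and Process.wait() awaits the exit waiters populated in
# _process_exited().  The command below is a placeholder for the example.
import asyncio
import sys


async def high_level_subprocess_demo():
    proc = await asyncio.create_subprocess_exec(
        sys.executable, "-c", "print('child done')",
        stdout=asyncio.subprocess.PIPE)
    out, _ = await proc.communicate()
    print(proc.pid, proc.returncode, out)


# asyncio.run(high_level_subprocess_demo())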
+ + This method is a coroutine.""" + if self._returncode is not None: + return self._returncode + + waiter = self._loop.create_future() + self._exit_waiters.append(waiter) + return await waiter + + def _try_finish(self): + assert not self._finished + if self._returncode is None: + return + if all(p is not None and p.disconnected + for p in self._pipes.values()): + self._finished = True + self._call(self._call_connection_lost, None) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._loop = None + self._proc = None + self._protocol = None + + +class WriteSubprocessPipeProto(protocols.BaseProtocol): + + def __init__(self, proc, fd): + self.proc = proc + self.fd = fd + self.pipe = None + self.disconnected = False + + def connection_made(self, transport): + self.pipe = transport + + def __repr__(self): + return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>' + + def connection_lost(self, exc): + self.disconnected = True + self.proc._pipe_connection_lost(self.fd, exc) + self.proc = None + + def pause_writing(self): + self.proc._protocol.pause_writing() + + def resume_writing(self): + self.proc._protocol.resume_writing() + + +class ReadSubprocessPipeProto(WriteSubprocessPipeProto, + protocols.Protocol): + + def data_received(self, data): + self.proc._pipe_data_received(self.fd, data) diff --git a/python310/asyncio/base_tasks.py b/python310/asyncio/base_tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..09bb171a2ce750a06a39e978bea0d3a7accf25f4 --- /dev/null +++ b/python310/asyncio/base_tasks.py @@ -0,0 +1,85 @@ +import linecache +import traceback + +from . import base_futures +from . import coroutines + + +def _task_repr_info(task): + info = base_futures._future_repr_info(task) + + if task._must_cancel: + # replace status + info[0] = 'cancelling' + + info.insert(1, 'name=%r' % task.get_name()) + + coro = coroutines._format_coroutine(task._coro) + info.insert(2, f'coro=<{coro}>') + + if task._fut_waiter is not None: + info.insert(3, f'wait_for={task._fut_waiter!r}') + return info + + +def _task_get_stack(task, limit): + frames = [] + if hasattr(task._coro, 'cr_frame'): + # case 1: 'async def' coroutines + f = task._coro.cr_frame + elif hasattr(task._coro, 'gi_frame'): + # case 2: legacy coroutines + f = task._coro.gi_frame + elif hasattr(task._coro, 'ag_frame'): + # case 3: async generators + f = task._coro.ag_frame + else: + # case 4: unknown objects + f = None + if f is not None: + while f is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(f) + f = f.f_back + frames.reverse() + elif task._exception is not None: + tb = task._exception.__traceback__ + while tb is not None: + if limit is not None: + if limit <= 0: + break + limit -= 1 + frames.append(tb.tb_frame) + tb = tb.tb_next + return frames + + +def _task_print_stack(task, limit, file): + extracted_list = [] + checked = set() + for f in task.get_stack(limit=limit): + lineno = f.f_lineno + co = f.f_code + filename = co.co_filename + name = co.co_name + if filename not in checked: + checked.add(filename) + linecache.checkcache(filename) + line = linecache.getline(filename, lineno, f.f_globals) + extracted_list.append((filename, lineno, name, line)) + + exc = task._exception + if not extracted_list: + print(f'No stack for {task!r}', file=file) + elif exc is not None: + print(f'Traceback for {task!r} (most recent call last):', file=file) + else: + print(f'Stack for {task!r} (most recent call last):', file=file) + 
+ traceback.print_list(extracted_list, file=file) + if exc is not None: + for line in traceback.format_exception_only(exc.__class__, exc): + print(line, file=file, end='') diff --git a/python310/asyncio/constants.py b/python310/asyncio/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..33feed60e55b008104ec01f0ce6cbb9d573a8eb3 --- /dev/null +++ b/python310/asyncio/constants.py @@ -0,0 +1,27 @@ +import enum + +# After the connection is lost, log warnings after this many write()s. +LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5 + +# Seconds to wait before retrying accept(). +ACCEPT_RETRY_DELAY = 1 + +# Number of stack entries to capture in debug mode. +# The larger the number, the slower the operation in debug mode +# (see extract_stack() in format_helpers.py). +DEBUG_STACK_DEPTH = 10 + +# Number of seconds to wait for SSL handshake to complete +# The default timeout matches that of Nginx. +SSL_HANDSHAKE_TIMEOUT = 60.0 + +# Used in sendfile fallback code. We use fallback for platforms +# that don't support sendfile, or for TLS connections. +SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256 + +# The enum should be here to break circular dependencies between +# base_events and sslproto +class _SendfileMode(enum.Enum): + UNSUPPORTED = enum.auto() + TRY_NATIVE = enum.auto() + FALLBACK = enum.auto() diff --git a/python310/asyncio/coroutines.py b/python310/asyncio/coroutines.py new file mode 100644 index 0000000000000000000000000000000000000000..9664ea74d7514775fac6622d25d1d9dc175b92a9 --- /dev/null +++ b/python310/asyncio/coroutines.py @@ -0,0 +1,269 @@ +__all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine' + +import collections.abc +import functools +import inspect +import os +import sys +import traceback +import types +import warnings + +from . import base_futures +from . import constants +from . import format_helpers +from .log import logger + + +def _is_debug_mode(): + # If you set _DEBUG to true, @coroutine will wrap the resulting + # generator objects in a CoroWrapper instance (defined below). That + # instance will log a message when the generator is never iterated + # over, which may happen when you forget to use "await" or "yield from" + # with a coroutine call. + # Note that the value of the _DEBUG flag is taken + # when the decorator is used, so to be of any use it must be set + # before you define your coroutines. A downside of using this feature + # is that tracebacks show entries for the CoroWrapper.__next__ method + # when _DEBUG is true. + return sys.flags.dev_mode or (not sys.flags.ignore_environment and + bool(os.environ.get('PYTHONASYNCIODEBUG'))) + + +_DEBUG = _is_debug_mode() + + +class CoroWrapper: + # Wrapper for coroutine object in _DEBUG mode. 
+ + def __init__(self, gen, func=None): + assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen + self.gen = gen + self.func = func # Used to unwrap @coroutine decorator + self._source_traceback = format_helpers.extract_stack(sys._getframe(1)) + self.__name__ = getattr(gen, '__name__', None) + self.__qualname__ = getattr(gen, '__qualname__', None) + + def __repr__(self): + coro_repr = _format_coroutine(self) + if self._source_traceback: + frame = self._source_traceback[-1] + coro_repr += f', created at {frame[0]}:{frame[1]}' + + return f'<{self.__class__.__name__} {coro_repr}>' + + def __iter__(self): + return self + + def __next__(self): + return self.gen.send(None) + + def send(self, value): + return self.gen.send(value) + + def throw(self, type, value=None, traceback=None): + return self.gen.throw(type, value, traceback) + + def close(self): + return self.gen.close() + + @property + def gi_frame(self): + return self.gen.gi_frame + + @property + def gi_running(self): + return self.gen.gi_running + + @property + def gi_code(self): + return self.gen.gi_code + + def __await__(self): + return self + + @property + def gi_yieldfrom(self): + return self.gen.gi_yieldfrom + + def __del__(self): + # Be careful accessing self.gen.frame -- self.gen might not exist. + gen = getattr(self, 'gen', None) + frame = getattr(gen, 'gi_frame', None) + if frame is not None and frame.f_lasti == -1: + msg = f'{self!r} was never yielded from' + tb = getattr(self, '_source_traceback', ()) + if tb: + tb = ''.join(traceback.format_list(tb)) + msg += (f'\nCoroutine object created at ' + f'(most recent call last, truncated to ' + f'{constants.DEBUG_STACK_DEPTH} last lines):\n') + msg += tb.rstrip() + logger.error(msg) + + +def coroutine(func): + """Decorator to mark coroutines. + + If the coroutine is not yielded from before it is destroyed, + an error message is logged. + """ + warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead', + DeprecationWarning, + stacklevel=2) + if inspect.iscoroutinefunction(func): + # In Python 3.5 that's all we need to do for coroutines + # defined with "async def". + return func + + if inspect.isgeneratorfunction(func): + coro = func + else: + @functools.wraps(func) + def coro(*args, **kw): + res = func(*args, **kw) + if (base_futures.isfuture(res) or inspect.isgenerator(res) or + isinstance(res, CoroWrapper)): + res = yield from res + else: + # If 'res' is an awaitable, run it. + try: + await_meth = res.__await__ + except AttributeError: + pass + else: + if isinstance(res, collections.abc.Awaitable): + res = yield from await_meth() + return res + + coro = types.coroutine(coro) + if not _DEBUG: + wrapper = coro + else: + @functools.wraps(func) + def wrapper(*args, **kwds): + w = CoroWrapper(coro(*args, **kwds), func=func) + if w._source_traceback: + del w._source_traceback[-1] + # Python < 3.5 does not implement __qualname__ + # on generator objects, so we set it manually. + # We use getattr as some callables (such as + # functools.partial may lack __qualname__). + w.__name__ = getattr(func, '__name__', None) + w.__qualname__ = getattr(func, '__qualname__', None) + return w + + wrapper._is_coroutine = _is_coroutine # For iscoroutinefunction(). + return wrapper + + +# A marker for iscoroutinefunction. 
+_is_coroutine = object() + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function.""" + return (inspect.iscoroutinefunction(func) or + getattr(func, '_is_coroutine', None) is _is_coroutine) + + +# Prioritize native coroutine check to speed-up +# asyncio.iscoroutine. +_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, + collections.abc.Coroutine, CoroWrapper) +_iscoroutine_typecache = set() + + +def iscoroutine(obj): + """Return True if obj is a coroutine object.""" + if type(obj) in _iscoroutine_typecache: + return True + + if isinstance(obj, _COROUTINE_TYPES): + # Just in case we don't want to cache more than 100 + # positive types. That shouldn't ever happen, unless + # someone stressing the system on purpose. + if len(_iscoroutine_typecache) < 100: + _iscoroutine_typecache.add(type(obj)) + return True + else: + return False + + +def _format_coroutine(coro): + assert iscoroutine(coro) + + is_corowrapper = isinstance(coro, CoroWrapper) + + def get_name(coro): + # Coroutines compiled with Cython sometimes don't have + # proper __qualname__ or __name__. While that is a bug + # in Cython, asyncio shouldn't crash with an AttributeError + # in its __repr__ functions. + if is_corowrapper: + return format_helpers._format_callback(coro.func, (), {}) + + if hasattr(coro, '__qualname__') and coro.__qualname__: + coro_name = coro.__qualname__ + elif hasattr(coro, '__name__') and coro.__name__: + coro_name = coro.__name__ + else: + # Stop masking Cython bugs, expose them in a friendly way. + coro_name = f'<{type(coro).__name__} without __name__>' + return f'{coro_name}()' + + def is_running(coro): + try: + return coro.cr_running + except AttributeError: + try: + return coro.gi_running + except AttributeError: + return False + + coro_code = None + if hasattr(coro, 'cr_code') and coro.cr_code: + coro_code = coro.cr_code + elif hasattr(coro, 'gi_code') and coro.gi_code: + coro_code = coro.gi_code + + coro_name = get_name(coro) + + if not coro_code: + # Built-in types might not have __qualname__ or __name__. + if is_running(coro): + return f'{coro_name} running' + else: + return coro_name + + coro_frame = None + if hasattr(coro, 'gi_frame') and coro.gi_frame: + coro_frame = coro.gi_frame + elif hasattr(coro, 'cr_frame') and coro.cr_frame: + coro_frame = coro.cr_frame + + # If Cython's coroutine has a fake code object without proper + # co_filename -- expose that. 
+ filename = coro_code.co_filename or '' + + lineno = 0 + if (is_corowrapper and + coro.func is not None and + not inspect.isgeneratorfunction(coro.func)): + source = format_helpers._get_function_source(coro.func) + if source is not None: + filename, lineno = source + if coro_frame is None: + coro_repr = f'{coro_name} done, defined at {filename}:{lineno}' + else: + coro_repr = f'{coro_name} running, defined at {filename}:{lineno}' + + elif coro_frame is not None: + lineno = coro_frame.f_lineno + coro_repr = f'{coro_name} running at {filename}:{lineno}' + + else: + lineno = coro_code.co_firstlineno + coro_repr = f'{coro_name} done, defined at {filename}:{lineno}' + + return coro_repr diff --git a/python310/asyncio/events.py b/python310/asyncio/events.py new file mode 100644 index 0000000000000000000000000000000000000000..e0882309edaa3c18703765a61abb92ac203a64dd --- /dev/null +++ b/python310/asyncio/events.py @@ -0,0 +1,819 @@ +"""Event loop and event loop policy.""" + +__all__ = ( + 'AbstractEventLoopPolicy', + 'AbstractEventLoop', 'AbstractServer', + 'Handle', 'TimerHandle', + 'get_event_loop_policy', 'set_event_loop_policy', + 'get_event_loop', 'set_event_loop', 'new_event_loop', + 'get_child_watcher', 'set_child_watcher', + '_set_running_loop', 'get_running_loop', + '_get_running_loop', +) + +import contextvars +import os +import socket +import subprocess +import sys +import threading + +from . import format_helpers + + +class Handle: + """Object returned by callback registration methods.""" + + __slots__ = ('_callback', '_args', '_cancelled', '_loop', + '_source_traceback', '_repr', '__weakref__', + '_context') + + def __init__(self, callback, args, loop, context=None): + if context is None: + context = contextvars.copy_context() + self._context = context + self._loop = loop + self._callback = callback + self._args = args + self._cancelled = False + self._repr = None + if self._loop.get_debug(): + self._source_traceback = format_helpers.extract_stack( + sys._getframe(1)) + else: + self._source_traceback = None + + def _repr_info(self): + info = [self.__class__.__name__] + if self._cancelled: + info.append('cancelled') + if self._callback is not None: + info.append(format_helpers._format_callback_source( + self._callback, self._args)) + if self._source_traceback: + frame = self._source_traceback[-1] + info.append(f'created at {frame[0]}:{frame[1]}') + return info + + def __repr__(self): + if self._repr is not None: + return self._repr + info = self._repr_info() + return '<{}>'.format(' '.join(info)) + + def cancel(self): + if not self._cancelled: + self._cancelled = True + if self._loop.get_debug(): + # Keep a representation in debug mode to keep callback and + # parameters. For example, to log the warning + # "Executing took 2.5 second" + self._repr = repr(self) + self._callback = None + self._args = None + + def cancelled(self): + return self._cancelled + + def _run(self): + try: + self._context.run(self._callback, *self._args) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + cb = format_helpers._format_callback_source( + self._callback, self._args) + msg = f'Exception in callback {cb}' + context = { + 'message': msg, + 'exception': exc, + 'handle': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self = None # Needed to break cycles when an exception occurs. 
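# --- Illustrative usage sketch (editor's addition, not part of the vendored file) ---
# call_soon() and call_later() return the Handle and TimerHandle objects defined
# just above and below.  Cancelling a handle before it runs clears its callback,
# so _run() is never invoked for it; TimerHandle.when() reports the absolute loop
# time at which the callback is scheduled.
import asyncio


async def handle_demo():
    loop = asyncio.get_running_loop()
    fired = []
    handle = loop.call_soon(fired.append, "soon")
    handle.cancel()                       # cancelled before it runs
    timer = loop.call_later(0.01, fired.append, "later")
    print("timer fires at loop time", timer.when())
    await asyncio.sleep(0.05)
    print(fired)                          # ['later'] -- the cancelled callback is absent


# asyncio.run(handle_demo())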
+ + +class TimerHandle(Handle): + """Object returned by timed callback registration methods.""" + + __slots__ = ['_scheduled', '_when'] + + def __init__(self, when, callback, args, loop, context=None): + assert when is not None + super().__init__(callback, args, loop, context) + if self._source_traceback: + del self._source_traceback[-1] + self._when = when + self._scheduled = False + + def _repr_info(self): + info = super()._repr_info() + pos = 2 if self._cancelled else 1 + info.insert(pos, f'when={self._when}') + return info + + def __hash__(self): + return hash(self._when) + + def __lt__(self, other): + if isinstance(other, TimerHandle): + return self._when < other._when + return NotImplemented + + def __le__(self, other): + if isinstance(other, TimerHandle): + return self._when < other._when or self.__eq__(other) + return NotImplemented + + def __gt__(self, other): + if isinstance(other, TimerHandle): + return self._when > other._when + return NotImplemented + + def __ge__(self, other): + if isinstance(other, TimerHandle): + return self._when > other._when or self.__eq__(other) + return NotImplemented + + def __eq__(self, other): + if isinstance(other, TimerHandle): + return (self._when == other._when and + self._callback == other._callback and + self._args == other._args and + self._cancelled == other._cancelled) + return NotImplemented + + def cancel(self): + if not self._cancelled: + self._loop._timer_handle_cancelled(self) + super().cancel() + + def when(self): + """Return a scheduled callback time. + + The time is an absolute timestamp, using the same time + reference as loop.time(). + """ + return self._when + + +class AbstractServer: + """Abstract server returned by create_server().""" + + def close(self): + """Stop serving. This leaves existing connections open.""" + raise NotImplementedError + + def get_loop(self): + """Get the event loop the Server object is attached to.""" + raise NotImplementedError + + def is_serving(self): + """Return True if the server is accepting connections.""" + raise NotImplementedError + + async def start_serving(self): + """Start accepting connections. + + This method is idempotent, so it can be called when + the server is already being serving. + """ + raise NotImplementedError + + async def serve_forever(self): + """Start accepting connections until the coroutine is cancelled. + + The server is closed when the coroutine is cancelled. + """ + raise NotImplementedError + + async def wait_closed(self): + """Coroutine to wait until service is closed.""" + raise NotImplementedError + + async def __aenter__(self): + return self + + async def __aexit__(self, *exc): + self.close() + await self.wait_closed() + + +class AbstractEventLoop: + """Abstract event loop.""" + + # Running and stopping the event loop. + + def run_forever(self): + """Run the event loop until stop() is called.""" + raise NotImplementedError + + def run_until_complete(self, future): + """Run the event loop until a Future is done. + + Return the Future's result, or raise its exception. + """ + raise NotImplementedError + + def stop(self): + """Stop the event loop as soon as reasonable. + + Exactly how soon that is may depend on the implementation, but + no more I/O callbacks should be scheduled. + """ + raise NotImplementedError + + def is_running(self): + """Return whether the event loop is currently running.""" + raise NotImplementedError + + def is_closed(self): + """Returns True if the event loop was closed.""" + raise NotImplementedError + + def close(self): + """Close the loop. 
+ + The loop should not be running. + + This is idempotent and irreversible. + + No other methods should be called after this one. + """ + raise NotImplementedError + + async def shutdown_asyncgens(self): + """Shutdown all active asynchronous generators.""" + raise NotImplementedError + + async def shutdown_default_executor(self): + """Schedule the shutdown of the default executor.""" + raise NotImplementedError + + # Methods scheduling callbacks. All these return Handles. + + def _timer_handle_cancelled(self, handle): + """Notification that a TimerHandle has been cancelled.""" + raise NotImplementedError + + def call_soon(self, callback, *args, context=None): + return self.call_later(0, callback, *args, context=context) + + def call_later(self, delay, callback, *args, context=None): + raise NotImplementedError + + def call_at(self, when, callback, *args, context=None): + raise NotImplementedError + + def time(self): + raise NotImplementedError + + def create_future(self): + raise NotImplementedError + + # Method scheduling a coroutine object: create a task. + + def create_task(self, coro, *, name=None): + raise NotImplementedError + + # Methods for interacting with threads. + + def call_soon_threadsafe(self, callback, *args, context=None): + raise NotImplementedError + + def run_in_executor(self, executor, func, *args): + raise NotImplementedError + + def set_default_executor(self, executor): + raise NotImplementedError + + # Network I/O methods returning Futures. + + async def getaddrinfo(self, host, port, *, + family=0, type=0, proto=0, flags=0): + raise NotImplementedError + + async def getnameinfo(self, sockaddr, flags=0): + raise NotImplementedError + + async def create_connection( + self, protocol_factory, host=None, port=None, + *, ssl=None, family=0, proto=0, + flags=0, sock=None, local_addr=None, + server_hostname=None, + ssl_handshake_timeout=None, + happy_eyeballs_delay=None, interleave=None): + raise NotImplementedError + + async def create_server( + self, protocol_factory, host=None, port=None, + *, family=socket.AF_UNSPEC, + flags=socket.AI_PASSIVE, sock=None, backlog=100, + ssl=None, reuse_address=None, reuse_port=None, + ssl_handshake_timeout=None, + start_serving=True): + """A coroutine which creates a TCP server bound to host and port. + + The return value is a Server object which can be used to stop + the service. + + If host is an empty string or None all interfaces are assumed + and a list of multiple sockets will be returned (most likely + one for IPv4 and another one for IPv6). The host parameter can also be + a sequence (e.g. list) of hosts to bind to. + + family can be set to either AF_INET or AF_INET6 to force the + socket to use IPv4 or IPv6. If not set it will be determined + from host (defaults to AF_UNSPEC). + + flags is a bitmask for getaddrinfo(). + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified will automatically be set to True on + UNIX. + + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows. 
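# --- Illustrative usage sketch (editor's addition, not part of the vendored file) ---
# Using create_server() as documented above together with the AbstractServer
# async-context-manager protocol (__aenter__/__aexit__).  Port 0 asks the OS for a
# free port; the address and echo behaviour are placeholders for the example.
import asyncio


class Echo(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport

    def data_received(self, data):
        self.transport.write(data)


async def serve_demo():
    loop = asyncio.get_running_loop()
    server = await loop.create_server(Echo, host="127.0.0.1", port=0)
    async with server:
        print("listening on", server.sockets[0].getsockname())
        # await server.serve_forever()   # run until cancelled


# asyncio.run(serve_demo())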
+ + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for completion of the SSL handshake before aborting the + connection. Default is 60s. + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + """ + raise NotImplementedError + + async def sendfile(self, transport, file, offset=0, count=None, + *, fallback=True): + """Send a file through a transport. + + Return an amount of sent bytes. + """ + raise NotImplementedError + + async def start_tls(self, transport, protocol, sslcontext, *, + server_side=False, + server_hostname=None, + ssl_handshake_timeout=None): + """Upgrade a transport to TLS. + + Return a new transport that *protocol* should start using + immediately. + """ + raise NotImplementedError + + async def create_unix_connection( + self, protocol_factory, path=None, *, + ssl=None, sock=None, + server_hostname=None, + ssl_handshake_timeout=None): + raise NotImplementedError + + async def create_unix_server( + self, protocol_factory, path=None, *, + sock=None, backlog=100, ssl=None, + ssl_handshake_timeout=None, + start_serving=True): + """A coroutine which creates a UNIX Domain Socket server. + + The return value is a Server object, which can be used to stop + the service. + + path is a str, representing a file system path to bind the + server socket to. + + sock can optionally be specified in order to use a preexisting + socket object. + + backlog is the maximum number of queued connections passed to + listen() (defaults to 100). + + ssl can be set to an SSLContext to enable SSL over the + accepted connections. + + ssl_handshake_timeout is the time in seconds that an SSL server + will wait for the SSL handshake to complete (defaults to 60s). + + start_serving set to True (default) causes the created server + to start accepting connections immediately. When set to False, + the user should await Server.start_serving() or Server.serve_forever() + to make the server to start accepting connections. + """ + raise NotImplementedError + + async def connect_accepted_socket( + self, protocol_factory, sock, + *, ssl=None, + ssl_handshake_timeout=None): + """Handle an accepted connection. + + This is used by servers that accept connections outside of + asyncio, but use asyncio to handle connections. + + This method is a coroutine. When completed, the coroutine + returns a (transport, protocol) pair. + """ + raise NotImplementedError + + async def create_datagram_endpoint(self, protocol_factory, + local_addr=None, remote_addr=None, *, + family=0, proto=0, flags=0, + reuse_address=None, reuse_port=None, + allow_broadcast=None, sock=None): + """A coroutine which creates a datagram endpoint. + + This method will try to establish the endpoint in the background. + When successful, the coroutine returns a (transport, protocol) pair. + + protocol_factory must be a callable returning a protocol instance. + + socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on + host (or family if specified), socket type SOCK_DGRAM. + + reuse_address tells the kernel to reuse a local socket in + TIME_WAIT state, without waiting for its natural timeout to + expire. If not specified it will automatically be set to True on + UNIX. 
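# --- Illustrative usage sketch (editor's addition, not part of the vendored file) ---
# create_datagram_endpoint(), documented above, pairs a DatagramProtocol with a UDP
# transport.  local_addr=('127.0.0.1', 0) binds to an OS-chosen port; the address
# and echo behaviour are placeholders for the example.
import asyncio


class UDPEcho(asyncio.DatagramProtocol):
    def connection_made(self, transport):
        self.transport = transport

    def datagram_received(self, data, addr):
        self.transport.sendto(data, addr)


async def udp_demo():
    loop = asyncio.get_running_loop()
    transport, protocol = await loop.create_datagram_endpoint(
        UDPEcho, local_addr=("127.0.0.1", 0))
    try:
        print("bound to", transport.get_extra_info("sockname"))
    finally:
        transport.close()


# asyncio.run(udp_demo())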
+ + reuse_port tells the kernel to allow this endpoint to be bound to + the same port as other existing endpoints are bound to, so long as + they all set this flag when being created. This option is not + supported on Windows and some UNIX's. If the + :py:data:`~socket.SO_REUSEPORT` constant is not defined then this + capability is unsupported. + + allow_broadcast tells the kernel to allow this endpoint to send + messages to the broadcast address. + + sock can optionally be specified in order to use a preexisting + socket object. + """ + raise NotImplementedError + + # Pipes and subprocesses. + + async def connect_read_pipe(self, protocol_factory, pipe): + """Register read pipe in event loop. Set the pipe to non-blocking mode. + + protocol_factory should instantiate object with Protocol interface. + pipe is a file-like object. + Return pair (transport, protocol), where transport supports the + ReadTransport interface.""" + # The reason to accept file-like object instead of just file descriptor + # is: we need to own pipe and close it at transport finishing + # Can got complicated errors if pass f.fileno(), + # close fd in pipe transport then close f and vice versa. + raise NotImplementedError + + async def connect_write_pipe(self, protocol_factory, pipe): + """Register write pipe in event loop. + + protocol_factory should instantiate object with BaseProtocol interface. + Pipe is file-like object already switched to nonblocking. + Return pair (transport, protocol), where transport support + WriteTransport interface.""" + # The reason to accept file-like object instead of just file descriptor + # is: we need to own pipe and close it at transport finishing + # Can got complicated errors if pass f.fileno(), + # close fd in pipe transport then close f and vice versa. + raise NotImplementedError + + async def subprocess_shell(self, protocol_factory, cmd, *, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + **kwargs): + raise NotImplementedError + + async def subprocess_exec(self, protocol_factory, *args, + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + **kwargs): + raise NotImplementedError + + # Ready-based callback registration methods. + # The add_*() methods return None. + # The remove_*() methods return True if something was removed, + # False if there was nothing to delete. + + def add_reader(self, fd, callback, *args): + raise NotImplementedError + + def remove_reader(self, fd): + raise NotImplementedError + + def add_writer(self, fd, callback, *args): + raise NotImplementedError + + def remove_writer(self, fd): + raise NotImplementedError + + # Completion based I/O methods returning Futures. + + async def sock_recv(self, sock, nbytes): + raise NotImplementedError + + async def sock_recv_into(self, sock, buf): + raise NotImplementedError + + async def sock_sendall(self, sock, data): + raise NotImplementedError + + async def sock_connect(self, sock, address): + raise NotImplementedError + + async def sock_accept(self, sock): + raise NotImplementedError + + async def sock_sendfile(self, sock, file, offset=0, count=None, + *, fallback=None): + raise NotImplementedError + + # Signal handling. + + def add_signal_handler(self, sig, callback, *args): + raise NotImplementedError + + def remove_signal_handler(self, sig): + raise NotImplementedError + + # Task factory. + + def set_task_factory(self, factory): + raise NotImplementedError + + def get_task_factory(self): + raise NotImplementedError + + # Error handlers. 
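Editor's note: for the datagram endpoint described above, a minimal UDP echo sketch; the local address and port are illustrative:

import asyncio

class EchoDatagramProtocol(asyncio.DatagramProtocol):
    def connection_made(self, transport):
        self.transport = transport

    def datagram_received(self, data, addr):
        # Send each datagram straight back to its sender.
        self.transport.sendto(data, addr)

async def main():
    loop = asyncio.get_running_loop()
    transport, protocol = await loop.create_datagram_endpoint(
        EchoDatagramProtocol, local_addr=("127.0.0.1", 9999))
    try:
        await asyncio.sleep(3600)      # serve for a while (illustrative)
    finally:
        transport.close()

asyncio.run(main())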
+ + def get_exception_handler(self): + raise NotImplementedError + + def set_exception_handler(self, handler): + raise NotImplementedError + + def default_exception_handler(self, context): + raise NotImplementedError + + def call_exception_handler(self, context): + raise NotImplementedError + + # Debug flag management. + + def get_debug(self): + raise NotImplementedError + + def set_debug(self, enabled): + raise NotImplementedError + + +class AbstractEventLoopPolicy: + """Abstract policy for accessing the event loop.""" + + def get_event_loop(self): + """Get the event loop for the current context. + + Returns an event loop object implementing the BaseEventLoop interface, + or raises an exception in case no event loop has been set for the + current context and the current policy does not specify to create one. + + It should never return None.""" + raise NotImplementedError + + def set_event_loop(self, loop): + """Set the event loop for the current context to loop.""" + raise NotImplementedError + + def new_event_loop(self): + """Create and return a new event loop object according to this + policy's rules. If there's need to set this loop as the event loop for + the current context, set_event_loop must be called explicitly.""" + raise NotImplementedError + + # Child processes handling (Unix only). + + def get_child_watcher(self): + "Get the watcher for child processes." + raise NotImplementedError + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + raise NotImplementedError + + +class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy): + """Default policy implementation for accessing the event loop. + + In this policy, each thread has its own event loop. However, we + only automatically create an event loop by default for the main + thread; other threads by default have no event loop. + + Other policies may have different rules (e.g. a single global + event loop, or automatically creating an event loop per thread, or + using some other notion of context to which an event loop is + associated). + """ + + _loop_factory = None + + class _Local(threading.local): + _loop = None + _set_called = False + + def __init__(self): + self._local = self._Local() + + def get_event_loop(self): + """Get the event loop for the current context. + + Returns an instance of EventLoop or raises an exception. + """ + if (self._local._loop is None and + not self._local._set_called and + threading.current_thread() is threading.main_thread()): + self.set_event_loop(self.new_event_loop()) + + if self._local._loop is None: + raise RuntimeError('There is no current event loop in thread %r.' + % threading.current_thread().name) + + return self._local._loop + + def set_event_loop(self, loop): + """Set the event loop.""" + self._local._set_called = True + assert loop is None or isinstance(loop, AbstractEventLoop) + self._local._loop = loop + + def new_event_loop(self): + """Create a new event loop. + + You must call set_event_loop() to make this the current event + loop. + """ + return self._loop_factory() + + +# Event loop policy. The policy itself is always global, even if the +# policy's rules say that there is an event loop per thread (or other +# notion of context). The default policy is installed by the first +# call to get_event_loop_policy(). +_event_loop_policy = None + +# Lock for protecting the on-the-fly creation of the event loop policy. +_lock = threading.Lock() + + +# A TLS for the running event loop, used by _get_running_loop. 
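Editor's note: the exception-handler hooks above can be overridden per loop. A small sketch of installing a custom handler and reporting a failure through call_exception_handler(); the handler body and the reported error are illustrative:

import asyncio

def handler(loop, context):
    # context is a dict; 'message' is always present, 'exception' only sometimes.
    print("caught:", context["message"], context.get("exception"))

async def main():
    loop = asyncio.get_running_loop()
    loop.set_exception_handler(handler)
    # Libraries use this path for failures that cannot be raised into user code.
    loop.call_exception_handler({
        "message": "something went wrong in a callback",
        "exception": RuntimeError("example"),
    })

asyncio.run(main())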
+class _RunningLoop(threading.local): + loop_pid = (None, None) + + +_running_loop = _RunningLoop() + + +def get_running_loop(): + """Return the running event loop. Raise a RuntimeError if there is none. + + This function is thread-specific. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + loop = _get_running_loop() + if loop is None: + raise RuntimeError('no running event loop') + return loop + + +def _get_running_loop(): + """Return the running event loop or None. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + running_loop, pid = _running_loop.loop_pid + if running_loop is not None and pid == os.getpid(): + return running_loop + + +def _set_running_loop(loop): + """Set the running event loop. + + This is a low-level function intended to be used by event loops. + This function is thread-specific. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + _running_loop.loop_pid = (loop, os.getpid()) + + +def _init_event_loop_policy(): + global _event_loop_policy + with _lock: + if _event_loop_policy is None: # pragma: no branch + from . import DefaultEventLoopPolicy + _event_loop_policy = DefaultEventLoopPolicy() + + +def get_event_loop_policy(): + """Get the current event loop policy.""" + if _event_loop_policy is None: + _init_event_loop_policy() + return _event_loop_policy + + +def set_event_loop_policy(policy): + """Set the current event loop policy. + + If policy is None, the default policy is restored.""" + global _event_loop_policy + assert policy is None or isinstance(policy, AbstractEventLoopPolicy) + _event_loop_policy = policy + + +def get_event_loop(): + """Return an asyncio event loop. + + When called from a coroutine or a callback (e.g. scheduled with call_soon + or similar API), this function will always return the running event loop. + + If there is no running event loop set, the function will return + the result of `get_event_loop_policy().get_event_loop()` call. + """ + # NOTE: this function is implemented in C (see _asynciomodule.c) + return _py__get_event_loop() + + +def _get_event_loop(stacklevel=3): + # This internal method is going away in Python 3.12, left here only for + # backwards compatibility with 3.10.0 - 3.10.8 and 3.11.0. + # Similarly, this method's C equivalent in _asyncio is going away as well. + # See GH-99949 for more details. + current_loop = _get_running_loop() + if current_loop is not None: + return current_loop + return get_event_loop_policy().get_event_loop() + + +def set_event_loop(loop): + """Equivalent to calling get_event_loop_policy().set_event_loop(loop).""" + get_event_loop_policy().set_event_loop(loop) + + +def new_event_loop(): + """Equivalent to calling get_event_loop_policy().new_event_loop().""" + return get_event_loop_policy().new_event_loop() + + +def get_child_watcher(): + """Equivalent to calling get_event_loop_policy().get_child_watcher().""" + return get_event_loop_policy().get_child_watcher() + + +def set_child_watcher(watcher): + """Equivalent to calling + get_event_loop_policy().set_child_watcher(watcher).""" + return get_event_loop_policy().set_child_watcher(watcher) + + +# Alias pure-Python implementations for testing purposes. 
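Editor's note: the module-level helpers above distinguish the running loop from the policy-managed one. A sketch of the recommended 3.10 usage: coroutine code asks for the running loop, while top-level code uses asyncio.run() or the policy functions:

import asyncio

async def main():
    loop = asyncio.get_running_loop()   # always succeeds inside a coroutine
    print(loop.time())

asyncio.run(main())                     # creates, runs and closes a fresh loop

# Outside of a running loop, a loop can also be managed explicitly
# through the policy helpers:
loop = asyncio.new_event_loop()
try:
    asyncio.set_event_loop(loop)
    loop.run_until_complete(main())
finally:
    asyncio.set_event_loop(None)
    loop.close()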
+_py__get_running_loop = _get_running_loop +_py__set_running_loop = _set_running_loop +_py_get_running_loop = get_running_loop +_py_get_event_loop = get_event_loop +_py__get_event_loop = _get_event_loop + + +try: + # get_event_loop() is one of the most frequently called + # functions in asyncio. Pure Python implementation is + # about 4 times slower than C-accelerated. + from _asyncio import (_get_running_loop, _set_running_loop, + get_running_loop, get_event_loop, _get_event_loop) +except ImportError: + pass +else: + # Alias C implementations for testing purposes. + _c__get_running_loop = _get_running_loop + _c__set_running_loop = _set_running_loop + _c_get_running_loop = get_running_loop + _c_get_event_loop = get_event_loop + _c__get_event_loop = _get_event_loop diff --git a/python310/asyncio/exceptions.py b/python310/asyncio/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..f07e4486577381616e7e2f7f05747ed7374316a1 --- /dev/null +++ b/python310/asyncio/exceptions.py @@ -0,0 +1,58 @@ +"""asyncio exceptions.""" + + +__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError', + 'IncompleteReadError', 'LimitOverrunError', + 'SendfileNotAvailableError') + + +class CancelledError(BaseException): + """The Future or Task was cancelled.""" + + +class TimeoutError(Exception): + """The operation exceeded the given deadline.""" + + +class InvalidStateError(Exception): + """The operation is not allowed in this state.""" + + +class SendfileNotAvailableError(RuntimeError): + """Sendfile syscall is not available. + + Raised if OS does not support sendfile syscall for given socket or + file type. + """ + + +class IncompleteReadError(EOFError): + """ + Incomplete read error. Attributes: + + - partial: read bytes string before the end of stream was reached + - expected: total number of expected bytes (or None if unknown) + """ + def __init__(self, partial, expected): + r_expected = 'undefined' if expected is None else repr(expected) + super().__init__(f'{len(partial)} bytes read on a total of ' + f'{r_expected} expected bytes') + self.partial = partial + self.expected = expected + + def __reduce__(self): + return type(self), (self.partial, self.expected) + + +class LimitOverrunError(Exception): + """Reached the buffer limit while looking for a separator. + + Attributes: + - consumed: total number of to be consumed bytes. + """ + def __init__(self, message, consumed): + super().__init__(message) + self.consumed = consumed + + def __reduce__(self): + return type(self), (self.args[0], self.consumed) diff --git a/python310/asyncio/format_helpers.py b/python310/asyncio/format_helpers.py new file mode 100644 index 0000000000000000000000000000000000000000..27d11fd4fa9553e21077bd4a93c523dbcb1d32cb --- /dev/null +++ b/python310/asyncio/format_helpers.py @@ -0,0 +1,76 @@ +import functools +import inspect +import reprlib +import sys +import traceback + +from . 
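Editor's note: the IncompleteReadError defined above carries the partially-read data. A sketch of handling it when StreamReader.readexactly() hits EOF early; feeding the reader by hand is only for illustration:

import asyncio

async def main():
    reader = asyncio.StreamReader()
    reader.feed_data(b"abc")
    reader.feed_eof()                   # the stream ends before 10 bytes arrive
    try:
        await reader.readexactly(10)
    except asyncio.IncompleteReadError as exc:
        # exc.partial holds what was read, exc.expected the requested size.
        print(exc.partial, exc.expected)    # b'abc' 10

asyncio.run(main())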
import constants + + +def _get_function_source(func): + func = inspect.unwrap(func) + if inspect.isfunction(func): + code = func.__code__ + return (code.co_filename, code.co_firstlineno) + if isinstance(func, functools.partial): + return _get_function_source(func.func) + if isinstance(func, functools.partialmethod): + return _get_function_source(func.func) + return None + + +def _format_callback_source(func, args): + func_repr = _format_callback(func, args, None) + source = _get_function_source(func) + if source: + func_repr += f' at {source[0]}:{source[1]}' + return func_repr + + +def _format_args_and_kwargs(args, kwargs): + """Format function arguments and keyword arguments. + + Special case for a single parameter: ('hello',) is formatted as ('hello'). + """ + # use reprlib to limit the length of the output + items = [] + if args: + items.extend(reprlib.repr(arg) for arg in args) + if kwargs: + items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items()) + return '({})'.format(', '.join(items)) + + +def _format_callback(func, args, kwargs, suffix=''): + if isinstance(func, functools.partial): + suffix = _format_args_and_kwargs(args, kwargs) + suffix + return _format_callback(func.func, func.args, func.keywords, suffix) + + if hasattr(func, '__qualname__') and func.__qualname__: + func_repr = func.__qualname__ + elif hasattr(func, '__name__') and func.__name__: + func_repr = func.__name__ + else: + func_repr = repr(func) + + func_repr += _format_args_and_kwargs(args, kwargs) + if suffix: + func_repr += suffix + return func_repr + + +def extract_stack(f=None, limit=None): + """Replacement for traceback.extract_stack() that only does the + necessary work for asyncio debug mode. + """ + if f is None: + f = sys._getframe().f_back + if limit is None: + # Limit the amount of work to a reasonable amount, as extract_stack() + # can be called for each coroutine and future in debug mode. + limit = constants.DEBUG_STACK_DEPTH + stack = traceback.StackSummary.extract(traceback.walk_stack(f), + limit=limit, + lookup_lines=False) + stack.reverse() + return stack diff --git a/python310/asyncio/futures.py b/python310/asyncio/futures.py new file mode 100644 index 0000000000000000000000000000000000000000..5d00ab4c8d3119bd4c2da81a1e90996f5da2af0c --- /dev/null +++ b/python310/asyncio/futures.py @@ -0,0 +1,426 @@ +"""A Future class similar to the one in PEP 3148.""" + +__all__ = ( + 'Future', 'wrap_future', 'isfuture', +) + +import concurrent.futures +import contextvars +import logging +import sys +from types import GenericAlias + +from . import base_futures +from . import events +from . import exceptions +from . import format_helpers + + +isfuture = base_futures.isfuture + + +_PENDING = base_futures._PENDING +_CANCELLED = base_futures._CANCELLED +_FINISHED = base_futures._FINISHED + + +STACK_DEBUG = logging.DEBUG - 1 # heavy-duty debugging + + +class Future: + """This class is *almost* compatible with concurrent.futures.Future. + + Differences: + + - This class is not thread-safe. + + - result() and exception() do not take a timeout argument and + raise an exception when the future isn't done yet. + + - Callbacks registered with add_done_callback() are always called + via the event loop's call_soon(). + + - This class is not compatible with the wait() and as_completed() + methods in the concurrent.futures package. + + (In Python 3.4 or later we may be able to unify the implementations.) + """ + + # Class variables serving as defaults for instance variables. 
+ _state = _PENDING + _result = None + _exception = None + _loop = None + _source_traceback = None + _cancel_message = None + # A saved CancelledError for later chaining as an exception context. + _cancelled_exc = None + + # This field is used for a dual purpose: + # - Its presence is a marker to declare that a class implements + # the Future protocol (i.e. is intended to be duck-type compatible). + # The value must also be not-None, to enable a subclass to declare + # that it is not compatible by setting this to None. + # - It is set by __iter__() below so that Task._step() can tell + # the difference between + # `await Future()` or`yield from Future()` (correct) vs. + # `yield Future()` (incorrect). + _asyncio_future_blocking = False + + __log_traceback = False + + def __init__(self, *, loop=None): + """Initialize the future. + + The optional event_loop argument allows explicitly setting the event + loop object used by the future. If it's not provided, the future uses + the default event loop. + """ + if loop is None: + self._loop = events._get_event_loop() + else: + self._loop = loop + self._callbacks = [] + if self._loop.get_debug(): + self._source_traceback = format_helpers.extract_stack( + sys._getframe(1)) + + _repr_info = base_futures._future_repr_info + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, + ' '.join(self._repr_info())) + + def __del__(self): + if not self.__log_traceback: + # set_exception() was not called, or result() or exception() + # has consumed the exception + return + exc = self._exception + context = { + 'message': + f'{self.__class__.__name__} exception was never retrieved', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + + __class_getitem__ = classmethod(GenericAlias) + + @property + def _log_traceback(self): + return self.__log_traceback + + @_log_traceback.setter + def _log_traceback(self, val): + if val: + raise ValueError('_log_traceback can only be set to False') + self.__log_traceback = False + + def get_loop(self): + """Return the event loop the Future is bound to.""" + loop = self._loop + if loop is None: + raise RuntimeError("Future object is not initialized.") + return loop + + def _make_cancelled_error(self): + """Create the CancelledError to raise if the Future is cancelled. + + This should only be called once when handling a cancellation since + it erases the saved context exception value. + """ + if self._cancel_message is None: + exc = exceptions.CancelledError() + else: + exc = exceptions.CancelledError(self._cancel_message) + exc.__context__ = self._cancelled_exc + # Remove the reference since we don't need this anymore. + self._cancelled_exc = None + return exc + + def cancel(self, msg=None): + """Cancel the future and schedule callbacks. + + If the future is already done or cancelled, return False. Otherwise, + change the future's state to cancelled, schedule the callbacks and + return True. + """ + self.__log_traceback = False + if self._state != _PENDING: + return False + self._state = _CANCELLED + self._cancel_message = msg + self.__schedule_callbacks() + return True + + def __schedule_callbacks(self): + """Internal: Ask the event loop to call all callbacks. + + The callbacks are scheduled to be called as soon as possible. Also + clears the callback list. 
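Editor's note: a small sketch of the Future lifecycle described above: create it through the loop, attach a done callback, complete it from a scheduled callback, and await it; the delay and payload are illustrative:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()

    def report(f):
        # Done callbacks receive the future itself, invoked via loop.call_soon().
        print("done, cancelled =", f.cancelled())

    fut.add_done_callback(report)
    loop.call_later(0.1, fut.set_result, "payload")
    print(await fut)                    # -> "payload"

asyncio.run(main())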
+ """ + callbacks = self._callbacks[:] + if not callbacks: + return + + self._callbacks[:] = [] + for callback, ctx in callbacks: + self._loop.call_soon(callback, self, context=ctx) + + def cancelled(self): + """Return True if the future was cancelled.""" + return self._state == _CANCELLED + + # Don't implement running(); see http://bugs.python.org/issue18699 + + def done(self): + """Return True if the future is done. + + Done means either that a result / exception are available, or that the + future was cancelled. + """ + return self._state != _PENDING + + def result(self): + """Return the result this future represents. + + If the future has been cancelled, raises CancelledError. If the + future's result isn't yet available, raises InvalidStateError. If + the future is done and has an exception set, this exception is raised. + """ + if self._state == _CANCELLED: + exc = self._make_cancelled_error() + raise exc + if self._state != _FINISHED: + raise exceptions.InvalidStateError('Result is not ready.') + self.__log_traceback = False + if self._exception is not None: + raise self._exception.with_traceback(self._exception_tb) + return self._result + + def exception(self): + """Return the exception that was set on this future. + + The exception (or None if no exception was set) is returned only if + the future is done. If the future has been cancelled, raises + CancelledError. If the future isn't done yet, raises + InvalidStateError. + """ + if self._state == _CANCELLED: + exc = self._make_cancelled_error() + raise exc + if self._state != _FINISHED: + raise exceptions.InvalidStateError('Exception is not set.') + self.__log_traceback = False + return self._exception + + def add_done_callback(self, fn, *, context=None): + """Add a callback to be run when the future becomes done. + + The callback is called with a single argument - the future object. If + the future is already done when this is called, the callback is + scheduled with call_soon. + """ + if self._state != _PENDING: + self._loop.call_soon(fn, self, context=context) + else: + if context is None: + context = contextvars.copy_context() + self._callbacks.append((fn, context)) + + # New method not in PEP 3148. + + def remove_done_callback(self, fn): + """Remove all instances of a callback from the "call when done" list. + + Returns the number of callbacks removed. + """ + filtered_callbacks = [(f, ctx) + for (f, ctx) in self._callbacks + if f != fn] + removed_count = len(self._callbacks) - len(filtered_callbacks) + if removed_count: + self._callbacks[:] = filtered_callbacks + return removed_count + + # So-called internal methods (note: no set_running_or_notify_cancel()). + + def set_result(self, result): + """Mark the future done and set its result. + + If the future is already done when this method is called, raises + InvalidStateError. + """ + if self._state != _PENDING: + raise exceptions.InvalidStateError(f'{self._state}: {self!r}') + self._result = result + self._state = _FINISHED + self.__schedule_callbacks() + + def set_exception(self, exception): + """Mark the future done and set an exception. + + If the future is already done when this method is called, raises + InvalidStateError. 
+ """ + if self._state != _PENDING: + raise exceptions.InvalidStateError(f'{self._state}: {self!r}') + if isinstance(exception, type): + exception = exception() + if type(exception) is StopIteration: + raise TypeError("StopIteration interacts badly with generators " + "and cannot be raised into a Future") + self._exception = exception + self._exception_tb = exception.__traceback__ + self._state = _FINISHED + self.__schedule_callbacks() + self.__log_traceback = True + + def __await__(self): + if not self.done(): + self._asyncio_future_blocking = True + yield self # This tells Task to wait for completion. + if not self.done(): + raise RuntimeError("await wasn't used with future") + return self.result() # May raise too. + + __iter__ = __await__ # make compatible with 'yield from'. + + +# Needed for testing purposes. +_PyFuture = Future + + +def _get_loop(fut): + # Tries to call Future.get_loop() if it's available. + # Otherwise fallbacks to using the old '_loop' property. + try: + get_loop = fut.get_loop + except AttributeError: + pass + else: + return get_loop() + return fut._loop + + +def _set_result_unless_cancelled(fut, result): + """Helper setting the result only if the future was not cancelled.""" + if fut.cancelled(): + return + fut.set_result(result) + + +def _convert_future_exc(exc): + exc_class = type(exc) + if exc_class is concurrent.futures.CancelledError: + return exceptions.CancelledError(*exc.args) + elif exc_class is concurrent.futures.TimeoutError: + return exceptions.TimeoutError(*exc.args) + elif exc_class is concurrent.futures.InvalidStateError: + return exceptions.InvalidStateError(*exc.args) + else: + return exc + + +def _set_concurrent_future_state(concurrent, source): + """Copy state from a future to a concurrent.futures.Future.""" + assert source.done() + if source.cancelled(): + concurrent.cancel() + if not concurrent.set_running_or_notify_cancel(): + return + exception = source.exception() + if exception is not None: + concurrent.set_exception(_convert_future_exc(exception)) + else: + result = source.result() + concurrent.set_result(result) + + +def _copy_future_state(source, dest): + """Internal helper to copy state from another Future. + + The other Future may be a concurrent.futures.Future. + """ + assert source.done() + if dest.cancelled(): + return + assert not dest.done() + if source.cancelled(): + dest.cancel() + else: + exception = source.exception() + if exception is not None: + dest.set_exception(_convert_future_exc(exception)) + else: + result = source.result() + dest.set_result(result) + + +def _chain_future(source, destination): + """Chain two futures so that when one completes, so does the other. + + The result (or exception) of source will be copied to destination. + If destination is cancelled, source gets cancelled too. + Compatible with both asyncio.Future and concurrent.futures.Future. 
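Editor's note: set_exception() above marks a future as failed; awaiting the future then re-raises the stored exception. A small sketch:

import asyncio

async def main():
    loop = asyncio.get_running_loop()
    fut = loop.create_future()
    loop.call_soon(fut.set_exception, RuntimeError("boom"))
    try:
        await fut
    except RuntimeError as exc:
        print("future failed with:", exc)

asyncio.run(main())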
+ """ + if not isfuture(source) and not isinstance(source, + concurrent.futures.Future): + raise TypeError('A future is required for source argument') + if not isfuture(destination) and not isinstance(destination, + concurrent.futures.Future): + raise TypeError('A future is required for destination argument') + source_loop = _get_loop(source) if isfuture(source) else None + dest_loop = _get_loop(destination) if isfuture(destination) else None + + def _set_state(future, other): + if isfuture(future): + _copy_future_state(other, future) + else: + _set_concurrent_future_state(future, other) + + def _call_check_cancel(destination): + if destination.cancelled(): + if source_loop is None or source_loop is dest_loop: + source.cancel() + else: + source_loop.call_soon_threadsafe(source.cancel) + + def _call_set_state(source): + if (destination.cancelled() and + dest_loop is not None and dest_loop.is_closed()): + return + if dest_loop is None or dest_loop is source_loop: + _set_state(destination, source) + else: + if dest_loop.is_closed(): + return + dest_loop.call_soon_threadsafe(_set_state, destination, source) + + destination.add_done_callback(_call_check_cancel) + source.add_done_callback(_call_set_state) + + +def wrap_future(future, *, loop=None): + """Wrap concurrent.futures.Future object.""" + if isfuture(future): + return future + assert isinstance(future, concurrent.futures.Future), \ + f'concurrent.futures.Future is expected, got {future!r}' + if loop is None: + loop = events._get_event_loop() + new_future = loop.create_future() + _chain_future(future, new_future) + return new_future + + +try: + import _asyncio +except ImportError: + pass +else: + # _CFuture is needed for tests. + Future = _CFuture = _asyncio.Future diff --git a/python310/asyncio/locks.py b/python310/asyncio/locks.py new file mode 100644 index 0000000000000000000000000000000000000000..e192159127a3f602b5649e9a4a48e841e572ec00 --- /dev/null +++ b/python310/asyncio/locks.py @@ -0,0 +1,438 @@ +"""Synchronization primitives.""" + +__all__ = ('Lock', 'Event', 'Condition', 'Semaphore', 'BoundedSemaphore') + +import collections + +from . import exceptions +from . import mixins +from . import tasks + + +class _ContextManagerMixin: + async def __aenter__(self): + await self.acquire() + # We have no use for the "as ..." clause in the with + # statement for locks. + return None + + async def __aexit__(self, exc_type, exc, tb): + self.release() + + +class Lock(_ContextManagerMixin, mixins._LoopBoundMixin): + """Primitive lock objects. + + A primitive lock is a synchronization primitive that is not owned + by a particular coroutine when locked. A primitive lock is in one + of two states, 'locked' or 'unlocked'. + + It is created in the unlocked state. It has two basic methods, + acquire() and release(). When the state is unlocked, acquire() + changes the state to locked and returns immediately. When the + state is locked, acquire() blocks until a call to release() in + another coroutine changes it to unlocked, then the acquire() call + resets it to locked and returns. The release() method should only + be called in the locked state; it changes the state to unlocked + and returns immediately. If an attempt is made to release an + unlocked lock, a RuntimeError will be raised. + + When more than one coroutine is blocked in acquire() waiting for + the state to turn to unlocked, only one coroutine proceeds when a + release() call resets the state to unlocked; first coroutine which + is blocked in acquire() is being processed. 
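Editor's note: wrap_future() above bridges concurrent.futures into asyncio. A sketch of awaiting a thread-pool result through it; the worker function is illustrative, and loop.run_in_executor() wraps the same machinery:

import asyncio
import concurrent.futures
import time

def blocking_work():
    time.sleep(0.1)                     # stands in for blocking code
    return 42

async def main():
    with concurrent.futures.ThreadPoolExecutor() as pool:
        cf = pool.submit(blocking_work)            # concurrent.futures.Future
        result = await asyncio.wrap_future(cf)     # chained to an asyncio.Future
        print(result)

asyncio.run(main())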
+ + acquire() is a coroutine and should be called with 'await'. + + Locks also support the asynchronous context management protocol. + 'async with lock' statement should be used. + + Usage: + + lock = Lock() + ... + await lock.acquire() + try: + ... + finally: + lock.release() + + Context manager usage: + + lock = Lock() + ... + async with lock: + ... + + Lock objects can be tested for locking state: + + if not lock.locked(): + await lock.acquire() + else: + # lock is acquired + ... + + """ + + def __init__(self, *, loop=mixins._marker): + super().__init__(loop=loop) + self._waiters = None + self._locked = False + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self._locked else 'unlocked' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def locked(self): + """Return True if lock is acquired.""" + return self._locked + + async def acquire(self): + """Acquire a lock. + + This method blocks until the lock is unlocked, then sets it to + locked and returns True. + """ + if (not self._locked and (self._waiters is None or + all(w.cancelled() for w in self._waiters))): + self._locked = True + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + # Finally block should be called before the CancelledError + # handling as we don't want CancelledError to call + # _wake_up_first() and attempt to wake up itself. + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + if not self._locked: + self._wake_up_first() + raise + + self._locked = True + return True + + def release(self): + """Release a lock. + + When the lock is locked, reset it to unlocked, and return. + If any other coroutines are blocked waiting for the lock to become + unlocked, allow exactly one of them to proceed. + + When invoked on an unlocked lock, a RuntimeError is raised. + + There is no return value. + """ + if self._locked: + self._locked = False + self._wake_up_first() + else: + raise RuntimeError('Lock is not acquired.') + + def _wake_up_first(self): + """Wake up the first waiter if it isn't done.""" + if not self._waiters: + return + try: + fut = next(iter(self._waiters)) + except StopIteration: + return + + # .done() necessarily means that a waiter will wake up later on and + # either take the lock, or, if it was cancelled and lock wasn't + # taken already, will hit this again and wake up a new waiter. + if not fut.done(): + fut.set_result(True) + + +class Event(mixins._LoopBoundMixin): + """Asynchronous equivalent to threading.Event. + + Class implementing event objects. An event manages a flag that can be set + to true with the set() method and reset to false with the clear() method. + The wait() method blocks until the flag is true. The flag is initially + false. + """ + + def __init__(self, *, loop=mixins._marker): + super().__init__(loop=loop) + self._waiters = collections.deque() + self._value = False + + def __repr__(self): + res = super().__repr__() + extra = 'set' if self._value else 'unset' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def is_set(self): + """Return True if and only if the internal flag is true.""" + return self._value + + def set(self): + """Set the internal flag to true. All coroutines waiting for it to + become true are awakened. Coroutine that call wait() once the flag is + true will not block at all. 
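Editor's note: a short sketch of the waiter behaviour described above: two tasks contend for the lock and are served one at a time; the task names and sleep are illustrative:

import asyncio

async def worker(name, lock):
    async with lock:                    # equivalent to acquire()/release() in try/finally
        print(name, "holds the lock")
        await asyncio.sleep(0.1)

async def main():
    lock = asyncio.Lock()
    await asyncio.gather(worker("a", lock), worker("b", lock))

asyncio.run(main())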
+ """ + if not self._value: + self._value = True + + for fut in self._waiters: + if not fut.done(): + fut.set_result(True) + + def clear(self): + """Reset the internal flag to false. Subsequently, coroutines calling + wait() will block until set() is called to set the internal flag + to true again.""" + self._value = False + + async def wait(self): + """Block until the internal flag is true. + + If the internal flag is true on entry, return True + immediately. Otherwise, block until another coroutine calls + set() to set the flag to true, then return True. + """ + if self._value: + return True + + fut = self._get_loop().create_future() + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + +class Condition(_ContextManagerMixin, mixins._LoopBoundMixin): + """Asynchronous equivalent to threading.Condition. + + This class implements condition variable objects. A condition variable + allows one or more coroutines to wait until they are notified by another + coroutine. + + A new Lock object is created and used as the underlying lock. + """ + + def __init__(self, lock=None, *, loop=mixins._marker): + super().__init__(loop=loop) + if lock is None: + lock = Lock() + + self._lock = lock + # Export the lock's locked(), acquire() and release() methods. + self.locked = lock.locked + self.acquire = lock.acquire + self.release = lock.release + + self._waiters = collections.deque() + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else 'unlocked' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + async def wait(self): + """Wait until notified. + + If the calling coroutine has not acquired the lock when this + method is called, a RuntimeError is raised. + + This method releases the underlying lock, and then blocks + until it is awakened by a notify() or notify_all() call for + the same condition variable in another coroutine. Once + awakened, it re-acquires the lock and returns True. + """ + if not self.locked(): + raise RuntimeError('cannot wait on un-acquired lock') + + self.release() + try: + fut = self._get_loop().create_future() + self._waiters.append(fut) + try: + await fut + return True + finally: + self._waiters.remove(fut) + + finally: + # Must reacquire lock even if wait is cancelled + cancelled = False + while True: + try: + await self.acquire() + break + except exceptions.CancelledError: + cancelled = True + + if cancelled: + raise exceptions.CancelledError + + async def wait_for(self, predicate): + """Wait until a predicate becomes true. + + The predicate should be a callable which result will be + interpreted as a boolean value. The final predicate value is + the return value. + """ + result = predicate() + while not result: + await self.wait() + result = predicate() + return result + + def notify(self, n=1): + """By default, wake up one coroutine waiting on this condition, if any. + If the calling coroutine has not acquired the lock when this method + is called, a RuntimeError is raised. + + This method wakes up at most n of the coroutines waiting for the + condition variable; it is a no-op if no coroutines are waiting. + + Note: an awakened coroutine does not actually return from its + wait() call until it can reacquire the lock. Since notify() does + not release the lock, its caller should. 
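Editor's note: a producer/consumer sketch for the Condition methods above: the consumer blocks in wait_for() until the producer changes the shared state and calls notify() while holding the lock; the shared list is illustrative:

import asyncio

async def consumer(cond, items):
    async with cond:
        # wait_for() releases the lock while waiting and re-acquires it
        # before re-evaluating the predicate.
        await cond.wait_for(lambda: len(items) > 0)
        print("got", items.pop())

async def producer(cond, items):
    await asyncio.sleep(0.1)
    async with cond:
        items.append("value")
        cond.notify()

async def main():
    cond = asyncio.Condition()
    items = []
    await asyncio.gather(consumer(cond, items), producer(cond, items))

asyncio.run(main())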
+ """ + if not self.locked(): + raise RuntimeError('cannot notify on un-acquired lock') + + idx = 0 + for fut in self._waiters: + if idx >= n: + break + + if not fut.done(): + idx += 1 + fut.set_result(False) + + def notify_all(self): + """Wake up all threads waiting on this condition. This method acts + like notify(), but wakes up all waiting threads instead of one. If the + calling thread has not acquired the lock when this method is called, + a RuntimeError is raised. + """ + self.notify(len(self._waiters)) + + +class Semaphore(_ContextManagerMixin, mixins._LoopBoundMixin): + """A Semaphore implementation. + + A semaphore manages an internal counter which is decremented by each + acquire() call and incremented by each release() call. The counter + can never go below zero; when acquire() finds that it is zero, it blocks, + waiting until some other thread calls release(). + + Semaphores also support the context management protocol. + + The optional argument gives the initial value for the internal + counter; it defaults to 1. If the value given is less than 0, + ValueError is raised. + """ + + def __init__(self, value=1, *, loop=mixins._marker): + super().__init__(loop=loop) + if value < 0: + raise ValueError("Semaphore initial value must be >= 0") + self._waiters = None + self._value = value + + def __repr__(self): + res = super().__repr__() + extra = 'locked' if self.locked() else f'unlocked, value:{self._value}' + if self._waiters: + extra = f'{extra}, waiters:{len(self._waiters)}' + return f'<{res[1:-1]} [{extra}]>' + + def locked(self): + """Returns True if semaphore cannot be acquired immediately.""" + return self._value == 0 or ( + any(not w.cancelled() for w in (self._waiters or ()))) + + async def acquire(self): + """Acquire a semaphore. + + If the internal counter is larger than zero on entry, + decrement it by one and return True immediately. If it is + zero on entry, block, waiting until some other coroutine has + called release() to make it larger than 0, and then return + True. + """ + if not self.locked(): + self._value -= 1 + return True + + if self._waiters is None: + self._waiters = collections.deque() + fut = self._get_loop().create_future() + self._waiters.append(fut) + + # Finally block should be called before the CancelledError + # handling as we don't want CancelledError to call + # _wake_up_first() and attempt to wake up itself. + try: + try: + await fut + finally: + self._waiters.remove(fut) + except exceptions.CancelledError: + if not fut.cancelled(): + self._value += 1 + self._wake_up_next() + raise + + if self._value > 0: + self._wake_up_next() + return True + + def release(self): + """Release a semaphore, incrementing the internal counter by one. + + When it was zero on entry and another coroutine is waiting for it to + become larger than zero again, wake up that coroutine. + """ + self._value += 1 + self._wake_up_next() + + def _wake_up_next(self): + """Wake up the first waiter that isn't done.""" + if not self._waiters: + return + + for fut in self._waiters: + if not fut.done(): + self._value -= 1 + fut.set_result(True) + return + + +class BoundedSemaphore(Semaphore): + """A bounded semaphore implementation. + + This raises ValueError in release() if it would increase the value + above the initial value. 
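Editor's note: a sketch using the Semaphore above to cap concurrency; the limit and the fake workload are illustrative:

import asyncio

async def fetch(i, sem):
    async with sem:                     # at most 2 of these bodies run at once
        print("start", i)
        await asyncio.sleep(0.1)
        print("done", i)

async def main():
    sem = asyncio.Semaphore(2)
    await asyncio.gather(*(fetch(i, sem) for i in range(5)))

asyncio.run(main())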
+ """ + + def __init__(self, value=1, *, loop=mixins._marker): + self._bound_value = value + super().__init__(value, loop=loop) + + def release(self): + if self._value >= self._bound_value: + raise ValueError('BoundedSemaphore released too many times') + super().release() diff --git a/python310/asyncio/log.py b/python310/asyncio/log.py new file mode 100644 index 0000000000000000000000000000000000000000..23a7074afb0a53aa114341bdbf060ea68f543603 --- /dev/null +++ b/python310/asyncio/log.py @@ -0,0 +1,7 @@ +"""Logging configuration.""" + +import logging + + +# Name the logger after the package. +logger = logging.getLogger(__package__) diff --git a/python310/asyncio/mixins.py b/python310/asyncio/mixins.py new file mode 100644 index 0000000000000000000000000000000000000000..650df05ccc93ea652d22637b740b56d8a4bb60a3 --- /dev/null +++ b/python310/asyncio/mixins.py @@ -0,0 +1,31 @@ +"""Event loop mixins.""" + +import threading +from . import events + +_global_lock = threading.Lock() + +# Used as a sentinel for loop parameter +_marker = object() + + +class _LoopBoundMixin: + _loop = None + + def __init__(self, *, loop=_marker): + if loop is not _marker: + raise TypeError( + f'As of 3.10, the *loop* parameter was removed from ' + f'{type(self).__name__}() since it is no longer necessary' + ) + + def _get_loop(self): + loop = events._get_running_loop() + + if self._loop is None: + with _global_lock: + if self._loop is None: + self._loop = loop + if loop is not self._loop: + raise RuntimeError(f'{self!r} is bound to a different event loop') + return loop diff --git a/python310/asyncio/proactor_events.py b/python310/asyncio/proactor_events.py new file mode 100644 index 0000000000000000000000000000000000000000..0916d9eb5706ca48b2165e30a0f1c409cabc60b8 --- /dev/null +++ b/python310/asyncio/proactor_events.py @@ -0,0 +1,875 @@ +"""Event loop using a proactor and related classes. + +A proactor is a "notify-on-completion" multiplexer. Currently a +proactor is only implemented on Windows with IOCP. +""" + +__all__ = 'BaseProactorEventLoop', + +import io +import os +import socket +import warnings +import signal +import threading +import collections + +from . import base_events +from . import constants +from . import futures +from . import exceptions +from . import protocols +from . import sslproto +from . import transports +from . import trsock +from .log import logger + + +def _set_socket_extra(transport, sock): + transport._extra['socket'] = trsock.TransportSocket(sock) + + try: + transport._extra['sockname'] = sock.getsockname() + except socket.error: + if transport._loop.get_debug(): + logger.warning( + "getsockname() failed on %r", sock, exc_info=True) + + if 'peername' not in transport._extra: + try: + transport._extra['peername'] = sock.getpeername() + except socket.error: + # UDP sockets may not have a peer name + transport._extra['peername'] = None + + +class _ProactorBasePipeTransport(transports._FlowControlMixin, + transports.BaseTransport): + """Base class for pipe and socket transports.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(extra, loop) + self._set_extra(sock) + self._sock = sock + self.set_protocol(protocol) + self._server = server + self._buffer = None # None or bytearray. + self._read_fut = None + self._write_fut = None + self._pending_write = 0 + self._conn_lost = 0 + self._closing = False # Set when close() called. 
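Editor's note: two behaviours implemented just above, sketched together: BoundedSemaphore.release() refuses to push the counter past its initial value, and (via _LoopBoundMixin in mixins.py) passing the removed loop= argument to these primitives raises TypeError in 3.10:

import asyncio

async def main():
    sem = asyncio.BoundedSemaphore(1)
    try:
        sem.release()                   # counter is already at its initial value
    except ValueError as exc:
        print(exc)                      # BoundedSemaphore released too many times

    try:
        asyncio.Lock(loop=asyncio.get_running_loop())   # parameter removed in 3.10
    except TypeError as exc:
        print(exc)

asyncio.run(main())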
+ self._called_connection_lost = False + self._eof_written = False + if self._server is not None: + self._server._attach() + self._loop.call_soon(self._protocol.connection_made, self) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + if self._sock is not None: + info.append(f'fd={self._sock.fileno()}') + if self._read_fut is not None: + info.append(f'read={self._read_fut!r}') + if self._write_fut is not None: + info.append(f'write={self._write_fut!r}') + if self._buffer: + info.append(f'write_bufsize={len(self._buffer)}') + if self._eof_written: + info.append('EOF written') + return '<{}>'.format(' '.join(info)) + + def _set_extra(self, sock): + self._extra['pipe'] = sock + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._conn_lost += 1 + if not self._buffer and self._write_fut is None: + self._loop.call_soon(self._call_connection_lost, None) + if self._read_fut is not None: + self._read_fut.cancel() + self._read_fut = None + + def __del__(self, _warn=warnings.warn): + if self._sock is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._sock.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + try: + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + finally: + self._force_close(exc) + + def _force_close(self, exc): + if self._empty_waiter is not None and not self._empty_waiter.done(): + if exc is None: + self._empty_waiter.set_result(None) + else: + self._empty_waiter.set_exception(exc) + if self._closing and self._called_connection_lost: + return + self._closing = True + self._conn_lost += 1 + if self._write_fut: + self._write_fut.cancel() + self._write_fut = None + if self._read_fut: + self._read_fut.cancel() + self._read_fut = None + self._pending_write = 0 + self._buffer = None + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + if self._called_connection_lost: + return + try: + self._protocol.connection_lost(exc) + finally: + # XXX If there is a pending overlapped read on the other + # end then it may fail with ERROR_NETNAME_DELETED if we + # just close our end. First calling shutdown() seems to + # cure it, but maybe using DisconnectEx() would be better. 
+ if hasattr(self._sock, 'shutdown') and self._sock.fileno() != -1: + self._sock.shutdown(socket.SHUT_RDWR) + self._sock.close() + self._sock = None + server = self._server + if server is not None: + server._detach() + self._server = None + self._called_connection_lost = True + + def get_write_buffer_size(self): + size = self._pending_write + if self._buffer is not None: + size += len(self._buffer) + return size + + +class _ProactorReadPipeTransport(_ProactorBasePipeTransport, + transports.ReadTransport): + """Transport for read pipes.""" + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None, buffer_size=65536): + self._pending_data_length = -1 + self._paused = True + super().__init__(loop, sock, protocol, waiter, extra, server) + + self._data = bytearray(buffer_size) + self._loop.call_soon(self._loop_reading) + self._paused = False + + def is_reading(self): + return not self._paused and not self._closing + + def pause_reading(self): + if self._closing or self._paused: + return + self._paused = True + + # bpo-33694: Don't cancel self._read_fut because cancelling an + # overlapped WSASend() loss silently data with the current proactor + # implementation. + # + # If CancelIoEx() fails with ERROR_NOT_FOUND, it means that WSASend() + # completed (even if HasOverlappedIoCompleted() returns 0), but + # Overlapped.cancel() currently silently ignores the ERROR_NOT_FOUND + # error. Once the overlapped is ignored, the IOCP loop will ignores the + # completion I/O event and so not read the result of the overlapped + # WSARecv(). + + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if self._closing or not self._paused: + return + + self._paused = False + if self._read_fut is None: + self._loop.call_soon(self._loop_reading, None) + + length = self._pending_data_length + self._pending_data_length = -1 + if length > -1: + # Call the protocol methode after calling _loop_reading(), + # since the protocol can decide to pause reading again. + self._loop.call_soon(self._data_received, self._data[:length], length) + + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _eof_received(self): + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + try: + keep_open = self._protocol.eof_received() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.eof_received() call failed.') + return + + if not keep_open: + self.close() + + def _data_received(self, data, length): + if self._paused: + # Don't call any protocol method while reading is paused. + # The protocol will be called on resume_reading(). 
+ assert self._pending_data_length == -1 + self._pending_data_length = length + return + + if length == 0: + self._eof_received() + return + + if isinstance(self._protocol, protocols.BufferedProtocol): + try: + protocols._feed_data_to_buffered_proto(self._protocol, data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, + 'Fatal error: protocol.buffer_updated() ' + 'call failed.') + return + else: + self._protocol.data_received(data) + + def _loop_reading(self, fut=None): + length = -1 + data = None + try: + if fut is not None: + assert self._read_fut is fut or (self._read_fut is None and + self._closing) + self._read_fut = None + if fut.done(): + # deliver data later in "finally" clause + length = fut.result() + if length == 0: + # we got end-of-file so no need to reschedule a new read + return + + data = self._data[:length] + else: + # the future will be replaced by next proactor.recv call + fut.cancel() + + if self._closing: + # since close() has been called we ignore any read data + return + + # bpo-33694: buffer_updated() has currently no fast path because of + # a data loss issue caused by overlapped WSASend() cancellation. + + if not self._paused: + # reschedule a new read + self._read_fut = self._loop._proactor.recv_into(self._sock, self._data) + except ConnectionAbortedError as exc: + if not self._closing: + self._fatal_error(exc, 'Fatal read error on pipe transport') + elif self._loop.get_debug(): + logger.debug("Read error on pipe transport while closing", + exc_info=True) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + except exceptions.CancelledError: + if not self._closing: + raise + else: + if not self._paused: + self._read_fut.add_done_callback(self._loop_reading) + finally: + if length > -1: + self._data_received(data, length) + + +class _ProactorBaseWritePipeTransport(_ProactorBasePipeTransport, + transports.WriteTransport): + """Transport for write pipes.""" + + _start_tls_compatible = True + + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._empty_waiter = None + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError( + f"data argument must be a bytes-like object, " + f"not {type(data).__name__}") + if self._eof_written: + raise RuntimeError('write_eof() already called') + if self._empty_waiter is not None: + raise RuntimeError('unable to write; sendfile is in progress') + + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + # Observable states: + # 1. IDLE: _write_fut and _buffer both None + # 2. WRITING: _write_fut set; _buffer None + # 3. BACKED UP: _write_fut set; _buffer a bytearray + # We always copy the data, so the caller can't modify it + # while we're still waiting for the I/O to happen. + if self._write_fut is None: # IDLE -> WRITING + assert self._buffer is None + # Pass a copy, except if it's already immutable. + self._loop_writing(data=bytes(data)) + elif not self._buffer: # WRITING -> BACKED UP + # Make a mutable copy which we can extend. + self._buffer = bytearray(data) + self._maybe_pause_protocol() + else: # BACKED UP + # Append to buffer (also copies). 
+ self._buffer.extend(data) + self._maybe_pause_protocol() + + def _loop_writing(self, f=None, data=None): + try: + if f is not None and self._write_fut is None and self._closing: + # XXX most likely self._force_close() has been called, and + # it has set self._write_fut to None. + return + assert f is self._write_fut + self._write_fut = None + self._pending_write = 0 + if f: + f.result() + if data is None: + data = self._buffer + self._buffer = None + if not data: + if self._closing: + self._loop.call_soon(self._call_connection_lost, None) + if self._eof_written: + self._sock.shutdown(socket.SHUT_WR) + # Now that we've reduced the buffer size, tell the + # protocol to resume writing if it was paused. Note that + # we do this last since the callback is called immediately + # and it may add more data to the buffer (even causing the + # protocol to be paused again). + self._maybe_resume_protocol() + else: + self._write_fut = self._loop._proactor.send(self._sock, data) + if not self._write_fut.done(): + assert self._pending_write == 0 + self._pending_write = len(data) + self._write_fut.add_done_callback(self._loop_writing) + self._maybe_pause_protocol() + else: + self._write_fut.add_done_callback(self._loop_writing) + if self._empty_waiter is not None and self._write_fut is None: + self._empty_waiter.set_result(None) + except ConnectionResetError as exc: + self._force_close(exc) + except OSError as exc: + self._fatal_error(exc, 'Fatal write error on pipe transport') + + def can_write_eof(self): + return True + + def write_eof(self): + self.close() + + def abort(self): + self._force_close(None) + + def _make_empty_waiter(self): + if self._empty_waiter is not None: + raise RuntimeError("Empty waiter is already set") + self._empty_waiter = self._loop.create_future() + if self._write_fut is None: + self._empty_waiter.set_result(None) + return self._empty_waiter + + def _reset_empty_waiter(self): + self._empty_waiter = None + + +class _ProactorWritePipeTransport(_ProactorBaseWritePipeTransport): + def __init__(self, *args, **kw): + super().__init__(*args, **kw) + self._read_fut = self._loop._proactor.recv(self._sock, 16) + self._read_fut.add_done_callback(self._pipe_closed) + + def _pipe_closed(self, fut): + if fut.cancelled(): + # the transport has been closed + return + assert fut.result() == b'' + if self._closing: + assert self._read_fut is None + return + assert fut is self._read_fut, (fut, self._read_fut) + self._read_fut = None + if self._write_fut is not None: + self._force_close(BrokenPipeError()) + else: + self.close() + + +class _ProactorDatagramTransport(_ProactorBasePipeTransport, + transports.DatagramTransport): + max_size = 256 * 1024 + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + self._address = address + self._empty_waiter = None + # We don't need to call _protocol.connection_made() since our base + # constructor does it for us. 
+ super().__init__(loop, sock, protocol, waiter=waiter, extra=extra) + + # The base constructor sets _buffer = None, so we set it here + self._buffer = collections.deque() + self._loop.call_soon(self._loop_reading) + + def _set_extra(self, sock): + _set_socket_extra(self, sock) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def abort(self): + self._force_close(None) + + def sendto(self, data, addr=None): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError('data argument must be bytes-like object (%r)', + type(data)) + + if not data: + return + + if self._address is not None and addr not in (None, self._address): + raise ValueError( + f'Invalid address: must be None or {self._address}') + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.sendto() raised exception.') + self._conn_lost += 1 + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + + if self._write_fut is None: + # No current write operations are active, kick one off + self._loop_writing() + # else: A write operation is already kicked off + + self._maybe_pause_protocol() + + def _loop_writing(self, fut=None): + try: + if self._conn_lost: + return + + assert fut is self._write_fut + self._write_fut = None + if fut: + # We are in a _loop_writing() done callback, get the result + fut.result() + + if not self._buffer or (self._conn_lost and self._address): + # The connection has been closed + if self._closing: + self._loop.call_soon(self._call_connection_lost, None) + return + + data, addr = self._buffer.popleft() + if self._address is not None: + self._write_fut = self._loop._proactor.send(self._sock, + data) + else: + self._write_fut = self._loop._proactor.sendto(self._sock, + data, + addr=addr) + except OSError as exc: + self._protocol.error_received(exc) + except Exception as exc: + self._fatal_error(exc, 'Fatal write error on datagram transport') + else: + self._write_fut.add_done_callback(self._loop_writing) + self._maybe_resume_protocol() + + def _loop_reading(self, fut=None): + data = None + try: + if self._conn_lost: + return + + assert self._read_fut is fut or (self._read_fut is None and + self._closing) + + self._read_fut = None + if fut is not None: + res = fut.result() + + if self._closing: + # since close() has been called we ignore any read data + data = None + return + + if self._address is not None: + data, addr = res, self._address + else: + data, addr = res + + if self._conn_lost: + return + if self._address is not None: + self._read_fut = self._loop._proactor.recv(self._sock, + self.max_size) + else: + self._read_fut = self._loop._proactor.recvfrom(self._sock, + self.max_size) + except OSError as exc: + self._protocol.error_received(exc) + except exceptions.CancelledError: + if not self._closing: + raise + else: + if self._read_fut is not None: + self._read_fut.add_done_callback(self._loop_reading) + finally: + if data: + self._protocol.datagram_received(data, addr) + + +class _ProactorDuplexPipeTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for duplex pipes.""" + + def can_write_eof(self): + return False + + def write_eof(self): + raise NotImplementedError + + +class _ProactorSocketTransport(_ProactorReadPipeTransport, + _ProactorBaseWritePipeTransport, + transports.Transport): + """Transport for connected sockets.""" + + _sendfile_compatible = 
constants._SendfileMode.TRY_NATIVE + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + super().__init__(loop, sock, protocol, waiter, extra, server) + base_events._set_nodelay(sock) + + def _set_extra(self, sock): + _set_socket_extra(self, sock) + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing or self._eof_written: + return + self._eof_written = True + if self._write_fut is None: + self._sock.shutdown(socket.SHUT_WR) + + +class BaseProactorEventLoop(base_events.BaseEventLoop): + + def __init__(self, proactor): + super().__init__() + logger.debug('Using proactor: %s', proactor.__class__.__name__) + self._proactor = proactor + self._selector = proactor # convenient alias + self._self_reading_future = None + self._accept_futures = {} # socket file descriptor => Future + proactor.set_loop(self) + self._make_self_pipe() + if threading.current_thread() is threading.main_thread(): + # wakeup fd can only be installed to a file descriptor from the main thread + signal.set_wakeup_fd(self._csock.fileno()) + + def _make_socket_transport(self, sock, protocol, waiter=None, + extra=None, server=None): + return _ProactorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=None): + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + _ProactorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _ProactorDatagramTransport(self, sock, protocol, address, + waiter, extra) + + def _make_duplex_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorDuplexPipeTransport(self, + sock, protocol, waiter, extra) + + def _make_read_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + return _ProactorReadPipeTransport(self, sock, protocol, waiter, extra) + + def _make_write_pipe_transport(self, sock, protocol, waiter=None, + extra=None): + # We want connection_lost() to be called when other end closes + return _ProactorWritePipeTransport(self, + sock, protocol, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + + if threading.current_thread() is threading.main_thread(): + signal.set_wakeup_fd(-1) + # Call these methods before closing the event loop (before calling + # BaseEventLoop.close), because they can schedule callbacks with + # call_soon(), which is forbidden when the event loop is closed. 
+ self._stop_accept_futures() + self._close_self_pipe() + self._proactor.close() + self._proactor = None + self._selector = None + + # Close the event loop + super().close() + + async def sock_recv(self, sock, n): + return await self._proactor.recv(sock, n) + + async def sock_recv_into(self, sock, buf): + return await self._proactor.recv_into(sock, buf) + + async def sock_sendall(self, sock, data): + return await self._proactor.send(sock, data) + + async def sock_connect(self, sock, address): + return await self._proactor.connect(sock, address) + + async def sock_accept(self, sock): + return await self._proactor.accept(sock) + + async def _sock_sendfile_native(self, sock, file, offset, count): + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise exceptions.SendfileNotAvailableError("not a regular file") + try: + fsize = os.fstat(fileno).st_size + except OSError: + raise exceptions.SendfileNotAvailableError("not a regular file") + blocksize = count if count else fsize + if not blocksize: + return 0 # empty file + + blocksize = min(blocksize, 0xffff_ffff) + end_pos = min(offset + count, fsize) if count else fsize + offset = min(offset, fsize) + total_sent = 0 + try: + while True: + blocksize = min(end_pos - offset, blocksize) + if blocksize <= 0: + return total_sent + await self._proactor.sendfile(sock, file, offset, blocksize) + offset += blocksize + total_sent += blocksize + finally: + if total_sent > 0: + file.seek(offset) + + async def _sendfile_native(self, transp, file, offset, count): + resume_reading = transp.is_reading() + transp.pause_reading() + await transp._make_empty_waiter() + try: + return await self.sock_sendfile(transp._sock, file, offset, count, + fallback=False) + finally: + transp._reset_empty_waiter() + if resume_reading: + transp.resume_reading() + + def _close_self_pipe(self): + if self._self_reading_future is not None: + self._self_reading_future.cancel() + self._self_reading_future = None + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = socket.socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + + def _loop_self_reading(self, f=None): + try: + if f is not None: + f.result() # may raise + if self._self_reading_future is not f: + # When we scheduled this Future, we assigned it to + # _self_reading_future. If it's not there now, something has + # tried to cancel the loop while this callback was still in the + # queue (see windows_events.ProactorEventLoop.run_forever). In + # that case stop here instead of continuing to schedule a new + # iteration. + return + f = self._proactor.recv(self._ssock, 4096) + except exceptions.CancelledError: + # _close_self_pipe() has been called, stop waiting for data + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self.call_exception_handler({ + 'message': 'Error on reading from the event loop self pipe', + 'exception': exc, + 'loop': self, + }) + else: + self._self_reading_future = f + f.add_done_callback(self._loop_self_reading) + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. 
When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). + csock = self._csock + if csock is None: + return + + try: + csock.send(b'\0') + except OSError: + if self._debug: + logger.debug("Fail to write a null byte into the " + "self-pipe socket", + exc_info=True) + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=None): + + def loop(f=None): + try: + if f is not None: + conn, addr = f.result() + if self._debug: + logger.debug("%r got a new connection from %r: %r", + server, addr, conn) + protocol = protocol_factory() + if sslcontext is not None: + self._make_ssl_transport( + conn, protocol, sslcontext, server_side=True, + extra={'peername': addr}, server=server, + ssl_handshake_timeout=ssl_handshake_timeout) + else: + self._make_socket_transport( + conn, protocol, + extra={'peername': addr}, server=server) + if self.is_closed(): + return + f = self._proactor.accept(sock) + except OSError as exc: + if sock.fileno() != -1: + self.call_exception_handler({ + 'message': 'Accept failed on a socket', + 'exception': exc, + 'socket': trsock.TransportSocket(sock), + }) + sock.close() + elif self._debug: + logger.debug("Accept failed on socket %r", + sock, exc_info=True) + except exceptions.CancelledError: + sock.close() + else: + self._accept_futures[sock.fileno()] = f + f.add_done_callback(loop) + + self.call_soon(loop) + + def _process_events(self, event_list): + # Events are processed in the IocpProactor._poll() method + pass + + def _stop_accept_futures(self): + for future in self._accept_futures.values(): + future.cancel() + self._accept_futures.clear() + + def _stop_serving(self, sock): + future = self._accept_futures.pop(sock.fileno(), None) + if future: + future.cancel() + self._proactor._stop_serving(sock) + sock.close() diff --git a/python310/asyncio/protocols.py b/python310/asyncio/protocols.py new file mode 100644 index 0000000000000000000000000000000000000000..09987b164c66e57f8bd12190899a86947cd065b6 --- /dev/null +++ b/python310/asyncio/protocols.py @@ -0,0 +1,216 @@ +"""Abstract Protocol base classes.""" + +__all__ = ( + 'BaseProtocol', 'Protocol', 'DatagramProtocol', + 'SubprocessProtocol', 'BufferedProtocol', +) + + +class BaseProtocol: + """Common base class for protocol interfaces. + + Usually user implements protocols that derived from BaseProtocol + like Protocol or ProcessProtocol. + + The only case when BaseProtocol should be implemented directly is + write-only transport like write pipe + """ + + __slots__ = () + + def connection_made(self, transport): + """Called when a connection is made. + + The argument is the transport representing the pipe connection. + To receive data, wait for data_received() calls. + When the connection is closed, connection_lost() is called. + """ + + def connection_lost(self, exc): + """Called when the connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + + def pause_writing(self): + """Called when the transport's buffer goes over the high-water mark. + + Pause and resume calls are paired -- pause_writing() is called + once when the buffer goes strictly over the high-water mark + (even if subsequent writes increases the buffer size even + more), and eventually resume_writing() is called once when the + buffer size reaches the low-water mark. 
+
+        Note that if the buffer size equals the high-water mark,
+        pause_writing() is not called -- it must go strictly over.
+        Conversely, resume_writing() is called when the buffer size is
+        equal to or lower than the low-water mark. These end conditions
+        are important to ensure that things go as expected when either
+        mark is zero.
+
+        NOTE: This is the only Protocol callback that is not called
+        through EventLoop.call_soon() -- if it were, it would have no
+        effect when it's most needed (when the app keeps writing
+        without yielding until pause_writing() is called).
+        """
+
+    def resume_writing(self):
+        """Called when the transport's buffer drains below the low-water mark.
+
+        See pause_writing() for details.
+        """
+
+
+class Protocol(BaseProtocol):
+    """Interface for stream protocol.
+
+    The user should implement this interface. They can inherit from
+    this class but don't need to. The implementations here do
+    nothing (they don't raise exceptions).
+
+    When the user wants to request a transport, they pass a protocol
+    factory to a utility function (e.g., EventLoop.create_connection()).
+
+    When the connection is made successfully, connection_made() is
+    called with a suitable transport object. Then data_received()
+    will be called 0 or more times with data (bytes) received from the
+    transport; finally, connection_lost() will be called exactly once
+    with either an exception object or None as an argument.
+
+    State machine of calls:
+
+      start -> CM [-> DR*] [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * DR: data_received()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    __slots__ = ()
+
+    def data_received(self, data):
+        """Called when some data is received.
+
+        The argument is a bytes object.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+
+        If this returns a false value (including None), the transport
+        will close itself. If it returns a true value, closing the
+        transport is up to the protocol.
+        """
+
+
+class BufferedProtocol(BaseProtocol):
+    """Interface for stream protocol with manual buffer control.
+
+    Event loop methods, such as `create_server` and `create_connection`,
+    accept factories that return protocols that implement this interface.
+
+    The idea of BufferedProtocol is that it allows the protocol to
+    manually allocate and control the receive buffer. Event loops can
+    then use the buffer provided by the protocol to avoid unnecessary
+    data copies. This can result in a noticeable performance
+    improvement for protocols that receive large amounts of data.
+    Sophisticated protocols can allocate the buffer only once at
+    creation time.
+
+    State machine of calls:
+
+      start -> CM [-> GB [-> BU?]]* [-> ER?] -> CL -> end
+
+    * CM: connection_made()
+    * GB: get_buffer()
+    * BU: buffer_updated()
+    * ER: eof_received()
+    * CL: connection_lost()
+    """
+
+    __slots__ = ()
+
+    def get_buffer(self, sizehint):
+        """Called to allocate a new receive buffer.
+
+        *sizehint* is a recommended minimal size for the returned
+        buffer. When set to -1, the buffer size can be arbitrary.
+
+        Must return an object that implements the
+        :ref:`buffer protocol <bufferobjects>`.
+        It is an error to return a zero-sized buffer.
+        """
+
+    def buffer_updated(self, nbytes):
+        """Called when the buffer was updated with the received data.
+
+        *nbytes* is the total number of bytes that were written to
+        the buffer.
+        """
+
+    def eof_received(self):
+        """Called when the other end calls write_eof() or equivalent.
+ + If this returns a false value (including None), the transport + will close itself. If it returns a true value, closing the + transport is up to the protocol. + """ + + +class DatagramProtocol(BaseProtocol): + """Interface for datagram protocol.""" + + __slots__ = () + + def datagram_received(self, data, addr): + """Called when some datagram is received.""" + + def error_received(self, exc): + """Called when a send or receive operation raises an OSError. + + (Other than BlockingIOError or InterruptedError.) + """ + + +class SubprocessProtocol(BaseProtocol): + """Interface for protocol for subprocess calls.""" + + __slots__ = () + + def pipe_data_received(self, fd, data): + """Called when the subprocess writes data into stdout/stderr pipe. + + fd is int file descriptor. + data is bytes object. + """ + + def pipe_connection_lost(self, fd, exc): + """Called when a file descriptor associated with the child process is + closed. + + fd is the int file descriptor that was closed. + """ + + def process_exited(self): + """Called when subprocess has exited.""" + + +def _feed_data_to_buffered_proto(proto, data): + data_len = len(data) + while data_len: + buf = proto.get_buffer(data_len) + buf_len = len(buf) + if not buf_len: + raise RuntimeError('get_buffer() returned an empty buffer') + + if buf_len >= data_len: + buf[:data_len] = data + proto.buffer_updated(data_len) + return + else: + buf[:buf_len] = data[:buf_len] + proto.buffer_updated(buf_len) + data = data[buf_len:] + data_len = len(data) diff --git a/python310/asyncio/queues.py b/python310/asyncio/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..10dd689bbb2efa66400a7048db3edb034ef2bebe --- /dev/null +++ b/python310/asyncio/queues.py @@ -0,0 +1,245 @@ +__all__ = ('Queue', 'PriorityQueue', 'LifoQueue', 'QueueFull', 'QueueEmpty') + +import collections +import heapq +from types import GenericAlias + +from . import locks +from . import mixins + + +class QueueEmpty(Exception): + """Raised when Queue.get_nowait() is called on an empty Queue.""" + pass + + +class QueueFull(Exception): + """Raised when the Queue.put_nowait() method is called on a full Queue.""" + pass + + +class Queue(mixins._LoopBoundMixin): + """A queue, useful for coordinating producer and consumer coroutines. + + If maxsize is less than or equal to zero, the queue size is infinite. If it + is an integer greater than 0, then "await put()" will block when the + queue reaches maxsize, until an item is removed by get(). + + Unlike the standard library Queue, you can reliably know this Queue's size + with qsize(), since your single-threaded asyncio application won't be + interrupted between calling qsize() and doing an operation on the Queue. + """ + + def __init__(self, maxsize=0, *, loop=mixins._marker): + super().__init__(loop=loop) + self._maxsize = maxsize + + # Futures. + self._getters = collections.deque() + # Futures. + self._putters = collections.deque() + self._unfinished_tasks = 0 + self._finished = locks.Event() + self._finished.set() + self._init(maxsize) + + # These three are overridable in subclasses. + + def _init(self, maxsize): + self._queue = collections.deque() + + def _get(self): + return self._queue.popleft() + + def _put(self, item): + self._queue.append(item) + + # End of the overridable methods. + + def _wakeup_next(self, waiters): + # Wake up the next waiter (if any) that isn't cancelled. 
+ while waiters: + waiter = waiters.popleft() + if not waiter.done(): + waiter.set_result(None) + break + + def __repr__(self): + return f'<{type(self).__name__} at {id(self):#x} {self._format()}>' + + def __str__(self): + return f'<{type(self).__name__} {self._format()}>' + + __class_getitem__ = classmethod(GenericAlias) + + def _format(self): + result = f'maxsize={self._maxsize!r}' + if getattr(self, '_queue', None): + result += f' _queue={list(self._queue)!r}' + if self._getters: + result += f' _getters[{len(self._getters)}]' + if self._putters: + result += f' _putters[{len(self._putters)}]' + if self._unfinished_tasks: + result += f' tasks={self._unfinished_tasks}' + return result + + def qsize(self): + """Number of items in the queue.""" + return len(self._queue) + + @property + def maxsize(self): + """Number of items allowed in the queue.""" + return self._maxsize + + def empty(self): + """Return True if the queue is empty, False otherwise.""" + return not self._queue + + def full(self): + """Return True if there are maxsize items in the queue. + + Note: if the Queue was initialized with maxsize=0 (the default), + then full() is never True. + """ + if self._maxsize <= 0: + return False + else: + return self.qsize() >= self._maxsize + + async def put(self, item): + """Put an item into the queue. + + Put an item into the queue. If the queue is full, wait until a free + slot is available before adding item. + """ + while self.full(): + putter = self._get_loop().create_future() + self._putters.append(putter) + try: + await putter + except: + putter.cancel() # Just in case putter is not done yet. + try: + # Clean self._putters from canceled putters. + self._putters.remove(putter) + except ValueError: + # The putter could be removed from self._putters by a + # previous get_nowait call. + pass + if not self.full() and not putter.cancelled(): + # We were woken up by get_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._putters) + raise + return self.put_nowait(item) + + def put_nowait(self, item): + """Put an item into the queue without blocking. + + If no free slot is immediately available, raise QueueFull. + """ + if self.full(): + raise QueueFull + self._put(item) + self._unfinished_tasks += 1 + self._finished.clear() + self._wakeup_next(self._getters) + + async def get(self): + """Remove and return an item from the queue. + + If queue is empty, wait until an item is available. + """ + while self.empty(): + getter = self._get_loop().create_future() + self._getters.append(getter) + try: + await getter + except: + getter.cancel() # Just in case getter is not done yet. + try: + # Clean self._getters from canceled getters. + self._getters.remove(getter) + except ValueError: + # The getter could be removed from self._getters by a + # previous put_nowait call. + pass + if not self.empty() and not getter.cancelled(): + # We were woken up by put_nowait(), but can't take + # the call. Wake up the next in line. + self._wakeup_next(self._getters) + raise + return self.get_nowait() + + def get_nowait(self): + """Remove and return an item from the queue. + + Return an item if one is immediately available, else raise QueueEmpty. + """ + if self.empty(): + raise QueueEmpty + item = self._get() + self._wakeup_next(self._putters) + return item + + def task_done(self): + """Indicate that a formerly enqueued task is complete. + + Used by queue consumers. 
For each get() used to fetch a task, + a subsequent call to task_done() tells the queue that the processing + on the task is complete. + + If a join() is currently blocking, it will resume when all items have + been processed (meaning that a task_done() call was received for every + item that had been put() into the queue). + + Raises ValueError if called more times than there were items placed in + the queue. + """ + if self._unfinished_tasks <= 0: + raise ValueError('task_done() called too many times') + self._unfinished_tasks -= 1 + if self._unfinished_tasks == 0: + self._finished.set() + + async def join(self): + """Block until all items in the queue have been gotten and processed. + + The count of unfinished tasks goes up whenever an item is added to the + queue. The count goes down whenever a consumer calls task_done() to + indicate that the item was retrieved and all work on it is complete. + When the count of unfinished tasks drops to zero, join() unblocks. + """ + if self._unfinished_tasks > 0: + await self._finished.wait() + + +class PriorityQueue(Queue): + """A subclass of Queue; retrieves entries in priority order (lowest first). + + Entries are typically tuples of the form: (priority number, data). + """ + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item, heappush=heapq.heappush): + heappush(self._queue, item) + + def _get(self, heappop=heapq.heappop): + return heappop(self._queue) + + +class LifoQueue(Queue): + """A subclass of Queue that retrieves most recently added entries first.""" + + def _init(self, maxsize): + self._queue = [] + + def _put(self, item): + self._queue.append(item) + + def _get(self): + return self._queue.pop() diff --git a/python310/asyncio/runners.py b/python310/asyncio/runners.py new file mode 100644 index 0000000000000000000000000000000000000000..9a5e9a48479ef78e7d8c7a8706d56603f234f2ba --- /dev/null +++ b/python310/asyncio/runners.py @@ -0,0 +1,73 @@ +__all__ = 'run', + +from . import coroutines +from . import events +from . import tasks + + +def run(main, *, debug=None): + """Execute the coroutine and return the result. + + This function runs the passed coroutine, taking care of + managing the asyncio event loop and finalizing asynchronous + generators. + + This function cannot be called when another asyncio event loop is + running in the same thread. + + If debug is True, the event loop will be run in debug mode. + + This function always creates a new event loop and closes it at the end. + It should be used as a main entry point for asyncio programs, and should + ideally only be called once. 
+ + Example: + + async def main(): + await asyncio.sleep(1) + print('hello') + + asyncio.run(main()) + """ + if events._get_running_loop() is not None: + raise RuntimeError( + "asyncio.run() cannot be called from a running event loop") + + if not coroutines.iscoroutine(main): + raise ValueError("a coroutine was expected, got {!r}".format(main)) + + loop = events.new_event_loop() + try: + events.set_event_loop(loop) + if debug is not None: + loop.set_debug(debug) + return loop.run_until_complete(main) + finally: + try: + _cancel_all_tasks(loop) + loop.run_until_complete(loop.shutdown_asyncgens()) + loop.run_until_complete(loop.shutdown_default_executor()) + finally: + events.set_event_loop(None) + loop.close() + + +def _cancel_all_tasks(loop): + to_cancel = tasks.all_tasks(loop) + if not to_cancel: + return + + for task in to_cancel: + task.cancel() + + loop.run_until_complete(tasks.gather(*to_cancel, return_exceptions=True)) + + for task in to_cancel: + if task.cancelled(): + continue + if task.exception() is not None: + loop.call_exception_handler({ + 'message': 'unhandled exception during asyncio.run() shutdown', + 'exception': task.exception(), + 'task': task, + }) diff --git a/python310/asyncio/selector_events.py b/python310/asyncio/selector_events.py new file mode 100644 index 0000000000000000000000000000000000000000..8282f28037a62a25b79343d8b7b955f700ed83c9 --- /dev/null +++ b/python310/asyncio/selector_events.py @@ -0,0 +1,1105 @@ +"""Event loop using a selector and related classes. + +A selector is a "notify-when-ready" multiplexer. For a subclass which +also includes support for signal handling, see the unix_events sub-module. +""" + +__all__ = 'BaseSelectorEventLoop', + +import collections +import errno +import functools +import selectors +import socket +import warnings +import weakref +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import base_events +from . import constants +from . import events +from . import futures +from . import protocols +from . import sslproto +from . import transports +from . import trsock +from .log import logger + + +def _test_selector_event(selector, fd, event): + # Test if the selector is monitoring 'event' events + # for the file descriptor 'fd'. + try: + key = selector.get_key(fd) + except KeyError: + return False + else: + return bool(key.events & event) + + +class BaseSelectorEventLoop(base_events.BaseEventLoop): + """Selector event loop. + + See events.EventLoop for API specification. 
+ """ + + def __init__(self, selector=None): + super().__init__() + + if selector is None: + selector = selectors.DefaultSelector() + logger.debug('Using selector: %s', selector.__class__.__name__) + self._selector = selector + self._make_self_pipe() + self._transports = weakref.WeakValueDictionary() + + def _make_socket_transport(self, sock, protocol, waiter=None, *, + extra=None, server=None): + return _SelectorSocketTransport(self, sock, protocol, waiter, + extra, server) + + def _make_ssl_transport( + self, rawsock, protocol, sslcontext, waiter=None, + *, server_side=False, server_hostname=None, + extra=None, server=None, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + ssl_protocol = sslproto.SSLProtocol( + self, protocol, sslcontext, waiter, + server_side, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + _SelectorSocketTransport(self, rawsock, ssl_protocol, + extra=extra, server=server) + return ssl_protocol._app_transport + + def _make_datagram_transport(self, sock, protocol, + address=None, waiter=None, extra=None): + return _SelectorDatagramTransport(self, sock, protocol, + address, waiter, extra) + + def close(self): + if self.is_running(): + raise RuntimeError("Cannot close a running event loop") + if self.is_closed(): + return + self._close_self_pipe() + super().close() + if self._selector is not None: + self._selector.close() + self._selector = None + + def _close_self_pipe(self): + self._remove_reader(self._ssock.fileno()) + self._ssock.close() + self._ssock = None + self._csock.close() + self._csock = None + self._internal_fds -= 1 + + def _make_self_pipe(self): + # A self-socket, really. :-) + self._ssock, self._csock = socket.socketpair() + self._ssock.setblocking(False) + self._csock.setblocking(False) + self._internal_fds += 1 + self._add_reader(self._ssock.fileno(), self._read_from_self) + + def _process_self_data(self, data): + pass + + def _read_from_self(self): + while True: + try: + data = self._ssock.recv(4096) + if not data: + break + self._process_self_data(data) + except InterruptedError: + continue + except BlockingIOError: + break + + def _write_to_self(self): + # This may be called from a different thread, possibly after + # _close_self_pipe() has been called or even while it is + # running. Guard for self._csock being None or closed. When + # a socket is closed, send() raises OSError (with errno set to + # EBADF, but let's not rely on the exact error code). + csock = self._csock + if csock is None: + return + + try: + csock.send(b'\0') + except OSError: + if self._debug: + logger.debug("Fail to write a null byte into the " + "self-pipe socket", + exc_info=True) + + def _start_serving(self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + self._add_reader(sock.fileno(), self._accept_connection, + protocol_factory, sock, sslcontext, server, backlog, + ssl_handshake_timeout) + + def _accept_connection( + self, protocol_factory, sock, + sslcontext=None, server=None, backlog=100, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + # This method is only called once for each event loop tick where the + # listening socket has triggered an EVENT_READ. There may be multiple + # connections waiting for an .accept() so it is called in a loop. + # See https://bugs.python.org/issue27906 for more details. 
+ for _ in range(backlog): + try: + conn, addr = sock.accept() + if self._debug: + logger.debug("%r got a new connection from %r: %r", + server, addr, conn) + conn.setblocking(False) + except (BlockingIOError, InterruptedError, ConnectionAbortedError): + # Early exit because the socket accept buffer is empty. + return None + except OSError as exc: + # There's nowhere to send the error, so just log it. + if exc.errno in (errno.EMFILE, errno.ENFILE, + errno.ENOBUFS, errno.ENOMEM): + # Some platforms (e.g. Linux keep reporting the FD as + # ready, so we remove the read handler temporarily. + # We'll try again in a while. + self.call_exception_handler({ + 'message': 'socket.accept() out of system resource', + 'exception': exc, + 'socket': trsock.TransportSocket(sock), + }) + self._remove_reader(sock.fileno()) + self.call_later(constants.ACCEPT_RETRY_DELAY, + self._start_serving, + protocol_factory, sock, sslcontext, server, + backlog, ssl_handshake_timeout) + else: + raise # The event loop will catch, log and ignore it. + else: + extra = {'peername': addr} + accept = self._accept_connection2( + protocol_factory, conn, extra, sslcontext, server, + ssl_handshake_timeout) + self.create_task(accept) + + async def _accept_connection2( + self, protocol_factory, conn, extra, + sslcontext=None, server=None, + ssl_handshake_timeout=constants.SSL_HANDSHAKE_TIMEOUT): + protocol = None + transport = None + try: + protocol = protocol_factory() + waiter = self.create_future() + if sslcontext: + transport = self._make_ssl_transport( + conn, protocol, sslcontext, waiter=waiter, + server_side=True, extra=extra, server=server, + ssl_handshake_timeout=ssl_handshake_timeout) + else: + transport = self._make_socket_transport( + conn, protocol, waiter=waiter, extra=extra, + server=server) + + try: + await waiter + except BaseException: + transport.close() + raise + # It's now up to the protocol to handle the connection. + + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if self._debug: + context = { + 'message': + 'Error on transport creation for incoming connection', + 'exception': exc, + } + if protocol is not None: + context['protocol'] = protocol + if transport is not None: + context['transport'] = transport + self.call_exception_handler(context) + + def _ensure_fd_no_transport(self, fd): + fileno = fd + if not isinstance(fileno, int): + try: + fileno = int(fileno.fileno()) + except (AttributeError, TypeError, ValueError): + # This code matches selectors._fileobj_to_fd function. 
+ raise ValueError(f"Invalid file object: {fd!r}") from None + try: + transport = self._transports[fileno] + except KeyError: + pass + else: + if not transport.is_closing(): + raise RuntimeError( + f'File descriptor {fd!r} is used by transport ' + f'{transport!r}') + + def _add_reader(self, fd, callback, *args): + self._check_closed() + handle = events.Handle(callback, args, self, None) + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_READ, + (handle, None)) + else: + mask, (reader, writer) = key.events, key.data + self._selector.modify(fd, mask | selectors.EVENT_READ, + (handle, writer)) + if reader is not None: + reader.cancel() + return handle + + def _remove_reader(self, fd): + if self.is_closed(): + return False + try: + key = self._selector.get_key(fd) + except KeyError: + return False + else: + mask, (reader, writer) = key.events, key.data + mask &= ~selectors.EVENT_READ + if not mask: + self._selector.unregister(fd) + else: + self._selector.modify(fd, mask, (None, writer)) + + if reader is not None: + reader.cancel() + return True + else: + return False + + def _add_writer(self, fd, callback, *args): + self._check_closed() + handle = events.Handle(callback, args, self, None) + try: + key = self._selector.get_key(fd) + except KeyError: + self._selector.register(fd, selectors.EVENT_WRITE, + (None, handle)) + else: + mask, (reader, writer) = key.events, key.data + self._selector.modify(fd, mask | selectors.EVENT_WRITE, + (reader, handle)) + if writer is not None: + writer.cancel() + return handle + + def _remove_writer(self, fd): + """Remove a writer callback.""" + if self.is_closed(): + return False + try: + key = self._selector.get_key(fd) + except KeyError: + return False + else: + mask, (reader, writer) = key.events, key.data + # Remove both writer and connector. + mask &= ~selectors.EVENT_WRITE + if not mask: + self._selector.unregister(fd) + else: + self._selector.modify(fd, mask, (reader, None)) + + if writer is not None: + writer.cancel() + return True + else: + return False + + def add_reader(self, fd, callback, *args): + """Add a reader callback.""" + self._ensure_fd_no_transport(fd) + self._add_reader(fd, callback, *args) + + def remove_reader(self, fd): + """Remove a reader callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_reader(fd) + + def add_writer(self, fd, callback, *args): + """Add a writer callback..""" + self._ensure_fd_no_transport(fd) + self._add_writer(fd, callback, *args) + + def remove_writer(self, fd): + """Remove a writer callback.""" + self._ensure_fd_no_transport(fd) + return self._remove_writer(fd) + + async def sock_recv(self, sock, n): + """Receive data from the socket. + + The return value is a bytes object representing the data received. + The maximum amount of data to be received at once is specified by + nbytes. 
+ """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + return sock.recv(n) + except (BlockingIOError, InterruptedError): + pass + fut = self.create_future() + fd = sock.fileno() + self._ensure_fd_no_transport(fd) + handle = self._add_reader(fd, self._sock_recv, fut, sock, n) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd, handle=handle)) + return await fut + + def _sock_read_done(self, fd, fut, handle=None): + if handle is None or not handle.cancelled(): + self.remove_reader(fd) + + def _sock_recv(self, fut, sock, n): + # _sock_recv() can add itself as an I/O callback if the operation can't + # be done immediately. Don't use it directly, call sock_recv(). + if fut.done(): + return + try: + data = sock.recv(n) + except (BlockingIOError, InterruptedError): + return # try again next time + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(data) + + async def sock_recv_into(self, sock, buf): + """Receive data from the socket. + + The received data is written into *buf* (a writable buffer). + The return value is the number of bytes written. + """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + return sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + pass + fut = self.create_future() + fd = sock.fileno() + self._ensure_fd_no_transport(fd) + handle = self._add_reader(fd, self._sock_recv_into, fut, sock, buf) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd, handle=handle)) + return await fut + + def _sock_recv_into(self, fut, sock, buf): + # _sock_recv_into() can add itself as an I/O callback if the operation + # can't be done immediately. Don't use it directly, call + # sock_recv_into(). + if fut.done(): + return + try: + nbytes = sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return # try again next time + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(nbytes) + + async def sock_sendall(self, sock, data): + """Send data to the socket. + + The socket must be connected to a remote socket. This method continues + to send data from data until either all data has been sent or an + error occurs. None is returned on success. On error, an exception is + raised, and there is no way to determine how much data, if any, was + successfully processed by the receiving end of the connection. 
+ """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + try: + n = sock.send(data) + except (BlockingIOError, InterruptedError): + n = 0 + + if n == len(data): + # all data sent + return + + fut = self.create_future() + fd = sock.fileno() + self._ensure_fd_no_transport(fd) + # use a trick with a list in closure to store a mutable state + handle = self._add_writer(fd, self._sock_sendall, fut, sock, + memoryview(data), [n]) + fut.add_done_callback( + functools.partial(self._sock_write_done, fd, handle=handle)) + return await fut + + def _sock_sendall(self, fut, sock, view, pos): + if fut.done(): + # Future cancellation can be scheduled on previous loop iteration + return + start = pos[0] + try: + n = sock.send(view[start:]) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + return + + start += n + + if start == len(view): + fut.set_result(None) + else: + pos[0] = start + + async def sock_connect(self, sock, address): + """Connect to a remote socket at address. + + This method is a coroutine. + """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + + if sock.family == socket.AF_INET or ( + base_events._HAS_IPv6 and sock.family == socket.AF_INET6): + resolved = await self._ensure_resolved( + address, family=sock.family, type=sock.type, proto=sock.proto, + loop=self, + ) + _, _, _, _, address = resolved[0] + + fut = self.create_future() + self._sock_connect(fut, sock, address) + try: + return await fut + finally: + # Needed to break cycles when an exception occurs. + fut = None + + def _sock_connect(self, fut, sock, address): + fd = sock.fileno() + try: + sock.connect(address) + except (BlockingIOError, InterruptedError): + # Issue #23618: When the C function connect() fails with EINTR, the + # connection runs in background. We have to wait until the socket + # becomes writable to be notified when the connection succeed or + # fails. + self._ensure_fd_no_transport(fd) + handle = self._add_writer( + fd, self._sock_connect_cb, fut, sock, address) + fut.add_done_callback( + functools.partial(self._sock_write_done, fd, handle=handle)) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + finally: + fut = None + + def _sock_write_done(self, fd, fut, handle=None): + if handle is None or not handle.cancelled(): + self.remove_writer(fd) + + def _sock_connect_cb(self, fut, sock, address): + if fut.done(): + return + + try: + err = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # Jump to any except clause below. + raise OSError(err, f'Connect call failed {address}') + except (BlockingIOError, InterruptedError): + # socket is still registered, the callback will be retried later + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result(None) + finally: + fut = None + + async def sock_accept(self, sock): + """Accept a connection. + + The socket must be bound to an address and listening for connections. + The return value is a pair (conn, address) where conn is a new socket + object usable to send and receive data on the connection, and address + is the address bound to the socket on the other end of the connection. 
+ """ + base_events._check_ssl_socket(sock) + if self._debug and sock.gettimeout() != 0: + raise ValueError("the socket must be non-blocking") + fut = self.create_future() + self._sock_accept(fut, sock) + return await fut + + def _sock_accept(self, fut, sock): + fd = sock.fileno() + try: + conn, address = sock.accept() + conn.setblocking(False) + except (BlockingIOError, InterruptedError): + self._ensure_fd_no_transport(fd) + handle = self._add_reader(fd, self._sock_accept, fut, sock) + fut.add_done_callback( + functools.partial(self._sock_read_done, fd, handle=handle)) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + fut.set_exception(exc) + else: + fut.set_result((conn, address)) + + async def _sendfile_native(self, transp, file, offset, count): + del self._transports[transp._sock_fd] + resume_reading = transp.is_reading() + transp.pause_reading() + await transp._make_empty_waiter() + try: + return await self.sock_sendfile(transp._sock, file, offset, count, + fallback=False) + finally: + transp._reset_empty_waiter() + if resume_reading: + transp.resume_reading() + self._transports[transp._sock_fd] = transp + + def _process_events(self, event_list): + for key, mask in event_list: + fileobj, (reader, writer) = key.fileobj, key.data + if mask & selectors.EVENT_READ and reader is not None: + if reader._cancelled: + self._remove_reader(fileobj) + else: + self._add_callback(reader) + if mask & selectors.EVENT_WRITE and writer is not None: + if writer._cancelled: + self._remove_writer(fileobj) + else: + self._add_callback(writer) + + def _stop_serving(self, sock): + self._remove_reader(sock.fileno()) + sock.close() + + +class _SelectorTransport(transports._FlowControlMixin, + transports.Transport): + + max_size = 256 * 1024 # Buffer size passed to recv(). + + _buffer_factory = bytearray # Constructs initial value for self._buffer. + + # Attribute used in the destructor: it must be set even if the constructor + # is not called (see _SelectorSslTransport which may start by raising an + # exception) + _sock = None + + def __init__(self, loop, sock, protocol, extra=None, server=None): + super().__init__(extra, loop) + self._extra['socket'] = trsock.TransportSocket(sock) + try: + self._extra['sockname'] = sock.getsockname() + except OSError: + self._extra['sockname'] = None + if 'peername' not in self._extra: + try: + self._extra['peername'] = sock.getpeername() + except socket.error: + self._extra['peername'] = None + self._sock = sock + self._sock_fd = sock.fileno() + + self._protocol_connected = False + self.set_protocol(protocol) + + self._server = server + self._buffer = self._buffer_factory() + self._conn_lost = 0 # Set when call to connection_lost scheduled. + self._closing = False # Set when close() called. 
+ if self._server is not None: + self._server._attach() + loop._transports[self._sock_fd] = self + + def __repr__(self): + info = [self.__class__.__name__] + if self._sock is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._sock_fd}') + # test if the transport was closed + if self._loop is not None and not self._loop.is_closed(): + polling = _test_selector_event(self._loop._selector, + self._sock_fd, selectors.EVENT_READ) + if polling: + info.append('read=polling') + else: + info.append('read=idle') + + polling = _test_selector_event(self._loop._selector, + self._sock_fd, + selectors.EVENT_WRITE) + if polling: + state = 'polling' + else: + state = 'idle' + + bufsize = self.get_write_buffer_size() + info.append(f'write=<{state}, bufsize={bufsize}>') + return '<{}>'.format(' '.join(info)) + + def abort(self): + self._force_close(None) + + def set_protocol(self, protocol): + self._protocol = protocol + self._protocol_connected = True + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._closing: + return + self._closing = True + self._loop._remove_reader(self._sock_fd) + if not self._buffer: + self._conn_lost += 1 + self._loop._remove_writer(self._sock_fd) + self._loop.call_soon(self._call_connection_lost, None) + + def __del__(self, _warn=warnings.warn): + if self._sock is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._sock.close() + + def _fatal_error(self, exc, message='Fatal error on transport'): + # Should be called from exception handler only. + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._force_close(exc) + + def _force_close(self, exc): + if self._conn_lost: + return + if self._buffer: + self._buffer.clear() + self._loop._remove_writer(self._sock_fd) + if not self._closing: + self._closing = True + self._loop._remove_reader(self._sock_fd) + self._conn_lost += 1 + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + if self._protocol_connected: + self._protocol.connection_lost(exc) + finally: + self._sock.close() + self._sock = None + self._protocol = None + self._loop = None + server = self._server + if server is not None: + server._detach() + self._server = None + + def get_write_buffer_size(self): + return len(self._buffer) + + def _add_reader(self, fd, callback, *args): + if self._closing: + return + + self._loop._add_reader(fd, callback, *args) + + +class _SelectorSocketTransport(_SelectorTransport): + + _start_tls_compatible = True + _sendfile_compatible = constants._SendfileMode.TRY_NATIVE + + def __init__(self, loop, sock, protocol, waiter=None, + extra=None, server=None): + + self._read_ready_cb = None + super().__init__(loop, sock, protocol, extra, server) + self._eof = False + self._paused = False + self._empty_waiter = None + + # Disable the Nagle algorithm -- small writes will be + # sent without waiting for the TCP ACK. This generally + # decreases the latency (in some cases significantly.) 
+ base_events._set_nodelay(self._sock) + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def set_protocol(self, protocol): + if isinstance(protocol, protocols.BufferedProtocol): + self._read_ready_cb = self._read_ready__get_buffer + else: + self._read_ready_cb = self._read_ready__data_received + + super().set_protocol(protocol) + + def is_reading(self): + return not self._paused and not self._closing + + def pause_reading(self): + if self._closing or self._paused: + return + self._paused = True + self._loop._remove_reader(self._sock_fd) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if self._closing or not self._paused: + return + self._paused = False + self._add_reader(self._sock_fd, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def _read_ready(self): + self._read_ready_cb() + + def _read_ready__get_buffer(self): + if self._conn_lost: + return + + try: + buf = self._protocol.get_buffer(-1) + if not len(buf): + raise RuntimeError('get_buffer() returned an empty buffer') + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.get_buffer() call failed.') + return + + try: + nbytes = self._sock.recv_into(buf) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + return + + if not nbytes: + self._read_ready__on_eof() + return + + try: + self._protocol.buffer_updated(nbytes) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.buffer_updated() call failed.') + + def _read_ready__data_received(self): + if self._conn_lost: + return + try: + data = self._sock.recv(self.max_size) + except (BlockingIOError, InterruptedError): + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on socket transport') + return + + if not data: + self._read_ready__on_eof() + return + + try: + self._protocol.data_received(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.data_received() call failed.') + + def _read_ready__on_eof(self): + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + try: + keep_open = self._protocol.eof_received() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal error: protocol.eof_received() call failed.') + return + + if keep_open: + # We're keeping the connection open so the + # protocol can write more, but we still can't + # receive more, so remove the reader callback. 
+ self._loop._remove_reader(self._sock_fd) + else: + self.close() + + def write(self, data): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f'data argument must be a bytes-like object, ' + f'not {type(data).__name__!r}') + if self._eof: + raise RuntimeError('Cannot call write() after write_eof()') + if self._empty_waiter is not None: + raise RuntimeError('unable to write; sendfile is in progress') + if not data: + return + + if self._conn_lost: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Optimization: try to send now. + try: + n = self._sock.send(data) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal write error on socket transport') + return + else: + data = data[n:] + if not data: + return + # Not all was written; register write handler. + self._loop._add_writer(self._sock_fd, self._write_ready) + + # Add it to the buffer. + self._buffer.extend(data) + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + if self._conn_lost: + return + try: + n = self._sock.send(self._buffer) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop._remove_writer(self._sock_fd) + self._buffer.clear() + self._fatal_error(exc, 'Fatal write error on socket transport') + if self._empty_waiter is not None: + self._empty_waiter.set_exception(exc) + else: + if n: + del self._buffer[:n] + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._empty_waiter is not None: + self._empty_waiter.set_result(None) + if self._closing: + self._call_connection_lost(None) + elif self._eof: + self._sock.shutdown(socket.SHUT_WR) + + def write_eof(self): + if self._closing or self._eof: + return + self._eof = True + if not self._buffer: + self._sock.shutdown(socket.SHUT_WR) + + def can_write_eof(self): + return True + + def _call_connection_lost(self, exc): + super()._call_connection_lost(exc) + if self._empty_waiter is not None: + self._empty_waiter.set_exception( + ConnectionError("Connection is closed by peer")) + + def _make_empty_waiter(self): + if self._empty_waiter is not None: + raise RuntimeError("Empty waiter is already set") + self._empty_waiter = self._loop.create_future() + if not self._buffer: + self._empty_waiter.set_result(None) + return self._empty_waiter + + def _reset_empty_waiter(self): + self._empty_waiter = None + + +class _SelectorDatagramTransport(_SelectorTransport): + + _buffer_factory = collections.deque + + def __init__(self, loop, sock, protocol, address=None, + waiter=None, extra=None): + super().__init__(loop, sock, protocol, extra) + self._address = address + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._add_reader, + self._sock_fd, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def get_write_buffer_size(self): + return sum(len(data) for data, _ in self._buffer) + + def _read_ready(self): + if self._conn_lost: + return + try: + data, addr = 
self._sock.recvfrom(self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._protocol.error_received(exc) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error(exc, 'Fatal read error on datagram transport') + else: + self._protocol.datagram_received(data, addr) + + def sendto(self, data, addr=None): + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f'data argument must be a bytes-like object, ' + f'not {type(data).__name__!r}') + if not data: + return + + if self._address: + if addr not in (None, self._address): + raise ValueError( + f'Invalid address: must be None or {self._address}') + addr = self._address + + if self._conn_lost and self._address: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('socket.send() raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + if self._extra['peername']: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + return + except (BlockingIOError, InterruptedError): + self._loop._add_writer(self._sock_fd, self._sendto_ready) + except OSError as exc: + self._protocol.error_received(exc) + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal write error on datagram transport') + return + + # Ensure that what we buffer is immutable. + self._buffer.append((bytes(data), addr)) + self._maybe_pause_protocol() + + def _sendto_ready(self): + while self._buffer: + data, addr = self._buffer.popleft() + try: + if self._extra['peername']: + self._sock.send(data) + else: + self._sock.sendto(data, addr) + except (BlockingIOError, InterruptedError): + self._buffer.appendleft((data, addr)) # Try again later. + break + except OSError as exc: + self._protocol.error_received(exc) + return + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._fatal_error( + exc, 'Fatal write error on datagram transport') + return + + self._maybe_resume_protocol() # May append to buffer. + if not self._buffer: + self._loop._remove_writer(self._sock_fd) + if self._closing: + self._call_connection_lost(None) diff --git a/python310/asyncio/sslproto.py b/python310/asyncio/sslproto.py new file mode 100644 index 0000000000000000000000000000000000000000..00fc16c014c9b2d666e81d7085860df8a62dd67c --- /dev/null +++ b/python310/asyncio/sslproto.py @@ -0,0 +1,739 @@ +import collections +import warnings +try: + import ssl +except ImportError: # pragma: no cover + ssl = None + +from . import constants +from . import protocols +from . import transports +from .log import logger + + +def _create_transport_context(server_side, server_hostname): + if server_side: + raise ValueError('Server side SSL needs a valid SSLContext') + + # Client side may pass ssl=True to use a default + # context; in that case the sslcontext passed is None. + # The default is secure for client connections. + # Python 3.4+: use up-to-date strong settings. + sslcontext = ssl.create_default_context() + if not server_hostname: + sslcontext.check_hostname = False + return sslcontext + + +# States of an _SSLPipe. +_UNWRAPPED = "UNWRAPPED" +_DO_HANDSHAKE = "DO_HANDSHAKE" +_WRAPPED = "WRAPPED" +_SHUTDOWN = "SHUTDOWN" + + +class _SSLPipe(object): + """An SSL "Pipe". + + An SSL pipe allows you to communicate with an SSL/TLS protocol instance + through memory buffers. 
It can be used to implement a security layer for an + existing connection where you don't have access to the connection's file + descriptor, or for some reason you don't want to use it. + + An SSL pipe can be in "wrapped" and "unwrapped" mode. In unwrapped mode, + data is passed through untransformed. In wrapped mode, application level + data is encrypted to SSL record level data and vice versa. The SSL record + level is the lowest level in the SSL protocol suite and is what travels + as-is over the wire. + + An SslPipe initially is in "unwrapped" mode. To start SSL, call + do_handshake(). To shutdown SSL again, call unwrap(). + """ + + max_size = 256 * 1024 # Buffer size passed to read() + + def __init__(self, context, server_side, server_hostname=None): + """ + The *context* argument specifies the ssl.SSLContext to use. + + The *server_side* argument indicates whether this is a server side or + client side transport. + + The optional *server_hostname* argument can be used to specify the + hostname you are connecting to. You may only specify this parameter if + the _ssl module supports Server Name Indication (SNI). + """ + self._context = context + self._server_side = server_side + self._server_hostname = server_hostname + self._state = _UNWRAPPED + self._incoming = ssl.MemoryBIO() + self._outgoing = ssl.MemoryBIO() + self._sslobj = None + self._need_ssldata = False + self._handshake_cb = None + self._shutdown_cb = None + + @property + def context(self): + """The SSL context passed to the constructor.""" + return self._context + + @property + def ssl_object(self): + """The internal ssl.SSLObject instance. + + Return None if the pipe is not wrapped. + """ + return self._sslobj + + @property + def need_ssldata(self): + """Whether more record level data is needed to complete a handshake + that is currently in progress.""" + return self._need_ssldata + + @property + def wrapped(self): + """ + Whether a security layer is currently in effect. + + Return False during handshake. + """ + return self._state == _WRAPPED + + def do_handshake(self, callback=None): + """Start the SSL handshake. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the handshake is complete. The callback will be + called with None if successful, else an exception instance. + """ + if self._state != _UNWRAPPED: + raise RuntimeError('handshake in progress or completed') + self._sslobj = self._context.wrap_bio( + self._incoming, self._outgoing, + server_side=self._server_side, + server_hostname=self._server_hostname) + self._state = _DO_HANDSHAKE + self._handshake_cb = callback + ssldata, appdata = self.feed_ssldata(b'', only_handshake=True) + assert len(appdata) == 0 + return ssldata + + def shutdown(self, callback=None): + """Start the SSL shutdown sequence. + + Return a list of ssldata. A ssldata element is a list of buffers + + The optional *callback* argument can be used to install a callback that + will be called when the shutdown is complete. The callback will be + called without arguments. 
+ """ + if self._state == _UNWRAPPED: + raise RuntimeError('no security layer present') + if self._state == _SHUTDOWN: + raise RuntimeError('shutdown in progress') + assert self._state in (_WRAPPED, _DO_HANDSHAKE) + self._state = _SHUTDOWN + self._shutdown_cb = callback + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + return ssldata + + def feed_eof(self): + """Send a potentially "ragged" EOF. + + This method will raise an SSL_ERROR_EOF exception if the EOF is + unexpected. + """ + self._incoming.write_eof() + ssldata, appdata = self.feed_ssldata(b'') + assert appdata == [] or appdata == [b''] + + def feed_ssldata(self, data, only_handshake=False): + """Feed SSL record level data into the pipe. + + The data must be a bytes instance. It is OK to send an empty bytes + instance. This can be used to get ssldata for a handshake initiated by + this endpoint. + + Return a (ssldata, appdata) tuple. The ssldata element is a list of + buffers containing SSL data that needs to be sent to the remote SSL. + + The appdata element is a list of buffers containing plaintext data that + needs to be forwarded to the application. The appdata list may contain + an empty buffer indicating an SSL "close_notify" alert. This alert must + be acknowledged by calling shutdown(). + """ + if self._state == _UNWRAPPED: + # If unwrapped, pass plaintext data straight through. + if data: + appdata = [data] + else: + appdata = [] + return ([], appdata) + + self._need_ssldata = False + if data: + self._incoming.write(data) + + ssldata = [] + appdata = [] + try: + if self._state == _DO_HANDSHAKE: + # Call do_handshake() until it doesn't raise anymore. + self._sslobj.do_handshake() + self._state = _WRAPPED + if self._handshake_cb: + self._handshake_cb(None) + if only_handshake: + return (ssldata, appdata) + # Handshake done: execute the wrapped block + + if self._state == _WRAPPED: + # Main state: read data from SSL until close_notify + while True: + chunk = self._sslobj.read(self.max_size) + appdata.append(chunk) + if not chunk: # close_notify + break + + elif self._state == _SHUTDOWN: + # Call shutdown() until it doesn't raise anymore. + self._sslobj.unwrap() + self._sslobj = None + self._state = _UNWRAPPED + if self._shutdown_cb: + self._shutdown_cb() + + elif self._state == _UNWRAPPED: + # Drain possible plaintext data after close_notify. + appdata.append(self._incoming.read()) + except (ssl.SSLError, ssl.CertificateError) as exc: + exc_errno = getattr(exc, 'errno', None) + if exc_errno not in ( + ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + if self._state == _DO_HANDSHAKE and self._handshake_cb: + self._handshake_cb(exc) + raise + self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ) + + # Check for record level data that needs to be sent back. + # Happens for the initial handshake and renegotiations. + if self._outgoing.pending: + ssldata.append(self._outgoing.read()) + return (ssldata, appdata) + + def feed_appdata(self, data, offset=0): + """Feed plaintext data into the pipe. + + Return an (ssldata, offset) tuple. The ssldata element is a list of + buffers containing record level data that needs to be sent to the + remote SSL instance. The offset is the number of plaintext bytes that + were processed, which may be less than the length of data. + + NOTE: In case of short writes, this call MUST be retried with the SAME + buffer passed into the *data* argument (i.e. the id() must be the + same). This is an OpenSSL requirement. 
A further particularity is that + a short write will always have offset == 0, because the _ssl module + does not enable partial writes. And even though the offset is zero, + there will still be encrypted data in ssldata. + """ + assert 0 <= offset <= len(data) + if self._state == _UNWRAPPED: + # pass through data in unwrapped mode + if offset < len(data): + ssldata = [data[offset:]] + else: + ssldata = [] + return (ssldata, len(data)) + + ssldata = [] + view = memoryview(data) + while True: + self._need_ssldata = False + try: + if offset < len(view): + offset += self._sslobj.write(view[offset:]) + except ssl.SSLError as exc: + # It is not allowed to call write() after unwrap() until the + # close_notify is acknowledged. We return the condition to the + # caller as a short write. + exc_errno = getattr(exc, 'errno', None) + if exc.reason == 'PROTOCOL_IS_SHUTDOWN': + exc_errno = exc.errno = ssl.SSL_ERROR_WANT_READ + if exc_errno not in (ssl.SSL_ERROR_WANT_READ, + ssl.SSL_ERROR_WANT_WRITE, + ssl.SSL_ERROR_SYSCALL): + raise + self._need_ssldata = (exc_errno == ssl.SSL_ERROR_WANT_READ) + + # See if there's any record level data back for us. + if self._outgoing.pending: + ssldata.append(self._outgoing.read()) + if offset == len(view) or self._need_ssldata: + break + return (ssldata, offset) + + +class _SSLProtocolTransport(transports._FlowControlMixin, + transports.Transport): + + _sendfile_compatible = constants._SendfileMode.FALLBACK + + def __init__(self, loop, ssl_protocol): + self._loop = loop + # SSLProtocol instance + self._ssl_protocol = ssl_protocol + self._closed = False + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._ssl_protocol._get_extra_info(name, default) + + def set_protocol(self, protocol): + self._ssl_protocol._set_app_protocol(protocol) + + def get_protocol(self): + return self._ssl_protocol._app_protocol + + def is_closing(self): + return self._closed + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) called + with None as its argument. + """ + self._closed = True + self._ssl_protocol._start_shutdown() + + def __del__(self, _warn=warnings.warn): + if not self._closed: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self.close() + + def is_reading(self): + tr = self._ssl_protocol._transport + if tr is None: + raise RuntimeError('SSL transport has not been initialized yet') + return tr.is_reading() + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + """ + self._ssl_protocol._transport.pause_reading() + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + self._ssl_protocol._transport.resume_reading() + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. 
If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. + Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + self._ssl_protocol._transport.set_write_buffer_limits(high, low) + + def get_write_buffer_size(self): + """Return the current size of the write buffer.""" + return self._ssl_protocol._transport.get_write_buffer_size() + + def get_write_buffer_limits(self): + """Get the high and low watermarks for write flow control. + Return a tuple (low, high) where low and high are + positive number of bytes.""" + return self._ssl_protocol._transport.get_write_buffer_limits() + + @property + def _protocol_paused(self): + # Required for sendfile fallback pause_writing/resume_writing logic + return self._ssl_protocol._transport._protocol_paused + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + if not isinstance(data, (bytes, bytearray, memoryview)): + raise TypeError(f"data: expecting a bytes-like instance, " + f"got {type(data).__name__}") + if not data: + return + self._ssl_protocol._write_appdata(data) + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + return False + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + self._ssl_protocol._abort() + self._closed = True + + +class SSLProtocol(protocols.Protocol): + """SSL protocol. + + Implementation of SSL on top of a socket using incoming and outgoing + buffers which are ssl.MemoryBIO objects. + """ + + def __init__(self, loop, app_protocol, sslcontext, waiter, + server_side=False, server_hostname=None, + call_connection_made=True, + ssl_handshake_timeout=None): + if ssl is None: + raise RuntimeError('stdlib ssl module not available') + + if ssl_handshake_timeout is None: + ssl_handshake_timeout = constants.SSL_HANDSHAKE_TIMEOUT + elif ssl_handshake_timeout <= 0: + raise ValueError( + f"ssl_handshake_timeout should be a positive number, " + f"got {ssl_handshake_timeout}") + + if not sslcontext: + sslcontext = _create_transport_context( + server_side, server_hostname) + + self._server_side = server_side + if server_hostname and not server_side: + self._server_hostname = server_hostname + else: + self._server_hostname = None + self._sslcontext = sslcontext + # SSL-specific extra info. More info are set when the handshake + # completes. 
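+ # (peercert, cipher, compression and ssl_object are added to this dict
+ # by _on_handshake_complete() once the handshake succeeds.)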
+ self._extra = dict(sslcontext=sslcontext) + + # App data write buffering + self._write_backlog = collections.deque() + self._write_buffer_size = 0 + + self._waiter = waiter + self._loop = loop + self._set_app_protocol(app_protocol) + self._app_transport = _SSLProtocolTransport(self._loop, self) + # _SSLPipe instance (None until the connection is made) + self._sslpipe = None + self._session_established = False + self._in_handshake = False + self._in_shutdown = False + # transport, ex: SelectorSocketTransport + self._transport = None + self._call_connection_made = call_connection_made + self._ssl_handshake_timeout = ssl_handshake_timeout + + def _set_app_protocol(self, app_protocol): + self._app_protocol = app_protocol + self._app_protocol_is_buffer = \ + isinstance(app_protocol, protocols.BufferedProtocol) + + def _wakeup_waiter(self, exc=None): + if self._waiter is None: + return + if not self._waiter.cancelled(): + if exc is not None: + self._waiter.set_exception(exc) + else: + self._waiter.set_result(None) + self._waiter = None + + def connection_made(self, transport): + """Called when the low-level connection is made. + + Start the SSL handshake. + """ + self._transport = transport + self._sslpipe = _SSLPipe(self._sslcontext, + self._server_side, + self._server_hostname) + self._start_handshake() + + def connection_lost(self, exc): + """Called when the low-level connection is lost or closed. + + The argument is an exception object or None (the latter + meaning a regular EOF is received or the connection was + aborted or closed). + """ + if self._session_established: + self._session_established = False + self._loop.call_soon(self._app_protocol.connection_lost, exc) + else: + # Most likely an exception occurred while in SSL handshake. + # Just mark the app transport as closed so that its __del__ + # doesn't complain. + if self._app_transport is not None: + self._app_transport._closed = True + self._transport = None + self._app_transport = None + if getattr(self, '_handshake_timeout_handle', None): + self._handshake_timeout_handle.cancel() + self._wakeup_waiter(exc) + self._app_protocol = None + self._sslpipe = None + + def pause_writing(self): + """Called when the low-level transport's buffer goes over + the high-water mark. + """ + self._app_protocol.pause_writing() + + def resume_writing(self): + """Called when the low-level transport's buffer drains below + the low-water mark. + """ + self._app_protocol.resume_writing() + + def data_received(self, data): + """Called when some SSL data is received. + + The argument is a bytes object. + """ + if self._sslpipe is None: + # transport closing, sslpipe is destroyed + return + + try: + ssldata, appdata = self._sslpipe.feed_ssldata(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as e: + self._fatal_error(e, 'SSL error in data received') + return + + for chunk in ssldata: + self._transport.write(chunk) + + for chunk in appdata: + if chunk: + try: + if self._app_protocol_is_buffer: + protocols._feed_data_to_buffered_proto( + self._app_protocol, chunk) + else: + self._app_protocol.data_received(chunk) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as ex: + self._fatal_error( + ex, 'application protocol failed to receive SSL data') + return + else: + self._start_shutdown() + break + + def eof_received(self): + """Called when the other end of the low-level stream + is half-closed. + + If this returns a false value (including None), the transport + will close itself. 
If it returns a true value, closing the + transport is up to the protocol. + """ + try: + if self._loop.get_debug(): + logger.debug("%r received EOF", self) + + self._wakeup_waiter(ConnectionResetError) + + if not self._in_handshake: + keep_open = self._app_protocol.eof_received() + if keep_open: + logger.warning('returning true from eof_received() ' + 'has no effect when using ssl') + finally: + self._transport.close() + + def _get_extra_info(self, name, default=None): + if name in self._extra: + return self._extra[name] + elif self._transport is not None: + return self._transport.get_extra_info(name, default) + else: + return default + + def _start_shutdown(self): + if self._in_shutdown: + return + if self._in_handshake: + self._abort() + else: + self._in_shutdown = True + self._write_appdata(b'') + + def _write_appdata(self, data): + self._write_backlog.append((data, 0)) + self._write_buffer_size += len(data) + self._process_write_backlog() + + def _start_handshake(self): + if self._loop.get_debug(): + logger.debug("%r starts SSL handshake", self) + self._handshake_start_time = self._loop.time() + else: + self._handshake_start_time = None + self._in_handshake = True + # (b'', 1) is a special value in _process_write_backlog() to do + # the SSL handshake + self._write_backlog.append((b'', 1)) + self._handshake_timeout_handle = \ + self._loop.call_later(self._ssl_handshake_timeout, + self._check_handshake_timeout) + self._process_write_backlog() + + def _check_handshake_timeout(self): + if self._in_handshake is True: + msg = ( + f"SSL handshake is taking longer than " + f"{self._ssl_handshake_timeout} seconds: " + f"aborting the connection" + ) + self._fatal_error(ConnectionAbortedError(msg)) + + def _on_handshake_complete(self, handshake_exc): + self._in_handshake = False + self._handshake_timeout_handle.cancel() + + sslobj = self._sslpipe.ssl_object + try: + if handshake_exc is not None: + raise handshake_exc + + peercert = sslobj.getpeercert() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if isinstance(exc, ssl.CertificateError): + msg = 'SSL handshake failed on verifying the certificate' + else: + msg = 'SSL handshake failed' + self._fatal_error(exc, msg) + return + + if self._loop.get_debug(): + dt = self._loop.time() - self._handshake_start_time + logger.debug("%r: SSL handshake took %.1f ms", self, dt * 1e3) + + # Add extra info that becomes available after handshake. + self._extra.update(peercert=peercert, + cipher=sslobj.cipher(), + compression=sslobj.compression(), + ssl_object=sslobj, + ) + if self._call_connection_made: + self._app_protocol.connection_made(self._app_transport) + self._wakeup_waiter() + self._session_established = True + # In case transport.write() was already called. Don't call + # immediately _process_write_backlog(), but schedule it: + # _on_handshake_complete() can be called indirectly from + # _process_write_backlog(), and _process_write_backlog() is not + # reentrant. + self._loop.call_soon(self._process_write_backlog) + + def _process_write_backlog(self): + # Try to make progress on the write backlog. 
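+ # Each backlog entry is a (data, offset) pair: (b'', 1) is the special
+ # handshake marker appended by _start_handshake(), while the (b'', 0)
+ # entry appended via _write_appdata(b'') in _start_shutdown() selects
+ # the shutdown branch below.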
+ if self._transport is None or self._sslpipe is None: + return + + try: + for i in range(len(self._write_backlog)): + data, offset = self._write_backlog[0] + if data: + ssldata, offset = self._sslpipe.feed_appdata(data, offset) + elif offset: + ssldata = self._sslpipe.do_handshake( + self._on_handshake_complete) + offset = 1 + else: + ssldata = self._sslpipe.shutdown(self._finalize) + offset = 1 + + for chunk in ssldata: + self._transport.write(chunk) + + if offset < len(data): + self._write_backlog[0] = (data, offset) + # A short write means that a write is blocked on a read + # We need to enable reading if it is paused! + assert self._sslpipe.need_ssldata + if self._transport._paused: + self._transport.resume_reading() + break + + # An entire chunk from the backlog was processed. We can + # delete it and reduce the outstanding buffer size. + del self._write_backlog[0] + self._write_buffer_size -= len(data) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if self._in_handshake: + # Exceptions will be re-raised in _on_handshake_complete. + self._on_handshake_complete(exc) + else: + self._fatal_error(exc, 'Fatal error on SSL transport') + + def _fatal_error(self, exc, message='Fatal error on transport'): + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self._transport, + 'protocol': self, + }) + if self._transport: + self._transport._force_close(exc) + + def _finalize(self): + self._sslpipe = None + + if self._transport is not None: + self._transport.close() + + def _abort(self): + try: + if self._transport is not None: + self._transport.abort() + finally: + self._finalize() diff --git a/python310/asyncio/staggered.py b/python310/asyncio/staggered.py new file mode 100644 index 0000000000000000000000000000000000000000..451a53a16f3831611b6fc9b55f9eff5468924793 --- /dev/null +++ b/python310/asyncio/staggered.py @@ -0,0 +1,149 @@ +"""Support for running coroutines in parallel with staggered start times.""" + +__all__ = 'staggered_race', + +import contextlib +import typing + +from . import events +from . import exceptions as exceptions_mod +from . import locks +from . import tasks + + +async def staggered_race( + coro_fns: typing.Iterable[typing.Callable[[], typing.Awaitable]], + delay: typing.Optional[float], + *, + loop: events.AbstractEventLoop = None, +) -> typing.Tuple[ + typing.Any, + typing.Optional[int], + typing.List[typing.Optional[Exception]] +]: + """Run coroutines with staggered start times and take the first to finish. + + This method takes an iterable of coroutine functions. The first one is + started immediately. From then on, whenever the immediately preceding one + fails (raises an exception), or when *delay* seconds has passed, the next + coroutine is started. This continues until one of the coroutines complete + successfully, in which case all others are cancelled, or until all + coroutines fail. + + The coroutines provided should be well-behaved in the following way: + + * They should only ``return`` if completed successfully. + + * They should always raise an exception if they did not complete + successfully. In particular, if they handle cancellation, they should + probably reraise, like this:: + + try: + # do work + except asyncio.CancelledError: + # undo partially completed work + raise + + Args: + coro_fns: an iterable of coroutine functions, i.e. 
callables that + return a coroutine object when called. Use ``functools.partial`` or + lambdas to pass arguments. + + delay: amount of time, in seconds, between starting coroutines. If + ``None``, the coroutines will run sequentially. + + loop: the event loop to use. + + Returns: + tuple *(winner_result, winner_index, exceptions)* where + + - *winner_result*: the result of the winning coroutine, or ``None`` + if no coroutines won. + + - *winner_index*: the index of the winning coroutine in + ``coro_fns``, or ``None`` if no coroutines won. If the winning + coroutine may return None on success, *winner_index* can be used + to definitively determine whether any coroutine won. + + - *exceptions*: list of exceptions returned by the coroutines. + ``len(exceptions)`` is equal to the number of coroutines actually + started, and the order is the same as in ``coro_fns``. The winning + coroutine's entry is ``None``. + + """ + # TODO: when we have aiter() and anext(), allow async iterables in coro_fns. + loop = loop or events.get_running_loop() + enum_coro_fns = enumerate(coro_fns) + winner_result = None + winner_index = None + exceptions = [] + running_tasks = [] + + async def run_one_coro( + previous_failed: typing.Optional[locks.Event]) -> None: + # Wait for the previous task to finish, or for delay seconds + if previous_failed is not None: + with contextlib.suppress(exceptions_mod.TimeoutError): + # Use asyncio.wait_for() instead of asyncio.wait() here, so + # that if we get cancelled at this point, Event.wait() is also + # cancelled, otherwise there will be a "Task destroyed but it is + # pending" later. + await tasks.wait_for(previous_failed.wait(), delay) + # Get the next coroutine to run + try: + this_index, coro_fn = next(enum_coro_fns) + except StopIteration: + return + # Start task that will run the next coroutine + this_failed = locks.Event() + next_task = loop.create_task(run_one_coro(this_failed)) + running_tasks.append(next_task) + assert len(running_tasks) == this_index + 2 + # Prepare place to put this coroutine's exceptions if not won + exceptions.append(None) + assert len(exceptions) == this_index + 1 + + try: + result = await coro_fn() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as e: + exceptions[this_index] = e + this_failed.set() # Kickstart the next coroutine + else: + # Store winner's results + nonlocal winner_index, winner_result + assert winner_index is None + winner_index = this_index + winner_result = result + # Cancel all other tasks. We take care to not cancel the current + # task as well. If we do so, then since there is no `await` after + # here and CancelledError are usually thrown at one, we will + # encounter a curious corner case where the current task will end + # up as done() == True, cancelled() == False, exception() == + # asyncio.CancelledError. This behavior is specified in + # https://bugs.python.org/issue30048 + for i, t in enumerate(running_tasks): + if i != this_index: + t.cancel() + + first_task = loop.create_task(run_one_coro(None)) + running_tasks.append(first_task) + try: + # Wait for a growing list of tasks to all finish: poor man's version of + # curio's TaskGroup or trio's nursery + done_count = 0 + while done_count != len(running_tasks): + done, _ = await tasks.wait(running_tasks) + done_count = len(done) + # If run_one_coro raises an unhandled exception, it's probably a + # programming error, and I want to see it. 
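+ # __debug__ is False when Python runs with -O, so this re-raise is
+ # skipped in optimized mode, much like an assert statement.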
+ if __debug__: + for d in done: + if d.done() and not d.cancelled() and d.exception(): + raise d.exception() + return winner_result, winner_index, exceptions + finally: + # Make sure no tasks are left running if we leave this function + for t in running_tasks: + t.cancel() diff --git a/python310/asyncio/streams.py b/python310/asyncio/streams.py new file mode 100644 index 0000000000000000000000000000000000000000..37fa7dca7d8a3ca79efde98ed3354557f185d08d --- /dev/null +++ b/python310/asyncio/streams.py @@ -0,0 +1,726 @@ +__all__ = ( + 'StreamReader', 'StreamWriter', 'StreamReaderProtocol', + 'open_connection', 'start_server') + +import collections +import socket +import sys +import warnings +import weakref + +if hasattr(socket, 'AF_UNIX'): + __all__ += ('open_unix_connection', 'start_unix_server') + +from . import coroutines +from . import events +from . import exceptions +from . import format_helpers +from . import protocols +from .log import logger +from .tasks import sleep + + +_DEFAULT_LIMIT = 2 ** 16 # 64 KiB + + +async def open_connection(host=None, port=None, *, + limit=_DEFAULT_LIMIT, **kwds): + """A wrapper for create_connection() returning a (reader, writer) pair. + + The reader returned is a StreamReader instance; the writer is a + StreamWriter instance. + + The arguments are all the usual arguments to create_connection() + except protocol_factory; most common are positional host and port, + with various optional keyword arguments following. + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + (If you want to customize the StreamReader and/or + StreamReaderProtocol classes, just copy the code -- there's + really nothing special here except some convenience.) + """ + loop = events.get_running_loop() + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, loop=loop) + transport, _ = await loop.create_connection( + lambda: protocol, host, port, **kwds) + writer = StreamWriter(transport, protocol, reader, loop) + return reader, writer + + +async def start_server(client_connected_cb, host=None, port=None, *, + limit=_DEFAULT_LIMIT, **kwds): + """Start a socket server, call back for each client connected. + + The first parameter, `client_connected_cb`, takes two parameters: + client_reader, client_writer. client_reader is a StreamReader + object, while client_writer is a StreamWriter object. This + parameter can either be a plain callback function or a coroutine; + if it is a coroutine, it will be automatically converted into a + Task. + + The rest of the arguments are all the usual arguments to + loop.create_server() except protocol_factory; most common are + positional host and port, with various optional keyword arguments + following. The return value is the same as loop.create_server(). + + Additional optional keyword arguments are loop (to set the event loop + instance to use) and limit (to set the buffer limit passed to the + StreamReader). + + The return value is the same as loop.create_server(), i.e. a + Server object which can be used to stop the service. 
+ """ + loop = events.get_running_loop() + + def factory(): + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, client_connected_cb, + loop=loop) + return protocol + + return await loop.create_server(factory, host, port, **kwds) + + +if hasattr(socket, 'AF_UNIX'): + # UNIX Domain Sockets are supported on this platform + + async def open_unix_connection(path=None, *, + limit=_DEFAULT_LIMIT, **kwds): + """Similar to `open_connection` but works with UNIX Domain Sockets.""" + loop = events.get_running_loop() + + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, loop=loop) + transport, _ = await loop.create_unix_connection( + lambda: protocol, path, **kwds) + writer = StreamWriter(transport, protocol, reader, loop) + return reader, writer + + async def start_unix_server(client_connected_cb, path=None, *, + limit=_DEFAULT_LIMIT, **kwds): + """Similar to `start_server` but works with UNIX Domain Sockets.""" + loop = events.get_running_loop() + + def factory(): + reader = StreamReader(limit=limit, loop=loop) + protocol = StreamReaderProtocol(reader, client_connected_cb, + loop=loop) + return protocol + + return await loop.create_unix_server(factory, path, **kwds) + + +class FlowControlMixin(protocols.Protocol): + """Reusable flow control logic for StreamWriter.drain(). + + This implements the protocol methods pause_writing(), + resume_writing() and connection_lost(). If the subclass overrides + these it must call the super methods. + + StreamWriter.drain() must wait for _drain_helper() coroutine. + """ + + def __init__(self, loop=None): + if loop is None: + self._loop = events._get_event_loop(stacklevel=4) + else: + self._loop = loop + self._paused = False + self._drain_waiters = collections.deque() + self._connection_lost = False + + def pause_writing(self): + assert not self._paused + self._paused = True + if self._loop.get_debug(): + logger.debug("%r pauses writing", self) + + def resume_writing(self): + assert self._paused + self._paused = False + if self._loop.get_debug(): + logger.debug("%r resumes writing", self) + + for waiter in self._drain_waiters: + if not waiter.done(): + waiter.set_result(None) + + def connection_lost(self, exc): + self._connection_lost = True + # Wake up the writer(s) if currently paused. + if not self._paused: + return + + for waiter in self._drain_waiters: + if not waiter.done(): + if exc is None: + waiter.set_result(None) + else: + waiter.set_exception(exc) + + async def _drain_helper(self): + if self._connection_lost: + raise ConnectionResetError('Connection lost') + if not self._paused: + return + waiter = self._loop.create_future() + self._drain_waiters.append(waiter) + try: + await waiter + finally: + self._drain_waiters.remove(waiter) + + def _get_close_waiter(self, stream): + raise NotImplementedError + + +class StreamReaderProtocol(FlowControlMixin, protocols.Protocol): + """Helper class to adapt between Protocol and StreamReader. + + (This is a helper class instead of making StreamReader itself a + Protocol subclass, because the StreamReader has other potential + uses, and to prevent the user of the StreamReader to accidentally + call inappropriate methods of the protocol.) 
+ """ + + _source_traceback = None + + def __init__(self, stream_reader, client_connected_cb=None, loop=None): + super().__init__(loop=loop) + if stream_reader is not None: + self._stream_reader_wr = weakref.ref(stream_reader) + self._source_traceback = stream_reader._source_traceback + else: + self._stream_reader_wr = None + if client_connected_cb is not None: + # This is a stream created by the `create_server()` function. + # Keep a strong reference to the reader until a connection + # is established. + self._strong_reader = stream_reader + self._reject_connection = False + self._stream_writer = None + self._task = None + self._transport = None + self._client_connected_cb = client_connected_cb + self._over_ssl = False + self._closed = self._loop.create_future() + + @property + def _stream_reader(self): + if self._stream_reader_wr is None: + return None + return self._stream_reader_wr() + + def connection_made(self, transport): + if self._reject_connection: + context = { + 'message': ('An open stream was garbage collected prior to ' + 'establishing network connection; ' + 'call "stream.close()" explicitly.') + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + transport.abort() + return + self._transport = transport + reader = self._stream_reader + if reader is not None: + reader.set_transport(transport) + self._over_ssl = transport.get_extra_info('sslcontext') is not None + if self._client_connected_cb is not None: + self._stream_writer = StreamWriter(transport, self, + reader, + self._loop) + res = self._client_connected_cb(reader, + self._stream_writer) + if coroutines.iscoroutine(res): + self._task = self._loop.create_task(res) + self._strong_reader = None + + def connection_lost(self, exc): + reader = self._stream_reader + if reader is not None: + if exc is None: + reader.feed_eof() + else: + reader.set_exception(exc) + if not self._closed.done(): + if exc is None: + self._closed.set_result(None) + else: + self._closed.set_exception(exc) + super().connection_lost(exc) + self._stream_reader_wr = None + self._stream_writer = None + self._task = None + self._transport = None + + def data_received(self, data): + reader = self._stream_reader + if reader is not None: + reader.feed_data(data) + + def eof_received(self): + reader = self._stream_reader + if reader is not None: + reader.feed_eof() + if self._over_ssl: + # Prevent a warning in SSLProtocol.eof_received: + # "returning true from eof_received() + # has no effect when using ssl" + return False + return True + + def _get_close_waiter(self, stream): + return self._closed + + def __del__(self): + # Prevent reports about unhandled exceptions. + # Better than self._closed._log_traceback = False hack + try: + closed = self._closed + except AttributeError: + pass # failed constructor + else: + if closed.done() and not closed.cancelled(): + closed.exception() + + +class StreamWriter: + """Wraps a Transport. + + This exposes write(), writelines(), [can_]write_eof(), + get_extra_info() and close(). It adds drain() which returns an + optional Future on which you can wait for flow control. It also + adds a transport property which references the Transport + directly. 
+ """ + + def __init__(self, transport, protocol, reader, loop): + self._transport = transport + self._protocol = protocol + # drain() expects that the reader has an exception() method + assert reader is None or isinstance(reader, StreamReader) + self._reader = reader + self._loop = loop + self._complete_fut = self._loop.create_future() + self._complete_fut.set_result(None) + + def __repr__(self): + info = [self.__class__.__name__, f'transport={self._transport!r}'] + if self._reader is not None: + info.append(f'reader={self._reader!r}') + return '<{}>'.format(' '.join(info)) + + @property + def transport(self): + return self._transport + + def write(self, data): + self._transport.write(data) + + def writelines(self, data): + self._transport.writelines(data) + + def write_eof(self): + return self._transport.write_eof() + + def can_write_eof(self): + return self._transport.can_write_eof() + + def close(self): + return self._transport.close() + + def is_closing(self): + return self._transport.is_closing() + + async def wait_closed(self): + await self._protocol._get_close_waiter(self) + + def get_extra_info(self, name, default=None): + return self._transport.get_extra_info(name, default) + + async def drain(self): + """Flush the write buffer. + + The intended use is to write + + w.write(data) + await w.drain() + """ + if self._reader is not None: + exc = self._reader.exception() + if exc is not None: + raise exc + if self._transport.is_closing(): + # Wait for protocol.connection_lost() call + # Raise connection closing error if any, + # ConnectionResetError otherwise + # Yield to the event loop so connection_lost() may be + # called. Without this, _drain_helper() would return + # immediately, and code that calls + # write(...); await drain() + # in a loop would never call connection_lost(), so it + # would not see an error when the socket is closed. + await sleep(0) + await self._protocol._drain_helper() + + +class StreamReader: + + _source_traceback = None + + def __init__(self, limit=_DEFAULT_LIMIT, loop=None): + # The line length limit is a security feature; + # it also doubles as half the buffer limit. + + if limit <= 0: + raise ValueError('Limit cannot be <= 0') + + self._limit = limit + if loop is None: + self._loop = events._get_event_loop() + else: + self._loop = loop + self._buffer = bytearray() + self._eof = False # Whether we're done. 
+ self._waiter = None # A future used by _wait_for_data() + self._exception = None + self._transport = None + self._paused = False + if self._loop.get_debug(): + self._source_traceback = format_helpers.extract_stack( + sys._getframe(1)) + + def __repr__(self): + info = ['StreamReader'] + if self._buffer: + info.append(f'{len(self._buffer)} bytes') + if self._eof: + info.append('eof') + if self._limit != _DEFAULT_LIMIT: + info.append(f'limit={self._limit}') + if self._waiter: + info.append(f'waiter={self._waiter!r}') + if self._exception: + info.append(f'exception={self._exception!r}') + if self._transport: + info.append(f'transport={self._transport!r}') + if self._paused: + info.append('paused') + return '<{}>'.format(' '.join(info)) + + def exception(self): + return self._exception + + def set_exception(self, exc): + self._exception = exc + + waiter = self._waiter + if waiter is not None: + self._waiter = None + if not waiter.cancelled(): + waiter.set_exception(exc) + + def _wakeup_waiter(self): + """Wakeup read*() functions waiting for data or EOF.""" + waiter = self._waiter + if waiter is not None: + self._waiter = None + if not waiter.cancelled(): + waiter.set_result(None) + + def set_transport(self, transport): + assert self._transport is None, 'Transport already set' + self._transport = transport + + def _maybe_resume_transport(self): + if self._paused and len(self._buffer) <= self._limit: + self._paused = False + self._transport.resume_reading() + + def feed_eof(self): + self._eof = True + self._wakeup_waiter() + + def at_eof(self): + """Return True if the buffer is empty and 'feed_eof' was called.""" + return self._eof and not self._buffer + + def feed_data(self, data): + assert not self._eof, 'feed_data after feed_eof' + + if not data: + return + + self._buffer.extend(data) + self._wakeup_waiter() + + if (self._transport is not None and + not self._paused and + len(self._buffer) > 2 * self._limit): + try: + self._transport.pause_reading() + except NotImplementedError: + # The transport can't be paused. + # We'll just have to buffer all data. + # Forget the transport so we don't keep trying. + self._transport = None + else: + self._paused = True + + async def _wait_for_data(self, func_name): + """Wait until feed_data() or feed_eof() is called. + + If stream was paused, automatically resume it. + """ + # StreamReader uses a future to link the protocol feed_data() method + # to a read coroutine. Running two read coroutines at the same time + # would have an unexpected behaviour. It would not possible to know + # which coroutine would get the next data. + if self._waiter is not None: + raise RuntimeError( + f'{func_name}() called while another coroutine is ' + f'already waiting for incoming data') + + assert not self._eof, '_wait_for_data after EOF' + + # Waiting for data while paused will make deadlock, so prevent it. + # This is essential for readexactly(n) for case when n > self._limit. + if self._paused: + self._paused = False + self._transport.resume_reading() + + self._waiter = self._loop.create_future() + try: + await self._waiter + finally: + self._waiter = None + + async def readline(self): + """Read chunk of data from the stream until newline (b'\n') is found. + + On success, return chunk that ends with newline. If only partial + line can be read due to EOF, return incomplete line without + terminating newline. When EOF was reached while no bytes read, empty + bytes object is returned. + + If limit is reached, ValueError will be raised. 
In that case, if + newline was found, complete line including newline will be removed + from internal buffer. Else, internal buffer will be cleared. Limit is + compared against part of the line without newline. + + If stream was paused, this function will automatically resume it if + needed. + """ + sep = b'\n' + seplen = len(sep) + try: + line = await self.readuntil(sep) + except exceptions.IncompleteReadError as e: + return e.partial + except exceptions.LimitOverrunError as e: + if self._buffer.startswith(sep, e.consumed): + del self._buffer[:e.consumed + seplen] + else: + self._buffer.clear() + self._maybe_resume_transport() + raise ValueError(e.args[0]) + return line + + async def readuntil(self, separator=b'\n'): + """Read data from the stream until ``separator`` is found. + + On success, the data and separator will be removed from the + internal buffer (consumed). Returned data will include the + separator at the end. + + Configured stream limit is used to check result. Limit sets the + maximal length of data that can be returned, not counting the + separator. + + If an EOF occurs and the complete separator is still not found, + an IncompleteReadError exception will be raised, and the internal + buffer will be reset. The IncompleteReadError.partial attribute + may contain the separator partially. + + If the data cannot be read because of over limit, a + LimitOverrunError exception will be raised, and the data + will be left in the internal buffer, so it can be read again. + """ + seplen = len(separator) + if seplen == 0: + raise ValueError('Separator should be at least one-byte string') + + if self._exception is not None: + raise self._exception + + # Consume whole buffer except last bytes, which length is + # one less than seplen. Let's check corner cases with + # separator='SEPARATOR': + # * we have received almost complete separator (without last + # byte). i.e buffer='some textSEPARATO'. In this case we + # can safely consume len(separator) - 1 bytes. + # * last byte of buffer is first byte of separator, i.e. + # buffer='abcdefghijklmnopqrS'. We may safely consume + # everything except that last byte, but this require to + # analyze bytes of buffer that match partial separator. + # This is slow and/or require FSM. For this case our + # implementation is not optimal, since require rescanning + # of data that is known to not belong to separator. In + # real world, separator will not be so long to notice + # performance problems. Even when reading MIME-encoded + # messages :) + + # `offset` is the number of bytes from the beginning of the buffer + # where there is no occurrence of `separator`. + offset = 0 + + # Loop until we find `separator` in the buffer, exceed the buffer size, + # or an EOF has happened. + while True: + buflen = len(self._buffer) + + # Check if we now have enough data in the buffer for `separator` to + # fit. + if buflen - offset >= seplen: + isep = self._buffer.find(separator, offset) + + if isep != -1: + # `separator` is in the buffer. `isep` will be used later + # to retrieve the data. + break + + # see upper comment for explanation. + offset = buflen + 1 - seplen + if offset > self._limit: + raise exceptions.LimitOverrunError( + 'Separator is not found, and chunk exceed the limit', + offset) + + # Complete message (with full separator) may be present in buffer + # even when EOF flag is set. This may happen when the last chunk + # adds data which makes separator be found. That's why we check for + # EOF *ater* inspecting the buffer. 
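+ # Reaching this point means the separator was not found in the data
+ # buffered so far.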
+ if self._eof: + chunk = bytes(self._buffer) + self._buffer.clear() + raise exceptions.IncompleteReadError(chunk, None) + + # _wait_for_data() will resume reading if stream was paused. + await self._wait_for_data('readuntil') + + if isep > self._limit: + raise exceptions.LimitOverrunError( + 'Separator is found, but chunk is longer than limit', isep) + + chunk = self._buffer[:isep + seplen] + del self._buffer[:isep + seplen] + self._maybe_resume_transport() + return bytes(chunk) + + async def read(self, n=-1): + """Read up to `n` bytes from the stream. + + If `n` is not provided or set to -1, + read until EOF, then return all read bytes. + If EOF was received and the internal buffer is empty, + return an empty bytes object. + + If `n` is 0, return an empty bytes object immediately. + + If `n` is positive, return at most `n` available bytes + as soon as at least 1 byte is available in the internal buffer. + If EOF is received before any byte is read, return an empty + bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + + if self._exception is not None: + raise self._exception + + if n == 0: + return b'' + + if n < 0: + # This used to just loop creating a new waiter hoping to + # collect everything in self._buffer, but that would + # deadlock if the subprocess sends more than self.limit + # bytes. So just call self.read(self._limit) until EOF. + blocks = [] + while True: + block = await self.read(self._limit) + if not block: + break + blocks.append(block) + return b''.join(blocks) + + if not self._buffer and not self._eof: + await self._wait_for_data('read') + + # This will work right even if buffer is less than n bytes + data = bytes(self._buffer[:n]) + del self._buffer[:n] + + self._maybe_resume_transport() + return data + + async def readexactly(self, n): + """Read exactly `n` bytes. + + Raise an IncompleteReadError if EOF is reached before `n` bytes can be + read. The IncompleteReadError.partial attribute of the exception will + contain the partial read bytes. + + if n is zero, return empty bytes object. + + Returned value is not limited with limit, configured at stream + creation. + + If stream was paused, this function will automatically resume it if + needed. + """ + if n < 0: + raise ValueError('readexactly size can not be less than zero') + + if self._exception is not None: + raise self._exception + + if n == 0: + return b'' + + while len(self._buffer) < n: + if self._eof: + incomplete = bytes(self._buffer) + self._buffer.clear() + raise exceptions.IncompleteReadError(incomplete, n) + + await self._wait_for_data('readexactly') + + if len(self._buffer) == n: + data = bytes(self._buffer) + self._buffer.clear() + else: + data = bytes(self._buffer[:n]) + del self._buffer[:n] + self._maybe_resume_transport() + return data + + def __aiter__(self): + return self + + async def __anext__(self): + val = await self.readline() + if val == b'': + raise StopAsyncIteration + return val diff --git a/python310/asyncio/subprocess.py b/python310/asyncio/subprocess.py new file mode 100644 index 0000000000000000000000000000000000000000..cd10231f710f1139590545b3b9e3bd88c12544f2 --- /dev/null +++ b/python310/asyncio/subprocess.py @@ -0,0 +1,223 @@ +__all__ = 'create_subprocess_exec', 'create_subprocess_shell' + +import subprocess + +from . import events +from . import protocols +from . import streams +from . 
import tasks +from .log import logger + + +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +DEVNULL = subprocess.DEVNULL + + +class SubprocessStreamProtocol(streams.FlowControlMixin, + protocols.SubprocessProtocol): + """Like StreamReaderProtocol, but for a subprocess.""" + + def __init__(self, limit, loop): + super().__init__(loop=loop) + self._limit = limit + self.stdin = self.stdout = self.stderr = None + self._transport = None + self._process_exited = False + self._pipe_fds = [] + self._stdin_closed = self._loop.create_future() + + def __repr__(self): + info = [self.__class__.__name__] + if self.stdin is not None: + info.append(f'stdin={self.stdin!r}') + if self.stdout is not None: + info.append(f'stdout={self.stdout!r}') + if self.stderr is not None: + info.append(f'stderr={self.stderr!r}') + return '<{}>'.format(' '.join(info)) + + def connection_made(self, transport): + self._transport = transport + + stdout_transport = transport.get_pipe_transport(1) + if stdout_transport is not None: + self.stdout = streams.StreamReader(limit=self._limit, + loop=self._loop) + self.stdout.set_transport(stdout_transport) + self._pipe_fds.append(1) + + stderr_transport = transport.get_pipe_transport(2) + if stderr_transport is not None: + self.stderr = streams.StreamReader(limit=self._limit, + loop=self._loop) + self.stderr.set_transport(stderr_transport) + self._pipe_fds.append(2) + + stdin_transport = transport.get_pipe_transport(0) + if stdin_transport is not None: + self.stdin = streams.StreamWriter(stdin_transport, + protocol=self, + reader=None, + loop=self._loop) + + def pipe_data_received(self, fd, data): + if fd == 1: + reader = self.stdout + elif fd == 2: + reader = self.stderr + else: + reader = None + if reader is not None: + reader.feed_data(data) + + def pipe_connection_lost(self, fd, exc): + if fd == 0: + pipe = self.stdin + if pipe is not None: + pipe.close() + self.connection_lost(exc) + if exc is None: + self._stdin_closed.set_result(None) + else: + self._stdin_closed.set_exception(exc) + return + if fd == 1: + reader = self.stdout + elif fd == 2: + reader = self.stderr + else: + reader = None + if reader is not None: + if exc is None: + reader.feed_eof() + else: + reader.set_exception(exc) + + if fd in self._pipe_fds: + self._pipe_fds.remove(fd) + self._maybe_close_transport() + + def process_exited(self): + self._process_exited = True + self._maybe_close_transport() + + def _maybe_close_transport(self): + if len(self._pipe_fds) == 0 and self._process_exited: + self._transport.close() + self._transport = None + + def _get_close_waiter(self, stream): + if stream is self.stdin: + return self._stdin_closed + + +class Process: + def __init__(self, transport, protocol, loop): + self._transport = transport + self._protocol = protocol + self._loop = loop + self.stdin = protocol.stdin + self.stdout = protocol.stdout + self.stderr = protocol.stderr + self.pid = transport.get_pid() + + def __repr__(self): + return f'<{self.__class__.__name__} {self.pid}>' + + @property + def returncode(self): + return self._transport.get_returncode() + + async def wait(self): + """Wait until the process exit and return the process return code.""" + return await self._transport._wait() + + def send_signal(self, signal): + self._transport.send_signal(signal) + + def terminate(self): + self._transport.terminate() + + def kill(self): + self._transport.kill() + + async def _feed_stdin(self, input): + debug = self._loop.get_debug() + self.stdin.write(input) + if debug: + logger.debug( + '%r communicate: feed 
stdin (%s bytes)', self, len(input)) + try: + await self.stdin.drain() + except (BrokenPipeError, ConnectionResetError) as exc: + # communicate() ignores BrokenPipeError and ConnectionResetError + if debug: + logger.debug('%r communicate: stdin got %r', self, exc) + + if debug: + logger.debug('%r communicate: close stdin', self) + self.stdin.close() + + async def _noop(self): + return None + + async def _read_stream(self, fd): + transport = self._transport.get_pipe_transport(fd) + if fd == 2: + stream = self.stderr + else: + assert fd == 1 + stream = self.stdout + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: read %s', self, name) + output = await stream.read() + if self._loop.get_debug(): + name = 'stdout' if fd == 1 else 'stderr' + logger.debug('%r communicate: close %s', self, name) + transport.close() + return output + + async def communicate(self, input=None): + if input is not None: + stdin = self._feed_stdin(input) + else: + stdin = self._noop() + if self.stdout is not None: + stdout = self._read_stream(1) + else: + stdout = self._noop() + if self.stderr is not None: + stderr = self._read_stream(2) + else: + stderr = self._noop() + stdin, stdout, stderr = await tasks.gather(stdin, stdout, stderr) + await self.wait() + return (stdout, stderr) + + +async def create_subprocess_shell(cmd, stdin=None, stdout=None, stderr=None, + limit=streams._DEFAULT_LIMIT, **kwds): + loop = events.get_running_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = await loop.subprocess_shell( + protocol_factory, + cmd, stdin=stdin, stdout=stdout, + stderr=stderr, **kwds) + return Process(transport, protocol, loop) + + +async def create_subprocess_exec(program, *args, stdin=None, stdout=None, + stderr=None, limit=streams._DEFAULT_LIMIT, + **kwds): + loop = events.get_running_loop() + protocol_factory = lambda: SubprocessStreamProtocol(limit=limit, + loop=loop) + transport, protocol = await loop.subprocess_exec( + protocol_factory, + program, *args, + stdin=stdin, stdout=stdout, + stderr=stderr, **kwds) + return Process(transport, protocol, loop) diff --git a/python310/asyncio/tasks.py b/python310/asyncio/tasks.py new file mode 100644 index 0000000000000000000000000000000000000000..f391586d128730328c424541420db6a02fcf39c7 --- /dev/null +++ b/python310/asyncio/tasks.py @@ -0,0 +1,946 @@ +"""Support for tasks, coroutines and the scheduler.""" + +__all__ = ( + 'Task', 'create_task', + 'FIRST_COMPLETED', 'FIRST_EXCEPTION', 'ALL_COMPLETED', + 'wait', 'wait_for', 'as_completed', 'sleep', + 'gather', 'shield', 'ensure_future', 'run_coroutine_threadsafe', + 'current_task', 'all_tasks', + '_register_task', '_unregister_task', '_enter_task', '_leave_task', +) + +import concurrent.futures +import contextvars +import functools +import inspect +import itertools +import types +import warnings +import weakref +from types import GenericAlias + +from . import base_tasks +from . import coroutines +from . import events +from . import exceptions +from . import futures +from .coroutines import _is_coroutine + +# Helper to generate new task names +# This uses itertools.count() instead of a "+= 1" operation because the latter +# is not thread safe. See bpo-11866 for a longer explanation. 
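+# Bound __next__ method of an itertools.count object: each call returns the
+# next integer (1, 2, 3, ...), avoiding a separate read-then-write "+= 1".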
+_task_name_counter = itertools.count(1).__next__ + + +def current_task(loop=None): + """Return a currently executed task.""" + if loop is None: + loop = events.get_running_loop() + return _current_tasks.get(loop) + + +def all_tasks(loop=None): + """Return a set of all tasks for the loop.""" + if loop is None: + loop = events.get_running_loop() + # Looping over a WeakSet (_all_tasks) isn't safe as it can be updated from another + # thread while we do so. Therefore we cast it to list prior to filtering. The list + # cast itself requires iteration, so we repeat it several times ignoring + # RuntimeErrors (which are not very likely to occur). See issues 34970 and 36607 for + # details. + i = 0 + while True: + try: + tasks = list(_all_tasks) + except RuntimeError: + i += 1 + if i >= 1000: + raise + else: + break + return {t for t in tasks + if futures._get_loop(t) is loop and not t.done()} + + +def _set_task_name(task, name): + if name is not None: + try: + set_name = task.set_name + except AttributeError: + pass + else: + set_name(name) + + +class Task(futures._PyFuture): # Inherit Python Task implementation + # from a Python Future implementation. + + """A coroutine wrapped in a Future.""" + + # An important invariant maintained while a Task not done: + # + # - Either _fut_waiter is None, and _step() is scheduled; + # - or _fut_waiter is some Future, and _step() is *not* scheduled. + # + # The only transition from the latter to the former is through + # _wakeup(). When _fut_waiter is not None, one of its callbacks + # must be _wakeup(). + + # If False, don't log a message if the task is destroyed whereas its + # status is still pending + _log_destroy_pending = True + + def __init__(self, coro, *, loop=None, name=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + if not coroutines.iscoroutine(coro): + # raise after Future.__init__(), attrs are required for __del__ + # prevent logging for pending task in __del__ + self._log_destroy_pending = False + raise TypeError(f"a coroutine was expected, got {coro!r}") + + if name is None: + self._name = f'Task-{_task_name_counter()}' + else: + self._name = str(name) + + self._must_cancel = False + self._fut_waiter = None + self._coro = coro + self._context = contextvars.copy_context() + + self._loop.call_soon(self.__step, context=self._context) + _register_task(self) + + def __del__(self): + if self._state == futures._PENDING and self._log_destroy_pending: + context = { + 'task': self, + 'message': 'Task was destroyed but it is pending!', + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + super().__del__() + + __class_getitem__ = classmethod(GenericAlias) + + def _repr_info(self): + return base_tasks._task_repr_info(self) + + def get_coro(self): + return self._coro + + def get_name(self): + return self._name + + def set_name(self, value): + self._name = str(value) + + def set_result(self, result): + raise RuntimeError('Task does not support set_result operation') + + def set_exception(self, exception): + raise RuntimeError('Task does not support set_exception operation') + + def get_stack(self, *, limit=None): + """Return the list of stack frames for this task's coroutine. + + If the coroutine is not done, this returns the stack where it is + suspended. If the coroutine has completed successfully or was + cancelled, this returns an empty list. 
If the coroutine was + terminated by an exception, this returns the list of traceback + frames. + + The frames are always ordered from oldest to newest. + + The optional limit gives the maximum number of frames to + return; by default all available frames are returned. Its + meaning differs depending on whether a stack or a traceback is + returned: the newest frames of a stack are returned, but the + oldest frames of a traceback are returned. (This matches the + behavior of the traceback module.) + + For reasons beyond our control, only one stack frame is + returned for a suspended coroutine. + """ + return base_tasks._task_get_stack(self, limit) + + def print_stack(self, *, limit=None, file=None): + """Print the stack or traceback for this task's coroutine. + + This produces output similar to that of the traceback module, + for the frames retrieved by get_stack(). The limit argument + is passed to get_stack(). The file argument is an I/O stream + to which the output is written; by default output is written + to sys.stderr. + """ + return base_tasks._task_print_stack(self, limit, file) + + def cancel(self, msg=None): + """Request that this task cancel itself. + + This arranges for a CancelledError to be thrown into the + wrapped coroutine on the next cycle through the event loop. + The coroutine then has a chance to clean up or even deny + the request using try/except/finally. + + Unlike Future.cancel, this does not guarantee that the + task will be cancelled: the exception might be caught and + acted upon, delaying cancellation of the task or preventing + cancellation completely. The task may also return a value or + raise a different exception. + + Immediately after this method is called, Task.cancelled() will + not return True (unless the task was already cancelled). A + task will be marked as cancelled when the wrapped coroutine + terminates with a CancelledError exception (even if cancel() + was not called). + """ + self._log_traceback = False + if self.done(): + return False + if self._fut_waiter is not None: + if self._fut_waiter.cancel(msg=msg): + # Leave self._fut_waiter; it may be a Task that + # catches and ignores the cancellation so we may have + # to cancel it again later. + return True + # It must be the case that self.__step is already scheduled. + self._must_cancel = True + self._cancel_message = msg + return True + + def __step(self, exc=None): + if self.done(): + raise exceptions.InvalidStateError( + f'_step(): already done: {self!r}, {exc!r}') + if self._must_cancel: + if not isinstance(exc, exceptions.CancelledError): + exc = self._make_cancelled_error() + self._must_cancel = False + coro = self._coro + self._fut_waiter = None + + _enter_task(self._loop, self) + # Call either coro.throw(exc) or coro.send(None). + try: + if exc is None: + # We use the `send` method directly, because coroutines + # don't have `__iter__` and `__next__` methods. + result = coro.send(None) + else: + result = coro.throw(exc) + except StopIteration as exc: + if self._must_cancel: + # Task is cancelled right before coro stops. + self._must_cancel = False + super().cancel(msg=self._cancel_message) + else: + super().set_result(exc.value) + except exceptions.CancelledError as exc: + # Save the original exception so we can chain it later. + self._cancelled_exc = exc + super().cancel() # I.e., Future.cancel(self). 
+ except (KeyboardInterrupt, SystemExit) as exc: + super().set_exception(exc) + raise + except BaseException as exc: + super().set_exception(exc) + else: + blocking = getattr(result, '_asyncio_future_blocking', None) + if blocking is not None: + # Yielded Future must come from Future.__iter__(). + if futures._get_loop(result) is not self._loop: + new_exc = RuntimeError( + f'Task {self!r} got Future ' + f'{result!r} attached to a different loop') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + elif blocking: + if result is self: + new_exc = RuntimeError( + f'Task cannot await on itself: {self!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + else: + result._asyncio_future_blocking = False + result.add_done_callback( + self.__wakeup, context=self._context) + self._fut_waiter = result + if self._must_cancel: + if self._fut_waiter.cancel( + msg=self._cancel_message): + self._must_cancel = False + else: + new_exc = RuntimeError( + f'yield was used instead of yield from ' + f'in task {self!r} with {result!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + + elif result is None: + # Bare yield relinquishes control for one event loop iteration. + self._loop.call_soon(self.__step, context=self._context) + elif inspect.isgenerator(result): + # Yielding a generator is just wrong. + new_exc = RuntimeError( + f'yield was used instead of yield from for ' + f'generator in task {self!r} with {result!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + else: + # Yielding something else is an error. + new_exc = RuntimeError(f'Task got bad yield: {result!r}') + self._loop.call_soon( + self.__step, new_exc, context=self._context) + finally: + _leave_task(self._loop, self) + self = None # Needed to break cycles when an exception occurs. + + def __wakeup(self, future): + try: + future.result() + except BaseException as exc: + # This may also be a cancellation. + self.__step(exc) + else: + # Don't pass the value of `future.result()` explicitly, + # as `Future.__iter__` and `Future.__await__` don't need it. + # If we call `_step(value, None)` instead of `_step()`, + # Python eval loop would use `.send(value)` method call, + # instead of `__next__()`, which is slower for futures + # that return non-generator iterators from their `__iter__`. + self.__step() + self = None # Needed to break cycles when an exception occurs. + + +_PyTask = Task + + +try: + import _asyncio +except ImportError: + pass +else: + # _CTask is needed for tests. + Task = _CTask = _asyncio.Task + + +def create_task(coro, *, name=None): + """Schedule the execution of a coroutine object in a spawn task. + + Return a Task object. + """ + loop = events.get_running_loop() + task = loop.create_task(coro) + _set_task_name(task, name) + return task + + +# wait() and as_completed() similar to those in PEP 3148. + +FIRST_COMPLETED = concurrent.futures.FIRST_COMPLETED +FIRST_EXCEPTION = concurrent.futures.FIRST_EXCEPTION +ALL_COMPLETED = concurrent.futures.ALL_COMPLETED + + +async def wait(fs, *, timeout=None, return_when=ALL_COMPLETED): + """Wait for the Futures and coroutines given by fs to complete. + + The fs iterable must not be empty. + + Coroutines will be wrapped in Tasks. + + Returns two sets of Future: (done, pending). + + Usage: + + done, pending = await asyncio.wait(fs) + + Note: This does not raise TimeoutError! Futures that aren't done + when the timeout occurs are returned in the second set. 
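+
+    Example (illustrative; *tasks* stands for any collection of
+    Tasks/Futures created by the caller):
+
+        done, pending = await asyncio.wait(
+            tasks, timeout=5, return_when=asyncio.FIRST_COMPLETED)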
+ """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError(f"expect a list of futures, not {type(fs).__name__}") + if not fs: + raise ValueError('Set of coroutines/Futures is empty.') + if return_when not in (FIRST_COMPLETED, FIRST_EXCEPTION, ALL_COMPLETED): + raise ValueError(f'Invalid return_when value: {return_when}') + + loop = events.get_running_loop() + + fs = set(fs) + + if any(coroutines.iscoroutine(f) for f in fs): + warnings.warn("The explicit passing of coroutine objects to " + "asyncio.wait() is deprecated since Python 3.8, and " + "scheduled for removal in Python 3.11.", + DeprecationWarning, stacklevel=2) + + fs = {ensure_future(f, loop=loop) for f in fs} + + return await _wait(fs, timeout, return_when, loop) + + +def _release_waiter(waiter, *args): + if not waiter.done(): + waiter.set_result(None) + + +async def wait_for(fut, timeout): + """Wait for the single Future or coroutine to complete, with timeout. + + Coroutine will be wrapped in Task. + + Returns result of the Future or coroutine. When a timeout occurs, + it cancels the task and raises TimeoutError. To avoid the task + cancellation, wrap it in shield(). + + If the wait is cancelled, the task is also cancelled. + + This function is a coroutine. + """ + loop = events.get_running_loop() + + if timeout is None: + return await fut + + if timeout <= 0: + fut = ensure_future(fut, loop=loop) + + if fut.done(): + return fut.result() + + await _cancel_and_wait(fut, loop=loop) + try: + return fut.result() + except exceptions.CancelledError as exc: + raise exceptions.TimeoutError() from exc + + waiter = loop.create_future() + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + cb = functools.partial(_release_waiter, waiter) + + fut = ensure_future(fut, loop=loop) + fut.add_done_callback(cb) + + try: + # wait until the future completes or the timeout + try: + await waiter + except exceptions.CancelledError: + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + # We must ensure that the task is not running + # after wait_for() returns. + # See https://bugs.python.org/issue32751 + await _cancel_and_wait(fut, loop=loop) + raise + + if fut.done(): + return fut.result() + else: + fut.remove_done_callback(cb) + # We must ensure that the task is not running + # after wait_for() returns. + # See https://bugs.python.org/issue32751 + await _cancel_and_wait(fut, loop=loop) + # In case task cancellation failed with some + # exception, we should re-raise it + # See https://bugs.python.org/issue40607 + try: + return fut.result() + except exceptions.CancelledError as exc: + raise exceptions.TimeoutError() from exc + finally: + timeout_handle.cancel() + + +async def _wait(fs, timeout, return_when, loop): + """Internal helper for wait(). + + The fs argument must be a collection of Futures. + """ + assert fs, 'Set of Futures is empty.' 
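+    # How this works: a single 'waiter' future and one done-callback per
+    # input future; the callback counts completions and resolves 'waiter'
+    # once 'return_when' is satisfied (or the timeout fires), after which
+    # the inputs are split into the (done, pending) result sets.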
+ waiter = loop.create_future() + timeout_handle = None + if timeout is not None: + timeout_handle = loop.call_later(timeout, _release_waiter, waiter) + counter = len(fs) + + def _on_completion(f): + nonlocal counter + counter -= 1 + if (counter <= 0 or + return_when == FIRST_COMPLETED or + return_when == FIRST_EXCEPTION and (not f.cancelled() and + f.exception() is not None)): + if timeout_handle is not None: + timeout_handle.cancel() + if not waiter.done(): + waiter.set_result(None) + + for f in fs: + f.add_done_callback(_on_completion) + + try: + await waiter + finally: + if timeout_handle is not None: + timeout_handle.cancel() + for f in fs: + f.remove_done_callback(_on_completion) + + done, pending = set(), set() + for f in fs: + if f.done(): + done.add(f) + else: + pending.add(f) + return done, pending + + +async def _cancel_and_wait(fut, loop): + """Cancel the *fut* future or task and wait until it completes.""" + + waiter = loop.create_future() + cb = functools.partial(_release_waiter, waiter) + fut.add_done_callback(cb) + + try: + fut.cancel() + # We cannot wait on *fut* directly to make + # sure _cancel_and_wait itself is reliably cancellable. + await waiter + finally: + fut.remove_done_callback(cb) + + +# This is *not* a @coroutine! It is just an iterator (yielding Futures). +def as_completed(fs, *, timeout=None): + """Return an iterator whose values are coroutines. + + When waiting for the yielded coroutines you'll get the results (or + exceptions!) of the original Futures (or coroutines), in the order + in which and as soon as they complete. + + This differs from PEP 3148; the proper way to use this is: + + for f in as_completed(fs): + result = await f # The 'await' may raise. + # Use result. + + If a timeout is specified, the 'await' will raise + TimeoutError when the timeout occurs before all Futures are done. + + Note: The futures 'f' are not necessarily members of fs. + """ + if futures.isfuture(fs) or coroutines.iscoroutine(fs): + raise TypeError(f"expect an iterable of futures, not {type(fs).__name__}") + + from .queues import Queue # Import here to avoid circular import problem. + done = Queue() + + loop = events._get_event_loop() + todo = {ensure_future(f, loop=loop) for f in set(fs)} + timeout_handle = None + + def _on_timeout(): + for f in todo: + f.remove_done_callback(_on_completion) + done.put_nowait(None) # Queue a dummy value for _wait_for_one(). + todo.clear() # Can't do todo.remove(f) in the loop. + + def _on_completion(f): + if not todo: + return # _on_timeout() was here first. + todo.remove(f) + done.put_nowait(f) + if not todo and timeout_handle is not None: + timeout_handle.cancel() + + async def _wait_for_one(): + f = await done.get() + if f is None: + # Dummy value from _on_timeout(). + raise exceptions.TimeoutError + return f.result() # May raise f.exception(). + + for f in todo: + f.add_done_callback(_on_completion) + if todo and timeout is not None: + timeout_handle = loop.call_later(timeout, _on_timeout) + for _ in range(len(todo)): + yield _wait_for_one() + + +@types.coroutine +def __sleep0(): + """Skip one event loop run cycle. + + This is a private helper for 'asyncio.sleep()', used + when the 'delay' is set to 0. It uses a bare 'yield' + expression (which Task.__step knows how to handle) + instead of creating a Future object. 
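+
+    User code reaches this path via 'await asyncio.sleep(0)', the
+    idiomatic way to let other tasks run for one loop iteration.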
+ """ + yield + + +async def sleep(delay, result=None): + """Coroutine that completes after a given time (in seconds).""" + if delay <= 0: + await __sleep0() + return result + + loop = events.get_running_loop() + future = loop.create_future() + h = loop.call_later(delay, + futures._set_result_unless_cancelled, + future, result) + try: + return await future + finally: + h.cancel() + + +def ensure_future(coro_or_future, *, loop=None): + """Wrap a coroutine or an awaitable in a future. + + If the argument is a Future, it is returned directly. + """ + return _ensure_future(coro_or_future, loop=loop) + + +def _ensure_future(coro_or_future, *, loop=None): + if futures.isfuture(coro_or_future): + if loop is not None and loop is not futures._get_loop(coro_or_future): + raise ValueError('The future belongs to a different loop than ' + 'the one specified as the loop argument') + return coro_or_future + called_wrap_awaitable = False + if not coroutines.iscoroutine(coro_or_future): + if inspect.isawaitable(coro_or_future): + coro_or_future = _wrap_awaitable(coro_or_future) + called_wrap_awaitable = True + else: + raise TypeError('An asyncio.Future, a coroutine or an awaitable ' + 'is required') + + if loop is None: + loop = events._get_event_loop(stacklevel=4) + try: + return loop.create_task(coro_or_future) + except RuntimeError: + if not called_wrap_awaitable: + coro_or_future.close() + raise + + +@types.coroutine +def _wrap_awaitable(awaitable): + """Helper for asyncio.ensure_future(). + + Wraps awaitable (an object with __await__) into a coroutine + that will later be wrapped in a Task by ensure_future(). + """ + return (yield from awaitable.__await__()) + +_wrap_awaitable._is_coroutine = _is_coroutine + + +class _GatheringFuture(futures.Future): + """Helper for gather(). + + This overrides cancel() to cancel all the children and act more + like Task.cancel(), which doesn't immediately mark itself as + cancelled. + """ + + def __init__(self, children, *, loop): + assert loop is not None + super().__init__(loop=loop) + self._children = children + self._cancel_requested = False + + def cancel(self, msg=None): + if self.done(): + return False + ret = False + for child in self._children: + if child.cancel(msg=msg): + ret = True + if ret: + # If any child tasks were actually cancelled, we should + # propagate the cancellation request regardless of + # *return_exceptions* argument. See issue 32684. + self._cancel_requested = True + return ret + + +def gather(*coros_or_futures, return_exceptions=False): + """Return a future aggregating results from the given coroutines/futures. + + Coroutines will be wrapped in a future and scheduled in the event + loop. They will not necessarily be scheduled in the same order as + passed in. + + All futures must share the same event loop. If all the tasks are + done successfully, the returned future's result is the list of + results (in the order of the original sequence, not necessarily + the order of results arrival). If *return_exceptions* is True, + exceptions in the tasks are treated the same as successful + results, and gathered in the result list; otherwise, the first + raised exception will be immediately propagated to the returned + future. + + Cancellation: if the outer Future is cancelled, all children (that + have not completed yet) are also cancelled. If any child is + cancelled, this is treated as if it raised CancelledError -- + the outer Future is *not* cancelled in this case. 
(This is to + prevent the cancellation of one child to cause other children to + be cancelled.) + + If *return_exceptions* is False, cancelling gather() after it + has been marked done won't cancel any submitted awaitables. + For instance, gather can be marked done after propagating an + exception to the caller, therefore, calling ``gather.cancel()`` + after catching an exception (raised by one of the awaitables) from + gather won't cancel any other awaitables. + """ + if not coros_or_futures: + loop = events._get_event_loop() + outer = loop.create_future() + outer.set_result([]) + return outer + + def _done_callback(fut): + nonlocal nfinished + nfinished += 1 + + if outer is None or outer.done(): + if not fut.cancelled(): + # Mark exception retrieved. + fut.exception() + return + + if not return_exceptions: + if fut.cancelled(): + # Check if 'fut' is cancelled first, as + # 'fut.exception()' will *raise* a CancelledError + # instead of returning it. + exc = fut._make_cancelled_error() + outer.set_exception(exc) + return + else: + exc = fut.exception() + if exc is not None: + outer.set_exception(exc) + return + + if nfinished == nfuts: + # All futures are done; create a list of results + # and set it to the 'outer' future. + results = [] + + for fut in children: + if fut.cancelled(): + # Check if 'fut' is cancelled first, as 'fut.exception()' + # will *raise* a CancelledError instead of returning it. + # Also, since we're adding the exception return value + # to 'results' instead of raising it, don't bother + # setting __context__. This also lets us preserve + # calling '_make_cancelled_error()' at most once. + res = exceptions.CancelledError( + '' if fut._cancel_message is None else + fut._cancel_message) + else: + res = fut.exception() + if res is None: + res = fut.result() + results.append(res) + + if outer._cancel_requested: + # If gather is being cancelled we must propagate the + # cancellation regardless of *return_exceptions* argument. + # See issue 32684. + exc = fut._make_cancelled_error() + outer.set_exception(exc) + else: + outer.set_result(results) + + arg_to_fut = {} + children = [] + nfuts = 0 + nfinished = 0 + loop = None + outer = None # bpo-46672 + for arg in coros_or_futures: + if arg not in arg_to_fut: + fut = _ensure_future(arg, loop=loop) + if loop is None: + loop = futures._get_loop(fut) + if fut is not arg: + # 'arg' was not a Future, therefore, 'fut' is a new + # Future created specifically for 'arg'. Since the caller + # can't control it, disable the "destroy pending task" + # warning. + fut._log_destroy_pending = False + + nfuts += 1 + arg_to_fut[arg] = fut + fut.add_done_callback(_done_callback) + + else: + # There's a duplicate Future object in coros_or_futures. + fut = arg_to_fut[arg] + + children.append(fut) + + outer = _GatheringFuture(children, loop=loop) + return outer + + +def shield(arg): + """Wait for a future, shielding it from cancellation. + + The statement + + task = asyncio.create_task(something()) + res = await shield(task) + + is exactly equivalent to the statement + + res = await something() + + *except* that if the coroutine containing it is cancelled, the + task running in something() is not cancelled. From the POV of + something(), the cancellation did not happen. But its caller is + still cancelled, so the yield-from expression still raises + CancelledError. Note: If something() is cancelled by other means + this will still cancel shield(). 
+ + If you want to completely ignore cancellation (not recommended) + you can combine shield() with a try/except clause, as follows: + + task = asyncio.create_task(something()) + try: + res = await shield(task) + except CancelledError: + res = None + + Save a reference to tasks passed to this function, to avoid + a task disappearing mid-execution. The event loop only keeps + weak references to tasks. A task that isn't referenced elsewhere + may get garbage collected at any time, even before it's done. + """ + inner = _ensure_future(arg) + if inner.done(): + # Shortcut. + return inner + loop = futures._get_loop(inner) + outer = loop.create_future() + + def _inner_done_callback(inner): + if outer.cancelled(): + if not inner.cancelled(): + # Mark inner's result as retrieved. + inner.exception() + return + + if inner.cancelled(): + outer.cancel() + else: + exc = inner.exception() + if exc is not None: + outer.set_exception(exc) + else: + outer.set_result(inner.result()) + + + def _outer_done_callback(outer): + if not inner.done(): + inner.remove_done_callback(_inner_done_callback) + + inner.add_done_callback(_inner_done_callback) + outer.add_done_callback(_outer_done_callback) + return outer + + +def run_coroutine_threadsafe(coro, loop): + """Submit a coroutine object to a given event loop. + + Return a concurrent.futures.Future to access the result. + """ + if not coroutines.iscoroutine(coro): + raise TypeError('A coroutine object is required') + future = concurrent.futures.Future() + + def callback(): + try: + futures._chain_future(ensure_future(coro, loop=loop), future) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + if future.set_running_or_notify_cancel(): + future.set_exception(exc) + raise + + loop.call_soon_threadsafe(callback) + return future + + +# WeakSet containing all alive tasks. +_all_tasks = weakref.WeakSet() + +# Dictionary containing tasks that are currently active in +# all running event loops. {EventLoop: Task} +_current_tasks = {} + + +def _register_task(task): + """Register a new task in asyncio as executed by loop.""" + _all_tasks.add(task) + + +def _enter_task(loop, task): + current_task = _current_tasks.get(loop) + if current_task is not None: + raise RuntimeError(f"Cannot enter into task {task!r} while another " + f"task {current_task!r} is being executed.") + _current_tasks[loop] = task + + +def _leave_task(loop, task): + current_task = _current_tasks.get(loop) + if current_task is not task: + raise RuntimeError(f"Leaving task {task!r} does not match " + f"the current task {current_task!r}.") + del _current_tasks[loop] + + +def _unregister_task(task): + """Unregister a task.""" + _all_tasks.discard(task) + + +_py_register_task = _register_task +_py_unregister_task = _unregister_task +_py_enter_task = _enter_task +_py_leave_task = _leave_task + + +try: + from _asyncio import (_register_task, _unregister_task, + _enter_task, _leave_task, + _all_tasks, _current_tasks) +except ImportError: + pass +else: + _c_register_task = _register_task + _c_unregister_task = _unregister_task + _c_enter_task = _enter_task + _c_leave_task = _leave_task diff --git a/python310/asyncio/threads.py b/python310/asyncio/threads.py new file mode 100644 index 0000000000000000000000000000000000000000..db048a8231de166e2af4f2e06092d98def9b1703 --- /dev/null +++ b/python310/asyncio/threads.py @@ -0,0 +1,25 @@ +"""High-level support for working with threads in asyncio""" + +import functools +import contextvars + +from . 
import events + + +__all__ = "to_thread", + + +async def to_thread(func, /, *args, **kwargs): + """Asynchronously run function *func* in a separate thread. + + Any *args and **kwargs supplied for this function are directly passed + to *func*. Also, the current :class:`contextvars.Context` is propagated, + allowing context variables from the main thread to be accessed in the + separate thread. + + Return a coroutine that can be awaited to get the eventual result of *func*. + """ + loop = events.get_running_loop() + ctx = contextvars.copy_context() + func_call = functools.partial(ctx.run, func, *args, **kwargs) + return await loop.run_in_executor(None, func_call) diff --git a/python310/asyncio/transports.py b/python310/asyncio/transports.py new file mode 100644 index 0000000000000000000000000000000000000000..73b1fa2de416c7012518df6306ba0845e49d3eaf --- /dev/null +++ b/python310/asyncio/transports.py @@ -0,0 +1,335 @@ +"""Abstract Transport class.""" + +__all__ = ( + 'BaseTransport', 'ReadTransport', 'WriteTransport', + 'Transport', 'DatagramTransport', 'SubprocessTransport', +) + + +class BaseTransport: + """Base class for transports.""" + + __slots__ = ('_extra',) + + def __init__(self, extra=None): + if extra is None: + extra = {} + self._extra = extra + + def get_extra_info(self, name, default=None): + """Get optional transport information.""" + return self._extra.get(name, default) + + def is_closing(self): + """Return True if the transport is closing or closed.""" + raise NotImplementedError + + def close(self): + """Close the transport. + + Buffered data will be flushed asynchronously. No more data + will be received. After all buffered data is flushed, the + protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + def set_protocol(self, protocol): + """Set a new protocol.""" + raise NotImplementedError + + def get_protocol(self): + """Return the current protocol.""" + raise NotImplementedError + + +class ReadTransport(BaseTransport): + """Interface for read-only transports.""" + + __slots__ = () + + def is_reading(self): + """Return True if the transport is receiving.""" + raise NotImplementedError + + def pause_reading(self): + """Pause the receiving end. + + No data will be passed to the protocol's data_received() + method until resume_reading() is called. + """ + raise NotImplementedError + + def resume_reading(self): + """Resume the receiving end. + + Data received will once again be passed to the protocol's + data_received() method. + """ + raise NotImplementedError + + +class WriteTransport(BaseTransport): + """Interface for write-only transports.""" + + __slots__ = () + + def set_write_buffer_limits(self, high=None, low=None): + """Set the high- and low-water limits for write flow control. + + These two values control when to call the protocol's + pause_writing() and resume_writing() methods. If specified, + the low-water limit must be less than or equal to the + high-water limit. Neither value can be negative. + + The defaults are implementation-specific. If only the + high-water limit is given, the low-water limit defaults to an + implementation-specific value less than or equal to the + high-water limit. Setting high to zero forces low to zero as + well, and causes pause_writing() to be called whenever the + buffer becomes non-empty. Setting low to zero causes + resume_writing() to be called only once the buffer is empty. 
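+
+        Illustrative call (the numbers are arbitrary, not the defaults):
+
+            transport.set_write_buffer_limits(high=128 * 1024, low=32 * 1024)
+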
+ Use of zero for either limit is generally sub-optimal as it + reduces opportunities for doing I/O and computation + concurrently. + """ + raise NotImplementedError + + def get_write_buffer_size(self): + """Return the current size of the write buffer.""" + raise NotImplementedError + + def get_write_buffer_limits(self): + """Get the high and low watermarks for write flow control. + Return a tuple (low, high) where low and high are + positive number of bytes.""" + raise NotImplementedError + + def write(self, data): + """Write some data bytes to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + """ + raise NotImplementedError + + def writelines(self, list_of_data): + """Write a list (or any iterable) of data bytes to the transport. + + The default implementation concatenates the arguments and + calls write() on the result. + """ + data = b''.join(list_of_data) + self.write(data) + + def write_eof(self): + """Close the write end after flushing buffered data. + + (This is like typing ^D into a UNIX program reading from stdin.) + + Data may still be received. + """ + raise NotImplementedError + + def can_write_eof(self): + """Return True if this transport supports write_eof(), False if not.""" + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + +class Transport(ReadTransport, WriteTransport): + """Interface representing a bidirectional transport. + + There may be several implementations, but typically, the user does + not implement new transports; rather, the platform provides some + useful transports that are implemented using the platform's best + practices. + + The user never instantiates a transport directly; they call a + utility function, passing it a protocol factory and other + information necessary to create the transport and protocol. (E.g. + EventLoop.create_connection() or EventLoop.create_server().) + + The utility function will asynchronously create a transport and a + protocol and hook them up by calling the protocol's + connection_made() method, passing it the transport. + + The implementation here raises NotImplemented for every method + except writelines(), which calls write() in a loop. + """ + + __slots__ = () + + +class DatagramTransport(BaseTransport): + """Interface for datagram (UDP) transports.""" + + __slots__ = () + + def sendto(self, data, addr=None): + """Send data to the transport. + + This does not block; it buffers the data and arranges for it + to be sent out asynchronously. + addr is target socket address. + If addr is None use target address pointed on transport creation. + """ + raise NotImplementedError + + def abort(self): + """Close the transport immediately. + + Buffered data will be lost. No more data will be received. + The protocol's connection_lost() method will (eventually) be + called with None as its argument. + """ + raise NotImplementedError + + +class SubprocessTransport(BaseTransport): + + __slots__ = () + + def get_pid(self): + """Get subprocess id.""" + raise NotImplementedError + + def get_returncode(self): + """Get subprocess returncode. 
+ + See also + http://docs.python.org/3/library/subprocess#subprocess.Popen.returncode + """ + raise NotImplementedError + + def get_pipe_transport(self, fd): + """Get transport for pipe with number fd.""" + raise NotImplementedError + + def send_signal(self, signal): + """Send signal to subprocess. + + See also: + docs.python.org/3/library/subprocess#subprocess.Popen.send_signal + """ + raise NotImplementedError + + def terminate(self): + """Stop the subprocess. + + Alias for close() method. + + On Posix OSs the method sends SIGTERM to the subprocess. + On Windows the Win32 API function TerminateProcess() + is called to stop the subprocess. + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.terminate + """ + raise NotImplementedError + + def kill(self): + """Kill the subprocess. + + On Posix OSs the function sends SIGKILL to the subprocess. + On Windows kill() is an alias for terminate(). + + See also: + http://docs.python.org/3/library/subprocess#subprocess.Popen.kill + """ + raise NotImplementedError + + +class _FlowControlMixin(Transport): + """All the logic for (write) flow control in a mix-in base class. + + The subclass must implement get_write_buffer_size(). It must call + _maybe_pause_protocol() whenever the write buffer size increases, + and _maybe_resume_protocol() whenever it decreases. It may also + override set_write_buffer_limits() (e.g. to specify different + defaults). + + The subclass constructor must call super().__init__(extra). This + will call set_write_buffer_limits(). + + The user may call set_write_buffer_limits() and + get_write_buffer_size(), and their protocol's pause_writing() and + resume_writing() may be called. + """ + + __slots__ = ('_loop', '_protocol_paused', '_high_water', '_low_water') + + def __init__(self, extra=None, loop=None): + super().__init__(extra) + assert loop is not None + self._loop = loop + self._protocol_paused = False + self._set_write_buffer_limits() + + def _maybe_pause_protocol(self): + size = self.get_write_buffer_size() + if size <= self._high_water: + return + if not self._protocol_paused: + self._protocol_paused = True + try: + self._protocol.pause_writing() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.pause_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def _maybe_resume_protocol(self): + if (self._protocol_paused and + self.get_write_buffer_size() <= self._low_water): + self._protocol_paused = False + try: + self._protocol.resume_writing() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._loop.call_exception_handler({ + 'message': 'protocol.resume_writing() failed', + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + + def get_write_buffer_limits(self): + return (self._low_water, self._high_water) + + def _set_write_buffer_limits(self, high=None, low=None): + if high is None: + if low is None: + high = 64 * 1024 + else: + high = 4 * low + if low is None: + low = high // 4 + + if not high >= low >= 0: + raise ValueError( + f'high ({high!r}) must be >= low ({low!r}) must be >= 0') + + self._high_water = high + self._low_water = low + + def set_write_buffer_limits(self, high=None, low=None): + self._set_write_buffer_limits(high=high, low=low) + self._maybe_pause_protocol() + + def get_write_buffer_size(self): + raise NotImplementedError diff --git a/python310/asyncio/trsock.py 
b/python310/asyncio/trsock.py new file mode 100644 index 0000000000000000000000000000000000000000..e9ebcc326142596b289ef74ee3c35460f598b138 --- /dev/null +++ b/python310/asyncio/trsock.py @@ -0,0 +1,206 @@ +import socket +import warnings + + +class TransportSocket: + + """A socket-like wrapper for exposing real transport sockets. + + These objects can be safely returned by APIs like + `transport.get_extra_info('socket')`. All potentially disruptive + operations (like "socket.close()") are banned. + """ + + __slots__ = ('_sock',) + + def __init__(self, sock: socket.socket): + self._sock = sock + + def _na(self, what): + warnings.warn( + f"Using {what} on sockets returned from get_extra_info('socket') " + f"will be prohibited in asyncio 3.9. Please report your use case " + f"to bugs.python.org.", + DeprecationWarning, source=self) + + @property + def family(self): + return self._sock.family + + @property + def type(self): + return self._sock.type + + @property + def proto(self): + return self._sock.proto + + def __repr__(self): + s = ( + f"" + + def __getstate__(self): + raise TypeError("Cannot serialize asyncio.TransportSocket object") + + def fileno(self): + return self._sock.fileno() + + def dup(self): + return self._sock.dup() + + def get_inheritable(self): + return self._sock.get_inheritable() + + def shutdown(self, how): + # asyncio doesn't currently provide a high-level transport API + # to shutdown the connection. + self._sock.shutdown(how) + + def getsockopt(self, *args, **kwargs): + return self._sock.getsockopt(*args, **kwargs) + + def setsockopt(self, *args, **kwargs): + self._sock.setsockopt(*args, **kwargs) + + def getpeername(self): + return self._sock.getpeername() + + def getsockname(self): + return self._sock.getsockname() + + def getsockbyname(self): + return self._sock.getsockbyname() + + def accept(self): + self._na('accept() method') + return self._sock.accept() + + def connect(self, *args, **kwargs): + self._na('connect() method') + return self._sock.connect(*args, **kwargs) + + def connect_ex(self, *args, **kwargs): + self._na('connect_ex() method') + return self._sock.connect_ex(*args, **kwargs) + + def bind(self, *args, **kwargs): + self._na('bind() method') + return self._sock.bind(*args, **kwargs) + + def ioctl(self, *args, **kwargs): + self._na('ioctl() method') + return self._sock.ioctl(*args, **kwargs) + + def listen(self, *args, **kwargs): + self._na('listen() method') + return self._sock.listen(*args, **kwargs) + + def makefile(self): + self._na('makefile() method') + return self._sock.makefile() + + def sendfile(self, *args, **kwargs): + self._na('sendfile() method') + return self._sock.sendfile(*args, **kwargs) + + def close(self): + self._na('close() method') + return self._sock.close() + + def detach(self): + self._na('detach() method') + return self._sock.detach() + + def sendmsg_afalg(self, *args, **kwargs): + self._na('sendmsg_afalg() method') + return self._sock.sendmsg_afalg(*args, **kwargs) + + def sendmsg(self, *args, **kwargs): + self._na('sendmsg() method') + return self._sock.sendmsg(*args, **kwargs) + + def sendto(self, *args, **kwargs): + self._na('sendto() method') + return self._sock.sendto(*args, **kwargs) + + def send(self, *args, **kwargs): + self._na('send() method') + return self._sock.send(*args, **kwargs) + + def sendall(self, *args, **kwargs): + self._na('sendall() method') + return self._sock.sendall(*args, **kwargs) + + def set_inheritable(self, *args, **kwargs): + self._na('set_inheritable() method') + return 
self._sock.set_inheritable(*args, **kwargs) + + def share(self, process_id): + self._na('share() method') + return self._sock.share(process_id) + + def recv_into(self, *args, **kwargs): + self._na('recv_into() method') + return self._sock.recv_into(*args, **kwargs) + + def recvfrom_into(self, *args, **kwargs): + self._na('recvfrom_into() method') + return self._sock.recvfrom_into(*args, **kwargs) + + def recvmsg_into(self, *args, **kwargs): + self._na('recvmsg_into() method') + return self._sock.recvmsg_into(*args, **kwargs) + + def recvmsg(self, *args, **kwargs): + self._na('recvmsg() method') + return self._sock.recvmsg(*args, **kwargs) + + def recvfrom(self, *args, **kwargs): + self._na('recvfrom() method') + return self._sock.recvfrom(*args, **kwargs) + + def recv(self, *args, **kwargs): + self._na('recv() method') + return self._sock.recv(*args, **kwargs) + + def settimeout(self, value): + if value == 0: + return + raise ValueError( + 'settimeout(): only 0 timeout is allowed on transport sockets') + + def gettimeout(self): + return 0 + + def setblocking(self, flag): + if not flag: + return + raise ValueError( + 'setblocking(): transport sockets cannot be blocking') + + def __enter__(self): + self._na('context manager protocol') + return self._sock.__enter__() + + def __exit__(self, *err): + self._na('context manager protocol') + return self._sock.__exit__(*err) diff --git a/python310/asyncio/unix_events.py b/python310/asyncio/unix_events.py new file mode 100644 index 0000000000000000000000000000000000000000..faaca305c98da775d278fd29220bc2bcd6fab8ab --- /dev/null +++ b/python310/asyncio/unix_events.py @@ -0,0 +1,1466 @@ +"""Selector event loop for Unix with signal handling.""" + +import errno +import io +import itertools +import os +import selectors +import signal +import socket +import stat +import subprocess +import sys +import threading +import warnings + +from . import base_events +from . import base_subprocess +from . import constants +from . import coroutines +from . import events +from . import exceptions +from . import futures +from . import selector_events +from . import tasks +from . import transports +from .log import logger + + +__all__ = ( + 'SelectorEventLoop', + 'AbstractChildWatcher', 'SafeChildWatcher', + 'FastChildWatcher', 'PidfdChildWatcher', + 'MultiLoopChildWatcher', 'ThreadedChildWatcher', + 'DefaultEventLoopPolicy', +) + + +if sys.platform == 'win32': # pragma: no cover + raise ImportError('Signals are not really supported on Windows') + + +def _sighandler_noop(signum, frame): + """Dummy signal handler.""" + pass + + +def waitstatus_to_exitcode(status): + try: + return os.waitstatus_to_exitcode(status) + except ValueError: + # The child exited, but we don't understand its status. + # This shouldn't happen, but if it does, let's just + # return that status; perhaps that helps debug it. + return status + + +class _UnixSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Unix event loop. + + Adds signal handling and UNIX Domain Socket support to SelectorEventLoop. 
+ """ + + def __init__(self, selector=None): + super().__init__(selector) + self._signal_handlers = {} + + def close(self): + super().close() + if not sys.is_finalizing(): + for sig in list(self._signal_handlers): + self.remove_signal_handler(sig) + else: + if self._signal_handlers: + warnings.warn(f"Closing the loop {self!r} " + f"on interpreter shutdown " + f"stage, skipping signal handlers removal", + ResourceWarning, + source=self) + self._signal_handlers.clear() + + def _process_self_data(self, data): + for signum in data: + if not signum: + # ignore null bytes written by _write_to_self() + continue + self._handle_signal(signum) + + def add_signal_handler(self, sig, callback, *args): + """Add a handler for a signal. UNIX only. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. + """ + if (coroutines.iscoroutine(callback) or + coroutines.iscoroutinefunction(callback)): + raise TypeError("coroutines cannot be used " + "with add_signal_handler()") + self._check_signal(sig) + self._check_closed() + try: + # set_wakeup_fd() raises ValueError if this is not the + # main thread. By calling it early we ensure that an + # event loop running in another thread cannot add a signal + # handler. + signal.set_wakeup_fd(self._csock.fileno()) + except (ValueError, OSError) as exc: + raise RuntimeError(str(exc)) + + handle = events.Handle(callback, args, self, None) + self._signal_handlers[sig] = handle + + try: + # Register a dummy signal handler to ask Python to write the signal + # number in the wakeup file descriptor. _process_self_data() will + # read signal numbers from this file descriptor to handle signals. + signal.signal(sig, _sighandler_noop) + + # Set SA_RESTART to limit EINTR occurrences. + signal.siginterrupt(sig, False) + except OSError as exc: + del self._signal_handlers[sig] + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as nexc: + logger.info('set_wakeup_fd(-1) failed: %s', nexc) + + if exc.errno == errno.EINVAL: + raise RuntimeError(f'sig {sig} cannot be caught') + else: + raise + + def _handle_signal(self, sig): + """Internal helper that is the actual signal handler.""" + handle = self._signal_handlers.get(sig) + if handle is None: + return # Assume it's some race condition. + if handle._cancelled: + self.remove_signal_handler(sig) # Remove it properly. + else: + self._add_callback_signalsafe(handle) + + def remove_signal_handler(self, sig): + """Remove a handler for a signal. UNIX only. + + Return True if a signal handler was removed, False if not. + """ + self._check_signal(sig) + try: + del self._signal_handlers[sig] + except KeyError: + return False + + if sig == signal.SIGINT: + handler = signal.default_int_handler + else: + handler = signal.SIG_DFL + + try: + signal.signal(sig, handler) + except OSError as exc: + if exc.errno == errno.EINVAL: + raise RuntimeError(f'sig {sig} cannot be caught') + else: + raise + + if not self._signal_handlers: + try: + signal.set_wakeup_fd(-1) + except (ValueError, OSError) as exc: + logger.info('set_wakeup_fd(-1) failed: %s', exc) + + return True + + def _check_signal(self, sig): + """Internal helper to validate a signal. + + Raise ValueError if the signal number is invalid or uncatchable. + Raise RuntimeError if there is a problem setting up the handler. 
+ """ + if not isinstance(sig, int): + raise TypeError(f'sig must be an int, not {sig!r}') + + if sig not in signal.valid_signals(): + raise ValueError(f'invalid signal number {sig}') + + def _make_read_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixReadPipeTransport(self, pipe, protocol, waiter, extra) + + def _make_write_pipe_transport(self, pipe, protocol, waiter=None, + extra=None): + return _UnixWritePipeTransport(self, pipe, protocol, waiter, extra) + + async def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + with events.get_child_watcher() as watcher: + if not watcher.is_active(): + # Check early. + # Raising exception before process creation + # prevents subprocess execution if the watcher + # is not ready to handle it. + raise RuntimeError("asyncio.get_child_watcher() is not activated, " + "subprocess support is not installed.") + waiter = self.create_future() + transp = _UnixSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + + watcher.add_child_handler(transp.get_pid(), + self._child_watcher_callback, transp) + try: + await waiter + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + transp.close() + await transp._wait() + raise + + return transp + + def _child_watcher_callback(self, pid, returncode, transp): + # Skip one iteration for callbacks to be executed + self.call_soon_threadsafe(self.call_soon, transp._process_exited, returncode) + + async def create_unix_connection( + self, protocol_factory, path=None, *, + ssl=None, sock=None, + server_hostname=None, + ssl_handshake_timeout=None): + assert server_hostname is None or isinstance(server_hostname, str) + if ssl: + if server_hostname is None: + raise ValueError( + 'you have to pass server_hostname when using ssl') + else: + if server_hostname is not None: + raise ValueError('server_hostname is only meaningful with ssl') + if ssl_handshake_timeout is not None: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + path = os.fspath(path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM, 0) + try: + sock.setblocking(False) + await self.sock_connect(sock, path) + except: + sock.close() + raise + + else: + if sock is None: + raise ValueError('no path and sock were specified') + if (sock.family != socket.AF_UNIX or + sock.type != socket.SOCK_STREAM): + raise ValueError( + f'A UNIX Domain Stream Socket was expected, got {sock!r}') + sock.setblocking(False) + + transport, protocol = await self._create_connection_transport( + sock, protocol_factory, ssl, server_hostname, + ssl_handshake_timeout=ssl_handshake_timeout) + return transport, protocol + + async def create_unix_server( + self, protocol_factory, path=None, *, + sock=None, backlog=100, ssl=None, + ssl_handshake_timeout=None, + start_serving=True): + if isinstance(ssl, bool): + raise TypeError('ssl argument must be an SSLContext or None') + + if ssl_handshake_timeout is not None and not ssl: + raise ValueError( + 'ssl_handshake_timeout is only meaningful with ssl') + + if path is not None: + if sock is not None: + raise ValueError( + 'path and sock can not be specified at the same time') + + path = os.fspath(path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + + # Check for abstract socket. 
`str` and `bytes` paths are supported. + if path[0] not in (0, '\x00'): + try: + if stat.S_ISSOCK(os.stat(path).st_mode): + os.remove(path) + except FileNotFoundError: + pass + except OSError as err: + # Directory may have permissions only to create socket. + logger.error('Unable to check or remove stale UNIX socket ' + '%r: %r', path, err) + + try: + sock.bind(path) + except OSError as exc: + sock.close() + if exc.errno == errno.EADDRINUSE: + # Let's improve the error message by adding + # with what exact address it occurs. + msg = f'Address {path!r} is already in use' + raise OSError(errno.EADDRINUSE, msg) from None + else: + raise + except: + sock.close() + raise + else: + if sock is None: + raise ValueError( + 'path was not specified, and no sock specified') + + if (sock.family != socket.AF_UNIX or + sock.type != socket.SOCK_STREAM): + raise ValueError( + f'A UNIX Domain Stream Socket was expected, got {sock!r}') + + sock.setblocking(False) + server = base_events.Server(self, [sock], protocol_factory, + ssl, backlog, ssl_handshake_timeout) + if start_serving: + server._start_serving() + # Skip one loop iteration so that all 'loop.add_reader' + # go through. + await tasks.sleep(0) + + return server + + async def _sock_sendfile_native(self, sock, file, offset, count): + try: + os.sendfile + except AttributeError: + raise exceptions.SendfileNotAvailableError( + "os.sendfile() is not available") + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise exceptions.SendfileNotAvailableError("not a regular file") + try: + fsize = os.fstat(fileno).st_size + except OSError: + raise exceptions.SendfileNotAvailableError("not a regular file") + blocksize = count if count else fsize + if not blocksize: + return 0 # empty file + + fut = self.create_future() + self._sock_sendfile_native_impl(fut, None, sock, fileno, + offset, count, blocksize, 0) + return await fut + + def _sock_sendfile_native_impl(self, fut, registered_fd, sock, fileno, + offset, count, blocksize, total_sent): + fd = sock.fileno() + if registered_fd is not None: + # Remove the callback early. It should be rare that the + # selector says the fd is ready but the call still returns + # EAGAIN, and I am willing to take a hit in that case in + # order to simplify the common case. + self.remove_writer(registered_fd) + if fut.cancelled(): + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + return + if count: + blocksize = count - total_sent + if blocksize <= 0: + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_result(total_sent) + return + + try: + sent = os.sendfile(fd, fileno, offset, blocksize) + except (BlockingIOError, InterruptedError): + if registered_fd is None: + self._sock_add_cancellation_callback(fut, sock) + self.add_writer(fd, self._sock_sendfile_native_impl, fut, + fd, sock, fileno, + offset, count, blocksize, total_sent) + except OSError as exc: + if (registered_fd is not None and + exc.errno == errno.ENOTCONN and + type(exc) is not ConnectionError): + # If we have an ENOTCONN and this isn't a first call to + # sendfile(), i.e. the connection was closed in the middle + # of the operation, normalize the error to ConnectionError + # to make it consistent across all Posix systems. 
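+                # The original OSError stays reachable via __cause__ below,
+                # so the underlying errno is not lost by the normalization.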
+ new_exc = ConnectionError( + "socket is not connected", errno.ENOTCONN) + new_exc.__cause__ = exc + exc = new_exc + if total_sent == 0: + # We can get here for different reasons, the main + # one being 'file' is not a regular mmap(2)-like + # file, in which case we'll fall back on using + # plain send(). + err = exceptions.SendfileNotAvailableError( + "os.sendfile call failed") + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_exception(err) + else: + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_exception(exc) + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_exception(exc) + else: + if sent == 0: + # EOF + self._sock_sendfile_update_filepos(fileno, offset, total_sent) + fut.set_result(total_sent) + else: + offset += sent + total_sent += sent + if registered_fd is None: + self._sock_add_cancellation_callback(fut, sock) + self.add_writer(fd, self._sock_sendfile_native_impl, fut, + fd, sock, fileno, + offset, count, blocksize, total_sent) + + def _sock_sendfile_update_filepos(self, fileno, offset, total_sent): + if total_sent > 0: + os.lseek(fileno, offset, os.SEEK_SET) + + def _sock_add_cancellation_callback(self, fut, sock): + def cb(fut): + if fut.cancelled(): + fd = sock.fileno() + if fd != -1: + self.remove_writer(fd) + fut.add_done_callback(cb) + + +class _UnixReadPipeTransport(transports.ReadTransport): + + max_size = 256 * 1024 # max bytes we read in one event loop iteration + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super().__init__(extra) + self._extra['pipe'] = pipe + self._loop = loop + self._pipe = pipe + self._fileno = pipe.fileno() + self._protocol = protocol + self._closing = False + self._paused = False + + mode = os.fstat(self._fileno).st_mode + if not (stat.S_ISFIFO(mode) or + stat.S_ISSOCK(mode) or + stat.S_ISCHR(mode)): + self._pipe = None + self._fileno = None + self._protocol = None + raise ValueError("Pipe transport is for pipes/sockets only.") + + os.set_blocking(self._fileno, False) + + self._loop.call_soon(self._protocol.connection_made, self) + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._fileno, self._read_ready) + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._fileno}') + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: + polling = selector_events._test_selector_event( + selector, self._fileno, selectors.EVENT_READ) + if polling: + info.append('polling') + else: + info.append('idle') + elif self._pipe is not None: + info.append('open') + else: + info.append('closed') + return '<{}>'.format(' '.join(info)) + + def _read_ready(self): + try: + data = os.read(self._fileno, self.max_size) + except (BlockingIOError, InterruptedError): + pass + except OSError as exc: + self._fatal_error(exc, 'Fatal read error on pipe transport') + else: + if data: + self._protocol.data_received(data) + else: + if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + self._closing = True + self._loop._remove_reader(self._fileno) + 
self._loop.call_soon(self._protocol.eof_received) + self._loop.call_soon(self._call_connection_lost, None) + + def pause_reading(self): + if self._closing or self._paused: + return + self._paused = True + self._loop._remove_reader(self._fileno) + if self._loop.get_debug(): + logger.debug("%r pauses reading", self) + + def resume_reading(self): + if self._closing or not self._paused: + return + self._paused = False + self._loop._add_reader(self._fileno, self._read_ready) + if self._loop.get_debug(): + logger.debug("%r resumes reading", self) + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if not self._closing: + self._close(None) + + def __del__(self, _warn=warnings.warn): + if self._pipe is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._pipe.close() + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if (isinstance(exc, OSError) and exc.errno == errno.EIO): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc): + self._closing = True + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +class _UnixWritePipeTransport(transports._FlowControlMixin, + transports.WriteTransport): + + def __init__(self, loop, pipe, protocol, waiter=None, extra=None): + super().__init__(extra, loop) + self._extra['pipe'] = pipe + self._pipe = pipe + self._fileno = pipe.fileno() + self._protocol = protocol + self._buffer = bytearray() + self._conn_lost = 0 + self._closing = False # Set when close() or write_eof() called. + + mode = os.fstat(self._fileno).st_mode + is_char = stat.S_ISCHR(mode) + is_fifo = stat.S_ISFIFO(mode) + is_socket = stat.S_ISSOCK(mode) + if not (is_char or is_fifo or is_socket): + self._pipe = None + self._fileno = None + self._protocol = None + raise ValueError("Pipe transport is only for " + "pipes, sockets and character devices") + + os.set_blocking(self._fileno, False) + self._loop.call_soon(self._protocol.connection_made, self) + + # On AIX, the reader trick (to be notified when the read end of the + # socket is closed) only works for sockets. On other platforms it + # works for pipes and sockets. (Exception: OS X 10.4? Issue #19294.) 
+ if is_socket or (is_fifo and not sys.platform.startswith("aix")): + # only start reading when connection_made() has been called + self._loop.call_soon(self._loop._add_reader, + self._fileno, self._read_ready) + + if waiter is not None: + # only wake up the waiter when connection_made() has been called + self._loop.call_soon(futures._set_result_unless_cancelled, + waiter, None) + + def __repr__(self): + info = [self.__class__.__name__] + if self._pipe is None: + info.append('closed') + elif self._closing: + info.append('closing') + info.append(f'fd={self._fileno}') + selector = getattr(self._loop, '_selector', None) + if self._pipe is not None and selector is not None: + polling = selector_events._test_selector_event( + selector, self._fileno, selectors.EVENT_WRITE) + if polling: + info.append('polling') + else: + info.append('idle') + + bufsize = self.get_write_buffer_size() + info.append(f'bufsize={bufsize}') + elif self._pipe is not None: + info.append('open') + else: + info.append('closed') + return '<{}>'.format(' '.join(info)) + + def get_write_buffer_size(self): + return len(self._buffer) + + def _read_ready(self): + # Pipe was closed by peer. + if self._loop.get_debug(): + logger.info("%r was closed by peer", self) + if self._buffer: + self._close(BrokenPipeError()) + else: + self._close() + + def write(self, data): + assert isinstance(data, (bytes, bytearray, memoryview)), repr(data) + if isinstance(data, bytearray): + data = memoryview(data) + if not data: + return + + if self._conn_lost or self._closing: + if self._conn_lost >= constants.LOG_THRESHOLD_FOR_CONNLOST_WRITES: + logger.warning('pipe closed by peer or ' + 'os.write(pipe, data) raised exception.') + self._conn_lost += 1 + return + + if not self._buffer: + # Attempt to send it right away first. + try: + n = os.write(self._fileno, data) + except (BlockingIOError, InterruptedError): + n = 0 + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._conn_lost += 1 + self._fatal_error(exc, 'Fatal write error on pipe transport') + return + if n == len(data): + return + elif n > 0: + data = memoryview(data)[n:] + self._loop._add_writer(self._fileno, self._write_ready) + + self._buffer += data + self._maybe_pause_protocol() + + def _write_ready(self): + assert self._buffer, 'Data should not be empty' + + try: + n = os.write(self._fileno, self._buffer) + except (BlockingIOError, InterruptedError): + pass + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + self._buffer.clear() + self._conn_lost += 1 + # Remove writer here, _fatal_error() doesn't it + # because _buffer is empty. + self._loop._remove_writer(self._fileno) + self._fatal_error(exc, 'Fatal write error on pipe transport') + else: + if n == len(self._buffer): + self._buffer.clear() + self._loop._remove_writer(self._fileno) + self._maybe_resume_protocol() # May append to buffer. 
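+                # The buffer just drained completely; if close() or
+                # write_eof() was requested earlier, finish closing now.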
+ if self._closing: + self._loop._remove_reader(self._fileno) + self._call_connection_lost(None) + return + elif n > 0: + del self._buffer[:n] + + def can_write_eof(self): + return True + + def write_eof(self): + if self._closing: + return + assert self._pipe + self._closing = True + if not self._buffer: + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, None) + + def set_protocol(self, protocol): + self._protocol = protocol + + def get_protocol(self): + return self._protocol + + def is_closing(self): + return self._closing + + def close(self): + if self._pipe is not None and not self._closing: + # write_eof is all what we needed to close the write pipe + self.write_eof() + + def __del__(self, _warn=warnings.warn): + if self._pipe is not None: + _warn(f"unclosed transport {self!r}", ResourceWarning, source=self) + self._pipe.close() + + def abort(self): + self._close(None) + + def _fatal_error(self, exc, message='Fatal error on pipe transport'): + # should be called by exception handler only + if isinstance(exc, OSError): + if self._loop.get_debug(): + logger.debug("%r: %s", self, message, exc_info=True) + else: + self._loop.call_exception_handler({ + 'message': message, + 'exception': exc, + 'transport': self, + 'protocol': self._protocol, + }) + self._close(exc) + + def _close(self, exc=None): + self._closing = True + if self._buffer: + self._loop._remove_writer(self._fileno) + self._buffer.clear() + self._loop._remove_reader(self._fileno) + self._loop.call_soon(self._call_connection_lost, exc) + + def _call_connection_lost(self, exc): + try: + self._protocol.connection_lost(exc) + finally: + self._pipe.close() + self._pipe = None + self._protocol = None + self._loop = None + + +class _UnixSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + stdin_w = None + if stdin == subprocess.PIPE and sys.platform.startswith('aix'): + # Use a socket pair for stdin on AIX, since it does not + # support selecting read events on the write end of a + # socket (which we use in order to detect closing of the + # other end). + stdin, stdin_w = socket.socketpair() + try: + self._proc = subprocess.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + universal_newlines=False, bufsize=bufsize, **kwargs) + if stdin_w is not None: + stdin.close() + self._proc.stdin = open(stdin_w.detach(), 'wb', buffering=bufsize) + stdin_w = None + finally: + if stdin_w is not None: + stdin.close() + stdin_w.close() + + +class AbstractChildWatcher: + """Abstract base class for monitoring child processes. + + Objects derived from this class monitor a collection of subprocesses and + report their termination or interruption by a signal. + + New callbacks are registered with .add_child_handler(). Starting a new + process must be done within a 'with' block to allow the watcher to suspend + its activity until the new process if fully registered (this is needed to + prevent a race condition in some implementations). + + Example: + with watcher: + proc = subprocess.Popen("sleep 1") + watcher.add_child_handler(proc.pid, callback) + + Notes: + Implementations of this class must be thread-safe. + + Since child watcher objects may catch the SIGCHLD signal and call + waitpid(-1), there should be only one active object per process. + """ + + def add_child_handler(self, pid, callback, *args): + """Register a new child handler. 
+ + Arrange for callback(pid, returncode, *args) to be called when + process 'pid' terminates. Specifying another callback for the same + process replaces the previous handler. + + Note: callback() must be thread-safe. + """ + raise NotImplementedError() + + def remove_child_handler(self, pid): + """Removes the handler for process 'pid'. + + The function returns True if the handler was successfully removed, + False if there was nothing to remove.""" + + raise NotImplementedError() + + def attach_loop(self, loop): + """Attach the watcher to an event loop. + + If the watcher was previously attached to an event loop, then it is + first detached before attaching to the new loop. + + Note: loop may be None. + """ + raise NotImplementedError() + + def close(self): + """Close the watcher. + + This must be called to make sure that any underlying resource is freed. + """ + raise NotImplementedError() + + def is_active(self): + """Return ``True`` if the watcher is active and is used by the event loop. + + Return True if the watcher is installed and ready to handle process exit + notifications. + + """ + raise NotImplementedError() + + def __enter__(self): + """Enter the watcher's context and allow starting new processes + + This function must return self""" + raise NotImplementedError() + + def __exit__(self, a, b, c): + """Exit the watcher's context""" + raise NotImplementedError() + + +class PidfdChildWatcher(AbstractChildWatcher): + """Child watcher implementation using Linux's pid file descriptors. + + This child watcher polls process file descriptors (pidfds) to await child + process termination. In some respects, PidfdChildWatcher is a "Goldilocks" + child watcher implementation. It doesn't require signals or threads, doesn't + interfere with any processes launched outside the event loop, and scales + linearly with the number of subprocesses launched by the event loop. The + main disadvantage is that pidfds are specific to Linux, and only work on + recent (5.3+) kernels. + """ + + def __init__(self): + self._loop = None + self._callbacks = {} + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_traceback): + pass + + def is_active(self): + return self._loop is not None and self._loop.is_running() + + def close(self): + self.attach_loop(None) + + def attach_loop(self, loop): + if self._loop is not None and loop is None and self._callbacks: + warnings.warn( + 'A loop is being detached ' + 'from a child watcher with pending handlers', + RuntimeWarning) + for pidfd, _, _ in self._callbacks.values(): + self._loop._remove_reader(pidfd) + os.close(pidfd) + self._callbacks.clear() + self._loop = loop + + def add_child_handler(self, pid, callback, *args): + existing = self._callbacks.get(pid) + if existing is not None: + self._callbacks[pid] = existing[0], callback, args + else: + pidfd = os.pidfd_open(pid) + self._loop._add_reader(pidfd, self._do_wait, pid) + self._callbacks[pid] = pidfd, callback, args + + def _do_wait(self, pid): + pidfd, callback, args = self._callbacks.pop(pid) + self._loop._remove_reader(pidfd) + try: + _, status = os.waitpid(pid, 0) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). 
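+            # The real exit status is unknown at this point; fall back to 255
+            # (as the other watchers do) and warn about it below.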
+ returncode = 255 + logger.warning( + "child process pid %d exit status already read: " + " will report returncode 255", + pid) + else: + returncode = waitstatus_to_exitcode(status) + + os.close(pidfd) + callback(pid, returncode, *args) + + def remove_child_handler(self, pid): + try: + pidfd, _, _ = self._callbacks.pop(pid) + except KeyError: + return False + self._loop._remove_reader(pidfd) + os.close(pidfd) + return True + + +class BaseChildWatcher(AbstractChildWatcher): + + def __init__(self): + self._loop = None + self._callbacks = {} + + def close(self): + self.attach_loop(None) + + def is_active(self): + return self._loop is not None and self._loop.is_running() + + def _do_waitpid(self, expected_pid): + raise NotImplementedError() + + def _do_waitpid_all(self): + raise NotImplementedError() + + def attach_loop(self, loop): + assert loop is None or isinstance(loop, events.AbstractEventLoop) + + if self._loop is not None and loop is None and self._callbacks: + warnings.warn( + 'A loop is being detached ' + 'from a child watcher with pending handlers', + RuntimeWarning) + + if self._loop is not None: + self._loop.remove_signal_handler(signal.SIGCHLD) + + self._loop = loop + if loop is not None: + loop.add_signal_handler(signal.SIGCHLD, self._sig_chld) + + # Prevent a race condition in case a child terminated + # during the switch. + self._do_waitpid_all() + + def _sig_chld(self): + try: + self._do_waitpid_all() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException as exc: + # self._loop should always be available here + # as '_sig_chld' is added as a signal handler + # in 'attach_loop' + self._loop.call_exception_handler({ + 'message': 'Unknown exception in SIGCHLD handler', + 'exception': exc, + }) + + +class SafeChildWatcher(BaseChildWatcher): + """'Safe' child watcher implementation. + + This implementation avoids disrupting other code spawning processes by + polling explicitly each process in the SIGCHLD handler instead of calling + os.waitpid(-1). + + This is a safe solution but it has a significant overhead when handling a + big number of children (O(n) each time SIGCHLD is raised) + """ + + def close(self): + self._callbacks.clear() + super().close() + + def __enter__(self): + return self + + def __exit__(self, a, b, c): + pass + + def add_child_handler(self, pid, callback, *args): + self._callbacks[pid] = (callback, args) + + # Prevent a race condition in case the child is already terminated. + self._do_waitpid(pid) + + def remove_child_handler(self, pid): + try: + del self._callbacks[pid] + return True + except KeyError: + return False + + def _do_waitpid_all(self): + + for pid in list(self._callbacks): + self._do_waitpid(pid) + + def _do_waitpid(self, expected_pid): + assert expected_pid > 0 + + try: + pid, status = os.waitpid(expected_pid, os.WNOHANG) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). + pid = expected_pid + returncode = 255 + logger.warning( + "Unknown child process pid %d, will report returncode 255", + pid) + else: + if pid == 0: + # The child process is still alive. + return + + returncode = waitstatus_to_exitcode(status) + if self._loop.get_debug(): + logger.debug('process %s exited with returncode %s', + expected_pid, returncode) + + try: + callback, args = self._callbacks.pop(pid) + except KeyError: # pragma: no cover + # May happen if .remove_child_handler() is called + # after os.waitpid() returns. 
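+            # The exit status was consumed above but no callback is registered
+            # for it anymore, so the result can only be logged.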
+ if self._loop.get_debug(): + logger.warning("Child watcher got an unexpected pid: %r", + pid, exc_info=True) + else: + callback(pid, returncode, *args) + + +class FastChildWatcher(BaseChildWatcher): + """'Fast' child watcher implementation. + + This implementation reaps every terminated processes by calling + os.waitpid(-1) directly, possibly breaking other code spawning processes + and waiting for their termination. + + There is no noticeable overhead when handling a big number of children + (O(1) each time a child terminates). + """ + def __init__(self): + super().__init__() + self._lock = threading.Lock() + self._zombies = {} + self._forks = 0 + + def close(self): + self._callbacks.clear() + self._zombies.clear() + super().close() + + def __enter__(self): + with self._lock: + self._forks += 1 + + return self + + def __exit__(self, a, b, c): + with self._lock: + self._forks -= 1 + + if self._forks or not self._zombies: + return + + collateral_victims = str(self._zombies) + self._zombies.clear() + + logger.warning( + "Caught subprocesses termination from unknown pids: %s", + collateral_victims) + + def add_child_handler(self, pid, callback, *args): + assert self._forks, "Must use the context manager" + + with self._lock: + try: + returncode = self._zombies.pop(pid) + except KeyError: + # The child is running. + self._callbacks[pid] = callback, args + return + + # The child is dead already. We can fire the callback. + callback(pid, returncode, *args) + + def remove_child_handler(self, pid): + try: + del self._callbacks[pid] + return True + except KeyError: + return False + + def _do_waitpid_all(self): + # Because of signal coalescing, we must keep calling waitpid() as + # long as we're able to reap a child. + while True: + try: + pid, status = os.waitpid(-1, os.WNOHANG) + except ChildProcessError: + # No more child processes exist. + return + else: + if pid == 0: + # A child process is still alive. + return + + returncode = waitstatus_to_exitcode(status) + + with self._lock: + try: + callback, args = self._callbacks.pop(pid) + except KeyError: + # unknown child + if self._forks: + # It may not be registered yet. + self._zombies[pid] = returncode + if self._loop.get_debug(): + logger.debug('unknown process %s exited ' + 'with returncode %s', + pid, returncode) + continue + callback = None + else: + if self._loop.get_debug(): + logger.debug('process %s exited with returncode %s', + pid, returncode) + + if callback is None: + logger.warning( + "Caught subprocess termination from unknown pid: " + "%d -> %d", pid, returncode) + else: + callback(pid, returncode, *args) + + +class MultiLoopChildWatcher(AbstractChildWatcher): + """A watcher that doesn't require running loop in the main thread. + + This implementation registers a SIGCHLD signal handler on + instantiation (which may conflict with other code that + install own handler for this signal). + + The solution is safe but it has a significant overhead when + handling a big number of processes (*O(n)* each time a + SIGCHLD is received). 
+ """ + + # Implementation note: + # The class keeps compatibility with AbstractChildWatcher ABC + # To achieve this it has empty attach_loop() method + # and doesn't accept explicit loop argument + # for add_child_handler()/remove_child_handler() + # but retrieves the current loop by get_running_loop() + + def __init__(self): + self._callbacks = {} + self._saved_sighandler = None + + def is_active(self): + return self._saved_sighandler is not None + + def close(self): + self._callbacks.clear() + if self._saved_sighandler is None: + return + + handler = signal.getsignal(signal.SIGCHLD) + if handler != self._sig_chld: + logger.warning("SIGCHLD handler was changed by outside code") + else: + signal.signal(signal.SIGCHLD, self._saved_sighandler) + self._saved_sighandler = None + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def add_child_handler(self, pid, callback, *args): + loop = events.get_running_loop() + self._callbacks[pid] = (loop, callback, args) + + # Prevent a race condition in case the child is already terminated. + self._do_waitpid(pid) + + def remove_child_handler(self, pid): + try: + del self._callbacks[pid] + return True + except KeyError: + return False + + def attach_loop(self, loop): + # Don't save the loop but initialize itself if called first time + # The reason to do it here is that attach_loop() is called from + # unix policy only for the main thread. + # Main thread is required for subscription on SIGCHLD signal + if self._saved_sighandler is not None: + return + + self._saved_sighandler = signal.signal(signal.SIGCHLD, self._sig_chld) + if self._saved_sighandler is None: + logger.warning("Previous SIGCHLD handler was set by non-Python code, " + "restore to default handler on watcher close.") + self._saved_sighandler = signal.SIG_DFL + + # Set SA_RESTART to limit EINTR occurrences. + signal.siginterrupt(signal.SIGCHLD, False) + + def _do_waitpid_all(self): + for pid in list(self._callbacks): + self._do_waitpid(pid) + + def _do_waitpid(self, expected_pid): + assert expected_pid > 0 + + try: + pid, status = os.waitpid(expected_pid, os.WNOHANG) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). + pid = expected_pid + returncode = 255 + logger.warning( + "Unknown child process pid %d, will report returncode 255", + pid) + debug_log = False + else: + if pid == 0: + # The child process is still alive. + return + + returncode = waitstatus_to_exitcode(status) + debug_log = True + try: + loop, callback, args = self._callbacks.pop(pid) + except KeyError: # pragma: no cover + # May happen if .remove_child_handler() is called + # after os.waitpid() returns. + logger.warning("Child watcher got an unexpected pid: %r", + pid, exc_info=True) + else: + if loop.is_closed(): + logger.warning("Loop %r that handles pid %r is closed", loop, pid) + else: + if debug_log and loop.get_debug(): + logger.debug('process %s exited with returncode %s', + expected_pid, returncode) + loop.call_soon_threadsafe(callback, pid, returncode, *args) + + def _sig_chld(self, signum, frame): + try: + self._do_waitpid_all() + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + logger.warning('Unknown exception in SIGCHLD handler', exc_info=True) + + +class ThreadedChildWatcher(AbstractChildWatcher): + """Threaded child watcher implementation. + + The watcher uses a thread per process + for waiting for the process finish. 
+ + It doesn't require subscription on POSIX signal + but a thread creation is not free. + + The watcher has O(1) complexity, its performance doesn't depend + on amount of spawn processes. + """ + + def __init__(self): + self._pid_counter = itertools.count(0) + self._threads = {} + + def is_active(self): + return True + + def close(self): + self._join_threads() + + def _join_threads(self): + """Internal: Join all non-daemon threads""" + threads = [thread for thread in list(self._threads.values()) + if thread.is_alive() and not thread.daemon] + for thread in threads: + thread.join() + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + pass + + def __del__(self, _warn=warnings.warn): + threads = [thread for thread in list(self._threads.values()) + if thread.is_alive()] + if threads: + _warn(f"{self.__class__} has registered but not finished child processes", + ResourceWarning, + source=self) + + def add_child_handler(self, pid, callback, *args): + loop = events.get_running_loop() + thread = threading.Thread(target=self._do_waitpid, + name=f"waitpid-{next(self._pid_counter)}", + args=(loop, pid, callback, args), + daemon=True) + self._threads[pid] = thread + thread.start() + + def remove_child_handler(self, pid): + # asyncio never calls remove_child_handler() !!! + # The method is no-op but is implemented because + # abstract base classes require it. + return True + + def attach_loop(self, loop): + pass + + def _do_waitpid(self, loop, expected_pid, callback, args): + assert expected_pid > 0 + + try: + pid, status = os.waitpid(expected_pid, 0) + except ChildProcessError: + # The child process is already reaped + # (may happen if waitpid() is called elsewhere). + pid = expected_pid + returncode = 255 + logger.warning( + "Unknown child process pid %d, will report returncode 255", + pid) + else: + returncode = waitstatus_to_exitcode(status) + if loop.get_debug(): + logger.debug('process %s exited with returncode %s', + expected_pid, returncode) + + if loop.is_closed(): + logger.warning("Loop %r that handles pid %r is closed", loop, pid) + else: + loop.call_soon_threadsafe(callback, pid, returncode, *args) + + self._threads.pop(expected_pid) + + +class _UnixDefaultEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + """UNIX event loop policy with a watcher for child processes.""" + _loop_factory = _UnixSelectorEventLoop + + def __init__(self): + super().__init__() + self._watcher = None + + def _init_watcher(self): + with events._lock: + if self._watcher is None: # pragma: no branch + self._watcher = ThreadedChildWatcher() + if threading.current_thread() is threading.main_thread(): + self._watcher.attach_loop(self._local._loop) + + def set_event_loop(self, loop): + """Set the event loop. + + As a side effect, if a child watcher was set before, then calling + .set_event_loop() from the main thread will call .attach_loop(loop) on + the child watcher. + """ + + super().set_event_loop(loop) + + if (self._watcher is not None and + threading.current_thread() is threading.main_thread()): + self._watcher.attach_loop(loop) + + def get_child_watcher(self): + """Get the watcher for child processes. + + If not yet set, a ThreadedChildWatcher object is automatically created. 
+ """ + if self._watcher is None: + self._init_watcher() + + return self._watcher + + def set_child_watcher(self, watcher): + """Set the watcher for child processes.""" + + assert watcher is None or isinstance(watcher, AbstractChildWatcher) + + if self._watcher is not None: + self._watcher.close() + + self._watcher = watcher + + +SelectorEventLoop = _UnixSelectorEventLoop +DefaultEventLoopPolicy = _UnixDefaultEventLoopPolicy diff --git a/python310/asyncio/windows_events.py b/python310/asyncio/windows_events.py new file mode 100644 index 0000000000000000000000000000000000000000..3204c7c9be96a4b32a67ba4ba1b752a65b9e6b09 --- /dev/null +++ b/python310/asyncio/windows_events.py @@ -0,0 +1,924 @@ +"""Selector and proactor event loops for Windows.""" + +import sys + +if sys.platform != 'win32': # pragma: no cover + raise ImportError('win32 only') + +import _overlapped +import _winapi +import errno +import math +import msvcrt +import socket +import struct +import time +import weakref + +from . import events +from . import base_subprocess +from . import futures +from . import exceptions +from . import proactor_events +from . import selector_events +from . import tasks +from . import windows_utils +from .log import logger + + +__all__ = ( + 'SelectorEventLoop', 'ProactorEventLoop', 'IocpProactor', + 'DefaultEventLoopPolicy', 'WindowsSelectorEventLoopPolicy', + 'WindowsProactorEventLoopPolicy', +) + + +NULL = 0 +INFINITE = 0xffffffff +ERROR_CONNECTION_REFUSED = 1225 +ERROR_CONNECTION_ABORTED = 1236 + +# Initial delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_INIT_DELAY = 0.001 + +# Maximum delay in seconds for connect_pipe() before retrying to connect +CONNECT_PIPE_MAX_DELAY = 0.100 + + +class _OverlappedFuture(futures.Future): + """Subclass of Future which represents an overlapped operation. + + Cancelling it will immediately cancel the overlapped operation. + """ + + def __init__(self, ov, *, loop=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + self._ov = ov + + def _repr_info(self): + info = super()._repr_info() + if self._ov is not None: + state = 'pending' if self._ov.pending else 'completed' + info.insert(1, f'overlapped=<{state}, {self._ov.address:#x}>') + return info + + def _cancel_overlapped(self): + if self._ov is None: + return + try: + self._ov.cancel() + except OSError as exc: + context = { + 'message': 'Cancelling an overlapped future failed', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + self._ov = None + + def cancel(self, msg=None): + self._cancel_overlapped() + return super().cancel(msg=msg) + + def set_exception(self, exception): + super().set_exception(exception) + self._cancel_overlapped() + + def set_result(self, result): + super().set_result(result) + self._ov = None + + +class _BaseWaitHandleFuture(futures.Future): + """Subclass of Future which represents a wait handle.""" + + def __init__(self, ov, handle, wait_handle, *, loop=None): + super().__init__(loop=loop) + if self._source_traceback: + del self._source_traceback[-1] + # Keep a reference to the Overlapped object to keep it alive until the + # wait is unregistered + self._ov = ov + self._handle = handle + self._wait_handle = wait_handle + + # Should we call UnregisterWaitEx() if the wait completes + # or is cancelled? 
+ self._registered = True + + def _poll(self): + # non-blocking wait: use a timeout of 0 millisecond + return (_winapi.WaitForSingleObject(self._handle, 0) == + _winapi.WAIT_OBJECT_0) + + def _repr_info(self): + info = super()._repr_info() + info.append(f'handle={self._handle:#x}') + if self._handle is not None: + state = 'signaled' if self._poll() else 'waiting' + info.append(state) + if self._wait_handle is not None: + info.append(f'wait_handle={self._wait_handle:#x}') + return info + + def _unregister_wait_cb(self, fut): + # The wait was unregistered: it's not safe to destroy the Overlapped + # object + self._ov = None + + def _unregister_wait(self): + if not self._registered: + return + self._registered = False + + wait_handle = self._wait_handle + self._wait_handle = None + try: + _overlapped.UnregisterWait(wait_handle) + except OSError as exc: + if exc.winerror != _overlapped.ERROR_IO_PENDING: + context = { + 'message': 'Failed to unregister the wait handle', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + return + # ERROR_IO_PENDING means that the unregister is pending + + self._unregister_wait_cb(None) + + def cancel(self, msg=None): + self._unregister_wait() + return super().cancel(msg=msg) + + def set_exception(self, exception): + self._unregister_wait() + super().set_exception(exception) + + def set_result(self, result): + self._unregister_wait() + super().set_result(result) + + +class _WaitCancelFuture(_BaseWaitHandleFuture): + """Subclass of Future which represents a wait for the cancellation of a + _WaitHandleFuture using an event. + """ + + def __init__(self, ov, event, wait_handle, *, loop=None): + super().__init__(ov, event, wait_handle, loop=loop) + + self._done_callback = None + + def cancel(self): + raise RuntimeError("_WaitCancelFuture must not be cancelled") + + def set_result(self, result): + super().set_result(result) + if self._done_callback is not None: + self._done_callback(self) + + def set_exception(self, exception): + super().set_exception(exception) + if self._done_callback is not None: + self._done_callback(self) + + +class _WaitHandleFuture(_BaseWaitHandleFuture): + def __init__(self, ov, handle, wait_handle, proactor, *, loop=None): + super().__init__(ov, handle, wait_handle, loop=loop) + self._proactor = proactor + self._unregister_proactor = True + self._event = _overlapped.CreateEvent(None, True, False, None) + self._event_fut = None + + def _unregister_wait_cb(self, fut): + if self._event is not None: + _winapi.CloseHandle(self._event) + self._event = None + self._event_fut = None + + # If the wait was cancelled, the wait may never be signalled, so + # it's required to unregister it. Otherwise, IocpProactor.close() will + # wait forever for an event which will never come. + # + # If the IocpProactor already received the event, it's safe to call + # _unregister() because we kept a reference to the Overlapped object + # which is used as a unique key. 
+ self._proactor._unregister(self._ov) + self._proactor = None + + super()._unregister_wait_cb(fut) + + def _unregister_wait(self): + if not self._registered: + return + self._registered = False + + wait_handle = self._wait_handle + self._wait_handle = None + try: + _overlapped.UnregisterWaitEx(wait_handle, self._event) + except OSError as exc: + if exc.winerror != _overlapped.ERROR_IO_PENDING: + context = { + 'message': 'Failed to unregister the wait handle', + 'exception': exc, + 'future': self, + } + if self._source_traceback: + context['source_traceback'] = self._source_traceback + self._loop.call_exception_handler(context) + return + # ERROR_IO_PENDING is not an error, the wait was unregistered + + self._event_fut = self._proactor._wait_cancel(self._event, + self._unregister_wait_cb) + + +class PipeServer(object): + """Class representing a pipe server. + + This is much like a bound, listening socket. + """ + def __init__(self, address): + self._address = address + self._free_instances = weakref.WeakSet() + # initialize the pipe attribute before calling _server_pipe_handle() + # because this function can raise an exception and the destructor calls + # the close() method + self._pipe = None + self._accept_pipe_future = None + self._pipe = self._server_pipe_handle(True) + + def _get_unconnected_pipe(self): + # Create new instance and return previous one. This ensures + # that (until the server is closed) there is always at least + # one pipe handle for address. Therefore if a client attempt + # to connect it will not fail with FileNotFoundError. + tmp, self._pipe = self._pipe, self._server_pipe_handle(False) + return tmp + + def _server_pipe_handle(self, first): + # Return a wrapper for a new pipe handle. + if self.closed(): + return None + flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED + if first: + flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + h = _winapi.CreateNamedPipe( + self._address, flags, + _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE | + _winapi.PIPE_WAIT, + _winapi.PIPE_UNLIMITED_INSTANCES, + windows_utils.BUFSIZE, windows_utils.BUFSIZE, + _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) + pipe = windows_utils.PipeHandle(h) + self._free_instances.add(pipe) + return pipe + + def closed(self): + return (self._address is None) + + def close(self): + if self._accept_pipe_future is not None: + self._accept_pipe_future.cancel() + self._accept_pipe_future = None + # Close all instances which have not been connected to by a client. 
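+        # Clearing _address below also makes closed() return True, so a
+        # pending accept callback will drop any pipe that connects afterwards.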
+ if self._address is not None: + for pipe in self._free_instances: + pipe.close() + self._pipe = None + self._address = None + self._free_instances.clear() + + __del__ = close + + +class _WindowsSelectorEventLoop(selector_events.BaseSelectorEventLoop): + """Windows version of selector event loop.""" + + +class ProactorEventLoop(proactor_events.BaseProactorEventLoop): + """Windows version of proactor event loop using IOCP.""" + + def __init__(self, proactor=None): + if proactor is None: + proactor = IocpProactor() + super().__init__(proactor) + + def run_forever(self): + try: + assert self._self_reading_future is None + self.call_soon(self._loop_self_reading) + super().run_forever() + finally: + if self._self_reading_future is not None: + ov = self._self_reading_future._ov + self._self_reading_future.cancel() + # self_reading_future was just cancelled so if it hasn't been + # finished yet, it never will be (it's possible that it has + # already finished and its callback is waiting in the queue, + # where it could still happen if the event loop is restarted). + # Unregister it otherwise IocpProactor.close will wait for it + # forever + if ov is not None: + self._proactor._unregister(ov) + self._self_reading_future = None + + async def create_pipe_connection(self, protocol_factory, address): + f = self._proactor.connect_pipe(address) + pipe = await f + protocol = protocol_factory() + trans = self._make_duplex_pipe_transport(pipe, protocol, + extra={'addr': address}) + return trans, protocol + + async def start_serving_pipe(self, protocol_factory, address): + server = PipeServer(address) + + def loop_accept_pipe(f=None): + pipe = None + try: + if f: + pipe = f.result() + server._free_instances.discard(pipe) + + if server.closed(): + # A client connected before the server was closed: + # drop the client (close the pipe) and exit + pipe.close() + return + + protocol = protocol_factory() + self._make_duplex_pipe_transport( + pipe, protocol, extra={'addr': address}) + + pipe = server._get_unconnected_pipe() + if pipe is None: + return + + f = self._proactor.accept_pipe(pipe) + except BrokenPipeError: + if pipe and pipe.fileno() != -1: + pipe.close() + self.call_soon(loop_accept_pipe) + except OSError as exc: + if pipe and pipe.fileno() != -1: + self.call_exception_handler({ + 'message': 'Pipe accept failed', + 'exception': exc, + 'pipe': pipe, + }) + pipe.close() + elif self._debug: + logger.warning("Accept pipe failed on pipe %r", + pipe, exc_info=True) + self.call_soon(loop_accept_pipe) + except exceptions.CancelledError: + if pipe: + pipe.close() + else: + server._accept_pipe_future = f + f.add_done_callback(loop_accept_pipe) + + self.call_soon(loop_accept_pipe) + return [server] + + async def _make_subprocess_transport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + extra=None, **kwargs): + waiter = self.create_future() + transp = _WindowsSubprocessTransport(self, protocol, args, shell, + stdin, stdout, stderr, bufsize, + waiter=waiter, extra=extra, + **kwargs) + try: + await waiter + except (SystemExit, KeyboardInterrupt): + raise + except BaseException: + transp.close() + await transp._wait() + raise + + return transp + + +class IocpProactor: + """Proactor implementation using IOCP.""" + + def __init__(self, concurrency=0xffffffff): + self._loop = None + self._results = [] + self._iocp = _overlapped.CreateIoCompletionPort( + _overlapped.INVALID_HANDLE_VALUE, NULL, 0, concurrency) + self._cache = {} + self._registered = weakref.WeakSet() + self._unregistered = [] + 
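+        # Handles passed to _stop_serving(); any of their operations that
+        # complete later are cancelled in _poll() instead of being delivered.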
self._stopped_serving = weakref.WeakSet() + + def _check_closed(self): + if self._iocp is None: + raise RuntimeError('IocpProactor is closed') + + def __repr__(self): + info = ['overlapped#=%s' % len(self._cache), + 'result#=%s' % len(self._results)] + if self._iocp is None: + info.append('closed') + return '<%s %s>' % (self.__class__.__name__, " ".join(info)) + + def set_loop(self, loop): + self._loop = loop + + def select(self, timeout=None): + if not self._results: + self._poll(timeout) + tmp = self._results + self._results = [] + try: + return tmp + finally: + # Needed to break cycles when an exception occurs. + tmp = None + + def _result(self, value): + fut = self._loop.create_future() + fut.set_result(value) + return fut + + def recv(self, conn, nbytes, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + if isinstance(conn, socket.socket): + ov.WSARecv(conn.fileno(), nbytes, flags) + else: + ov.ReadFile(conn.fileno(), nbytes) + except BrokenPipeError: + return self._result(b'') + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def recv_into(self, conn, buf, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + if isinstance(conn, socket.socket): + ov.WSARecvInto(conn.fileno(), buf, flags) + else: + ov.ReadFileInto(conn.fileno(), buf) + except BrokenPipeError: + return self._result(0) + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def recvfrom(self, conn, nbytes, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + try: + ov.WSARecvFrom(conn.fileno(), nbytes, flags) + except BrokenPipeError: + return self._result((b'', None)) + + def finish_recv(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_recv) + + def sendto(self, conn, buf, flags=0, addr=None): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + + ov.WSASendTo(conn.fileno(), buf, flags, addr) + + def finish_send(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_send) + + def send(self, conn, buf, flags=0): + self._register_with_iocp(conn) + ov = _overlapped.Overlapped(NULL) + if isinstance(conn, socket.socket): + ov.WSASend(conn.fileno(), buf, flags) + else: + ov.WriteFile(conn.fileno(), buf) + + def finish_send(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + + return self._register(ov, conn, finish_send) + + def accept(self, listener): + self._register_with_iocp(listener) + conn = 
self._get_accept_socket(listener.family) + ov = _overlapped.Overlapped(NULL) + ov.AcceptEx(listener.fileno(), conn.fileno()) + + def finish_accept(trans, key, ov): + ov.getresult() + # Use SO_UPDATE_ACCEPT_CONTEXT so getsockname() etc work. + buf = struct.pack('@P', listener.fileno()) + conn.setsockopt(socket.SOL_SOCKET, + _overlapped.SO_UPDATE_ACCEPT_CONTEXT, buf) + conn.settimeout(listener.gettimeout()) + return conn, conn.getpeername() + + async def accept_coro(future, conn): + # Coroutine closing the accept socket if the future is cancelled + try: + await future + except exceptions.CancelledError: + conn.close() + raise + + future = self._register(ov, listener, finish_accept) + coro = accept_coro(future, conn) + tasks.ensure_future(coro, loop=self._loop) + return future + + def connect(self, conn, address): + if conn.type == socket.SOCK_DGRAM: + # WSAConnect will complete immediately for UDP sockets so we don't + # need to register any IOCP operation + _overlapped.WSAConnect(conn.fileno(), address) + fut = self._loop.create_future() + fut.set_result(None) + return fut + + self._register_with_iocp(conn) + # The socket needs to be locally bound before we call ConnectEx(). + try: + _overlapped.BindLocal(conn.fileno(), conn.family) + except OSError as e: + if e.winerror != errno.WSAEINVAL: + raise + # Probably already locally bound; check using getsockname(). + if conn.getsockname()[1] == 0: + raise + ov = _overlapped.Overlapped(NULL) + ov.ConnectEx(conn.fileno(), address) + + def finish_connect(trans, key, ov): + ov.getresult() + # Use SO_UPDATE_CONNECT_CONTEXT so getsockname() etc work. + conn.setsockopt(socket.SOL_SOCKET, + _overlapped.SO_UPDATE_CONNECT_CONTEXT, 0) + return conn + + return self._register(ov, conn, finish_connect) + + def sendfile(self, sock, file, offset, count): + self._register_with_iocp(sock) + ov = _overlapped.Overlapped(NULL) + offset_low = offset & 0xffff_ffff + offset_high = (offset >> 32) & 0xffff_ffff + ov.TransmitFile(sock.fileno(), + msvcrt.get_osfhandle(file.fileno()), + offset_low, offset_high, + count, 0, 0) + + def finish_sendfile(trans, key, ov): + try: + return ov.getresult() + except OSError as exc: + if exc.winerror in (_overlapped.ERROR_NETNAME_DELETED, + _overlapped.ERROR_OPERATION_ABORTED): + raise ConnectionResetError(*exc.args) + else: + raise + return self._register(ov, sock, finish_sendfile) + + def accept_pipe(self, pipe): + self._register_with_iocp(pipe) + ov = _overlapped.Overlapped(NULL) + connected = ov.ConnectNamedPipe(pipe.fileno()) + + if connected: + # ConnectNamePipe() failed with ERROR_PIPE_CONNECTED which means + # that the pipe is connected. There is no need to wait for the + # completion of the connection. + return self._result(pipe) + + def finish_accept_pipe(trans, key, ov): + ov.getresult() + return pipe + + return self._register(ov, pipe, finish_accept_pipe) + + async def connect_pipe(self, address): + delay = CONNECT_PIPE_INIT_DELAY + while True: + # Unfortunately there is no way to do an overlapped connect to + # a pipe. Call CreateFile() in a loop until it doesn't fail with + # ERROR_PIPE_BUSY. + try: + handle = _overlapped.ConnectPipe(address) + break + except OSError as exc: + if exc.winerror != _overlapped.ERROR_PIPE_BUSY: + raise + + # ConnectPipe() failed with ERROR_PIPE_BUSY: retry later + delay = min(delay * 2, CONNECT_PIPE_MAX_DELAY) + await tasks.sleep(delay) + + return windows_utils.PipeHandle(handle) + + def wait_for_handle(self, handle, timeout=None): + """Wait for a handle. + + Return a Future object. 
The result of the future is True if the wait + completed, or False if the wait did not complete (on timeout). + """ + return self._wait_for_handle(handle, timeout, False) + + def _wait_cancel(self, event, done_callback): + fut = self._wait_for_handle(event, None, True) + # add_done_callback() cannot be used because the wait may only complete + # in IocpProactor.close(), while the event loop is not running. + fut._done_callback = done_callback + return fut + + def _wait_for_handle(self, handle, timeout, _is_cancel): + self._check_closed() + + if timeout is None: + ms = _winapi.INFINITE + else: + # RegisterWaitForSingleObject() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. + ms = math.ceil(timeout * 1e3) + + # We only create ov so we can use ov.address as a key for the cache. + ov = _overlapped.Overlapped(NULL) + wait_handle = _overlapped.RegisterWaitWithQueue( + handle, self._iocp, ov.address, ms) + if _is_cancel: + f = _WaitCancelFuture(ov, handle, wait_handle, loop=self._loop) + else: + f = _WaitHandleFuture(ov, handle, wait_handle, self, + loop=self._loop) + if f._source_traceback: + del f._source_traceback[-1] + + def finish_wait_for_handle(trans, key, ov): + # Note that this second wait means that we should only use + # this with handles types where a successful wait has no + # effect. So events or processes are all right, but locks + # or semaphores are not. Also note if the handle is + # signalled and then quickly reset, then we may return + # False even though we have not timed out. + return f._poll() + + self._cache[ov.address] = (f, ov, 0, finish_wait_for_handle) + return f + + def _register_with_iocp(self, obj): + # To get notifications of finished ops on this objects sent to the + # completion port, were must register the handle. + if obj not in self._registered: + self._registered.add(obj) + _overlapped.CreateIoCompletionPort(obj.fileno(), self._iocp, 0, 0) + # XXX We could also use SetFileCompletionNotificationModes() + # to avoid sending notifications to completion port of ops + # that succeed immediately. + + def _register(self, ov, obj, callback): + self._check_closed() + + # Return a future which will be set with the result of the + # operation when it completes. The future's value is actually + # the value returned by callback(). + f = _OverlappedFuture(ov, loop=self._loop) + if f._source_traceback: + del f._source_traceback[-1] + if not ov.pending: + # The operation has completed, so no need to postpone the + # work. We cannot take this short cut if we need the + # NumberOfBytes, CompletionKey values returned by + # PostQueuedCompletionStatus(). + try: + value = callback(None, None, ov) + except OSError as e: + f.set_exception(e) + else: + f.set_result(value) + # Even if GetOverlappedResult() was called, we have to wait for the + # notification of the completion in GetQueuedCompletionStatus(). + # Register the overlapped operation to keep a reference to the + # OVERLAPPED object, otherwise the memory is freed and Windows may + # read uninitialized memory. + + # Register the overlapped operation for later. Note that + # we only store obj to prevent it from being garbage + # collected too early. + self._cache[ov.address] = (f, ov, obj, callback) + return f + + def _unregister(self, ov): + """Unregister an overlapped object. + + Call this method when its future has been cancelled. The event can + already be signalled (pending in the proactor event queue). 
It is also + safe if the event is never signalled (because it was cancelled). + """ + self._check_closed() + self._unregistered.append(ov) + + def _get_accept_socket(self, family): + s = socket.socket(family) + s.settimeout(0) + return s + + def _poll(self, timeout=None): + if timeout is None: + ms = INFINITE + elif timeout < 0: + raise ValueError("negative timeout") + else: + # GetQueuedCompletionStatus() has a resolution of 1 millisecond, + # round away from zero to wait *at least* timeout seconds. + ms = math.ceil(timeout * 1e3) + if ms >= INFINITE: + raise ValueError("timeout too big") + + while True: + status = _overlapped.GetQueuedCompletionStatus(self._iocp, ms) + if status is None: + break + ms = 0 + + err, transferred, key, address = status + try: + f, ov, obj, callback = self._cache.pop(address) + except KeyError: + if self._loop.get_debug(): + self._loop.call_exception_handler({ + 'message': ('GetQueuedCompletionStatus() returned an ' + 'unexpected event'), + 'status': ('err=%s transferred=%s key=%#x address=%#x' + % (err, transferred, key, address)), + }) + + # key is either zero, or it is used to return a pipe + # handle which should be closed to avoid a leak. + if key not in (0, _overlapped.INVALID_HANDLE_VALUE): + _winapi.CloseHandle(key) + continue + + if obj in self._stopped_serving: + f.cancel() + # Don't call the callback if _register() already read the result or + # if the overlapped has been cancelled + elif not f.done(): + try: + value = callback(transferred, key, ov) + except OSError as e: + f.set_exception(e) + self._results.append(f) + else: + f.set_result(value) + self._results.append(f) + finally: + f = None + + # Remove unregistered futures + for ov in self._unregistered: + self._cache.pop(ov.address, None) + self._unregistered.clear() + + def _stop_serving(self, obj): + # obj is a socket or pipe handle. It will be closed in + # BaseProactorEventLoop._stop_serving() which will make any + # pending operations fail quickly. + self._stopped_serving.add(obj) + + def close(self): + if self._iocp is None: + # already closed + return + + # Cancel remaining registered operations. + for address, (fut, ov, obj, callback) in list(self._cache.items()): + if fut.cancelled(): + # Nothing to do with cancelled futures + pass + elif isinstance(fut, _WaitCancelFuture): + # _WaitCancelFuture must not be cancelled + pass + else: + try: + fut.cancel() + except OSError as exc: + if self._loop is not None: + context = { + 'message': 'Cancelling a future failed', + 'exception': exc, + 'future': fut, + } + if fut._source_traceback: + context['source_traceback'] = fut._source_traceback + self._loop.call_exception_handler(context) + + # Wait until all cancelled overlapped complete: don't exit with running + # overlapped to prevent a crash. Display progress every second if the + # loop is still running. 
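+        # msg_update doubles as the progress-logging interval and the timeout
+        # handed to _poll() on each iteration below.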
+ msg_update = 1.0 + start_time = time.monotonic() + next_msg = start_time + msg_update + while self._cache: + if next_msg <= time.monotonic(): + logger.debug('%r is running after closing for %.1f seconds', + self, time.monotonic() - start_time) + next_msg = time.monotonic() + msg_update + + # handle a few events, or timeout + self._poll(msg_update) + + self._results = [] + + _winapi.CloseHandle(self._iocp) + self._iocp = None + + def __del__(self): + self.close() + + +class _WindowsSubprocessTransport(base_subprocess.BaseSubprocessTransport): + + def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs): + self._proc = windows_utils.Popen( + args, shell=shell, stdin=stdin, stdout=stdout, stderr=stderr, + bufsize=bufsize, **kwargs) + + def callback(f): + returncode = self._proc.poll() + self._process_exited(returncode) + + f = self._loop._proactor.wait_for_handle(int(self._proc._handle)) + f.add_done_callback(callback) + + +SelectorEventLoop = _WindowsSelectorEventLoop + + +class WindowsSelectorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + _loop_factory = SelectorEventLoop + + +class WindowsProactorEventLoopPolicy(events.BaseDefaultEventLoopPolicy): + _loop_factory = ProactorEventLoop + + +DefaultEventLoopPolicy = WindowsProactorEventLoopPolicy diff --git a/python310/asyncio/windows_utils.py b/python310/asyncio/windows_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ef277fac3e291c307d5e26ada8ef5e28c47434f1 --- /dev/null +++ b/python310/asyncio/windows_utils.py @@ -0,0 +1,173 @@ +"""Various Windows specific bits and pieces.""" + +import sys + +if sys.platform != 'win32': # pragma: no cover + raise ImportError('win32 only') + +import _winapi +import itertools +import msvcrt +import os +import subprocess +import tempfile +import warnings + + +__all__ = 'pipe', 'Popen', 'PIPE', 'PipeHandle' + + +# Constants/globals + + +BUFSIZE = 8192 +PIPE = subprocess.PIPE +STDOUT = subprocess.STDOUT +_mmap_counter = itertools.count() + + +# Replacement for os.pipe() using handles instead of fds + + +def pipe(*, duplex=False, overlapped=(True, True), bufsize=BUFSIZE): + """Like os.pipe() but with overlapped support and using handles not fds.""" + address = tempfile.mktemp( + prefix=r'\\.\pipe\python-pipe-{:d}-{:d}-'.format( + os.getpid(), next(_mmap_counter))) + + if duplex: + openmode = _winapi.PIPE_ACCESS_DUPLEX + access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE + obsize, ibsize = bufsize, bufsize + else: + openmode = _winapi.PIPE_ACCESS_INBOUND + access = _winapi.GENERIC_WRITE + obsize, ibsize = 0, bufsize + + openmode |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE + + if overlapped[0]: + openmode |= _winapi.FILE_FLAG_OVERLAPPED + + if overlapped[1]: + flags_and_attribs = _winapi.FILE_FLAG_OVERLAPPED + else: + flags_and_attribs = 0 + + h1 = h2 = None + try: + h1 = _winapi.CreateNamedPipe( + address, openmode, _winapi.PIPE_WAIT, + 1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL) + + h2 = _winapi.CreateFile( + address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING, + flags_and_attribs, _winapi.NULL) + + ov = _winapi.ConnectNamedPipe(h1, overlapped=True) + ov.GetOverlappedResult(True) + return h1, h2 + except: + if h1 is not None: + _winapi.CloseHandle(h1) + if h2 is not None: + _winapi.CloseHandle(h2) + raise + + +# Wrapper for a pipe handle + + +class PipeHandle: + """Wrapper for an overlapped pipe handle which is vaguely file-object like. + + The IOCP event loop can use these instead of socket objects. 
+ """ + def __init__(self, handle): + self._handle = handle + + def __repr__(self): + if self._handle is not None: + handle = f'handle={self._handle!r}' + else: + handle = 'closed' + return f'<{self.__class__.__name__} {handle}>' + + @property + def handle(self): + return self._handle + + def fileno(self): + if self._handle is None: + raise ValueError("I/O operation on closed pipe") + return self._handle + + def close(self, *, CloseHandle=_winapi.CloseHandle): + if self._handle is not None: + CloseHandle(self._handle) + self._handle = None + + def __del__(self, _warn=warnings.warn): + if self._handle is not None: + _warn(f"unclosed {self!r}", ResourceWarning, source=self) + self.close() + + def __enter__(self): + return self + + def __exit__(self, t, v, tb): + self.close() + + +# Replacement for subprocess.Popen using overlapped pipe handles + + +class Popen(subprocess.Popen): + """Replacement for subprocess.Popen using overlapped pipe handles. + + The stdin, stdout, stderr are None or instances of PipeHandle. + """ + def __init__(self, args, stdin=None, stdout=None, stderr=None, **kwds): + assert not kwds.get('universal_newlines') + assert kwds.get('bufsize', 0) == 0 + stdin_rfd = stdout_wfd = stderr_wfd = None + stdin_wh = stdout_rh = stderr_rh = None + if stdin == PIPE: + stdin_rh, stdin_wh = pipe(overlapped=(False, True), duplex=True) + stdin_rfd = msvcrt.open_osfhandle(stdin_rh, os.O_RDONLY) + else: + stdin_rfd = stdin + if stdout == PIPE: + stdout_rh, stdout_wh = pipe(overlapped=(True, False)) + stdout_wfd = msvcrt.open_osfhandle(stdout_wh, 0) + else: + stdout_wfd = stdout + if stderr == PIPE: + stderr_rh, stderr_wh = pipe(overlapped=(True, False)) + stderr_wfd = msvcrt.open_osfhandle(stderr_wh, 0) + elif stderr == STDOUT: + stderr_wfd = stdout_wfd + else: + stderr_wfd = stderr + try: + super().__init__(args, stdin=stdin_rfd, stdout=stdout_wfd, + stderr=stderr_wfd, **kwds) + except: + for h in (stdin_wh, stdout_rh, stderr_rh): + if h is not None: + _winapi.CloseHandle(h) + raise + else: + if stdin_wh is not None: + self.stdin = PipeHandle(stdin_wh) + if stdout_rh is not None: + self.stdout = PipeHandle(stdout_rh) + if stderr_rh is not None: + self.stderr = PipeHandle(stderr_rh) + finally: + if stdin == PIPE: + os.close(stdin_rfd) + if stdout == PIPE: + os.close(stdout_wfd) + if stderr == PIPE: + os.close(stderr_wfd) diff --git a/python310/asyncore.py b/python310/asyncore.py new file mode 100644 index 0000000000000000000000000000000000000000..a360d404395e5b1623b1b1393839b1174afd1865 --- /dev/null +++ b/python310/asyncore.py @@ -0,0 +1,649 @@ +# -*- Mode: Python -*- +# Id: asyncore.py,v 2.51 2000/09/07 22:29:26 rushing Exp +# Author: Sam Rushing + +# ====================================================================== +# Copyright 1996 by Sam Rushing +# +# All Rights Reserved +# +# Permission to use, copy, modify, and distribute this software and +# its documentation for any purpose and without fee is hereby +# granted, provided that the above copyright notice appear in all +# copies and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of Sam +# Rushing not be used in advertising or publicity pertaining to +# distribution of the software without specific, written prior +# permission. 
+# +# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, +# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN +# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR +# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS +# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, +# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN +# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# ====================================================================== + +"""Basic infrastructure for asynchronous socket service clients and servers. + +There are only two ways to have a program on a single processor do "more +than one thing at a time". Multi-threaded programming is the simplest and +most popular way to do it, but there is another very different technique, +that lets you have nearly all the advantages of multi-threading, without +actually using multiple threads. it's really only practical if your program +is largely I/O bound. If your program is CPU bound, then pre-emptive +scheduled threads are probably what you really need. Network servers are +rarely CPU-bound, however. + +If your operating system supports the select() system call in its I/O +library (and nearly all do), then you can use it to juggle multiple +communication channels at once; doing other work while your I/O is taking +place in the "background." Although this strategy can seem strange and +complex, especially at first, it is in many ways easier to understand and +control than multi-threaded programming. The module documented here solves +many of the difficult problems for you, making the task of building +sophisticated high-performance network servers and clients a snap. +""" + +import select +import socket +import sys +import time +import warnings + +import os +from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, ECONNRESET, EINVAL, \ + ENOTCONN, ESHUTDOWN, EISCONN, EBADF, ECONNABORTED, EPIPE, EAGAIN, \ + errorcode + +warnings.warn( + 'The asyncore module is deprecated and will be removed in Python 3.12. 
' + 'The recommended replacement is asyncio', + DeprecationWarning, + stacklevel=2) + + +_DISCONNECTED = frozenset({ECONNRESET, ENOTCONN, ESHUTDOWN, ECONNABORTED, EPIPE, + EBADF}) + +try: + socket_map +except NameError: + socket_map = {} + +def _strerror(err): + try: + return os.strerror(err) + except (ValueError, OverflowError, NameError): + if err in errorcode: + return errorcode[err] + return "Unknown error %s" %err + +class ExitNow(Exception): + pass + +_reraised_exceptions = (ExitNow, KeyboardInterrupt, SystemExit) + +def read(obj): + try: + obj.handle_read_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def write(obj): + try: + obj.handle_write_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def _exception(obj): + try: + obj.handle_expt_event() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def readwrite(obj, flags): + try: + if flags & select.POLLIN: + obj.handle_read_event() + if flags & select.POLLOUT: + obj.handle_write_event() + if flags & select.POLLPRI: + obj.handle_expt_event() + if flags & (select.POLLHUP | select.POLLERR | select.POLLNVAL): + obj.handle_close() + except OSError as e: + if e.errno not in _DISCONNECTED: + obj.handle_error() + else: + obj.handle_close() + except _reraised_exceptions: + raise + except: + obj.handle_error() + +def poll(timeout=0.0, map=None): + if map is None: + map = socket_map + if map: + r = []; w = []; e = [] + for fd, obj in list(map.items()): + is_r = obj.readable() + is_w = obj.writable() + if is_r: + r.append(fd) + # accepting sockets should not be writable + if is_w and not obj.accepting: + w.append(fd) + if is_r or is_w: + e.append(fd) + if [] == r == w == e: + time.sleep(timeout) + return + + r, w, e = select.select(r, w, e, timeout) + + for fd in r: + obj = map.get(fd) + if obj is None: + continue + read(obj) + + for fd in w: + obj = map.get(fd) + if obj is None: + continue + write(obj) + + for fd in e: + obj = map.get(fd) + if obj is None: + continue + _exception(obj) + +def poll2(timeout=0.0, map=None): + # Use the poll() support added to the select module in Python 2.0 + if map is None: + map = socket_map + if timeout is not None: + # timeout is in milliseconds + timeout = int(timeout*1000) + pollster = select.poll() + if map: + for fd, obj in list(map.items()): + flags = 0 + if obj.readable(): + flags |= select.POLLIN | select.POLLPRI + # accepting sockets should not be writable + if obj.writable() and not obj.accepting: + flags |= select.POLLOUT + if flags: + pollster.register(fd, flags) + + r = pollster.poll(timeout) + for fd, flags in r: + obj = map.get(fd) + if obj is None: + continue + readwrite(obj, flags) + +poll3 = poll2 # Alias for backward compatibility + +def loop(timeout=30.0, use_poll=False, map=None, count=None): + if map is None: + map = socket_map + + if use_poll and hasattr(select, 'poll'): + poll_fun = poll2 + else: + poll_fun = poll + + if count is None: + while map: + poll_fun(timeout, map) + + else: + while map and count > 0: + poll_fun(timeout, map) + count = count - 1 + +class dispatcher: + + debug = False + connected = False + accepting = False + connecting = False + closing = False + addr = None + ignore_log_types = frozenset({'warning'}) + + def __init__(self, sock=None, map=None): + if map is None: + self._map = socket_map + else: + self._map = map + + self._fileno = None + + if sock: + # Set to nonblocking just to make sure for cases where we + # get a socket from a blocking source. 
+ sock.setblocking(False) + self.set_socket(sock, map) + self.connected = True + # The constructor no longer requires that the socket + # passed be connected. + try: + self.addr = sock.getpeername() + except OSError as err: + if err.errno in (ENOTCONN, EINVAL): + # To handle the case where we got an unconnected + # socket. + self.connected = False + else: + # The socket is broken in some unknown way, alert + # the user and remove it from the map (to prevent + # polling of broken sockets). + self.del_channel(map) + raise + else: + self.socket = None + + def __repr__(self): + status = [self.__class__.__module__+"."+self.__class__.__qualname__] + if self.accepting and self.addr: + status.append('listening') + elif self.connected: + status.append('connected') + if self.addr is not None: + try: + status.append('%s:%d' % self.addr) + except TypeError: + status.append(repr(self.addr)) + return '<%s at %#x>' % (' '.join(status), id(self)) + + def add_channel(self, map=None): + #self.log_info('adding channel %s' % self) + if map is None: + map = self._map + map[self._fileno] = self + + def del_channel(self, map=None): + fd = self._fileno + if map is None: + map = self._map + if fd in map: + #self.log_info('closing channel %d:%s' % (fd, self)) + del map[fd] + self._fileno = None + + def create_socket(self, family=socket.AF_INET, type=socket.SOCK_STREAM): + self.family_and_type = family, type + sock = socket.socket(family, type) + sock.setblocking(False) + self.set_socket(sock) + + def set_socket(self, sock, map=None): + self.socket = sock + self._fileno = sock.fileno() + self.add_channel(map) + + def set_reuse_addr(self): + # try to re-use a server port if possible + try: + self.socket.setsockopt( + socket.SOL_SOCKET, socket.SO_REUSEADDR, + self.socket.getsockopt(socket.SOL_SOCKET, + socket.SO_REUSEADDR) | 1 + ) + except OSError: + pass + + # ================================================== + # predicates for select() + # these are used as filters for the lists of sockets + # to pass to select(). + # ================================================== + + def readable(self): + return True + + def writable(self): + return True + + # ================================================== + # socket object methods. 
+ # ================================================== + + def listen(self, num): + self.accepting = True + if os.name == 'nt' and num > 5: + num = 5 + return self.socket.listen(num) + + def bind(self, addr): + self.addr = addr + return self.socket.bind(addr) + + def connect(self, address): + self.connected = False + self.connecting = True + err = self.socket.connect_ex(address) + if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \ + or err == EINVAL and os.name == 'nt': + self.addr = address + return + if err in (0, EISCONN): + self.addr = address + self.handle_connect_event() + else: + raise OSError(err, errorcode[err]) + + def accept(self): + # XXX can return either an address pair or None + try: + conn, addr = self.socket.accept() + except TypeError: + return None + except OSError as why: + if why.errno in (EWOULDBLOCK, ECONNABORTED, EAGAIN): + return None + else: + raise + else: + return conn, addr + + def send(self, data): + try: + result = self.socket.send(data) + return result + except OSError as why: + if why.errno == EWOULDBLOCK: + return 0 + elif why.errno in _DISCONNECTED: + self.handle_close() + return 0 + else: + raise + + def recv(self, buffer_size): + try: + data = self.socket.recv(buffer_size) + if not data: + # a closed connection is indicated by signaling + # a read condition, and having recv() return 0. + self.handle_close() + return b'' + else: + return data + except OSError as why: + # winsock sometimes raises ENOTCONN + if why.errno in _DISCONNECTED: + self.handle_close() + return b'' + else: + raise + + def close(self): + self.connected = False + self.accepting = False + self.connecting = False + self.del_channel() + if self.socket is not None: + try: + self.socket.close() + except OSError as why: + if why.errno not in (ENOTCONN, EBADF): + raise + + # log and log_info may be overridden to provide more sophisticated + # logging and warning methods. In general, log is for 'hit' logging + # and 'log_info' is for informational, warning and error logging. + + def log(self, message): + sys.stderr.write('log: %s\n' % str(message)) + + def log_info(self, message, type='info'): + if type not in self.ignore_log_types: + print('%s: %s' % (type, message)) + + def handle_read_event(self): + if self.accepting: + # accepting sockets are never connected, they "spawn" new + # sockets that are connected + self.handle_accept() + elif not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_read() + else: + self.handle_read() + + def handle_connect_event(self): + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + raise OSError(err, _strerror(err)) + self.handle_connect() + self.connected = True + self.connecting = False + + def handle_write_event(self): + if self.accepting: + # Accepting sockets shouldn't get a write event. + # We will pretend it didn't happen. 
+ return + + if not self.connected: + if self.connecting: + self.handle_connect_event() + self.handle_write() + + def handle_expt_event(self): + # handle_expt_event() is called if there might be an error on the + # socket, or if there is OOB data + # check for the error condition first + err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + if err != 0: + # we can get here when select.select() says that there is an + # exceptional condition on the socket + # since there is an error, we'll go ahead and close the socket + # like we would in a subclassed handle_read() that received no + # data + self.handle_close() + else: + self.handle_expt() + + def handle_error(self): + nil, t, v, tbinfo = compact_traceback() + + # sometimes a user repr method will crash. + try: + self_repr = repr(self) + except: + self_repr = '<__repr__(self) failed for object at %0x>' % id(self) + + self.log_info( + 'uncaptured python exception, closing channel %s (%s:%s %s)' % ( + self_repr, + t, + v, + tbinfo + ), + 'error' + ) + self.handle_close() + + def handle_expt(self): + self.log_info('unhandled incoming priority event', 'warning') + + def handle_read(self): + self.log_info('unhandled read event', 'warning') + + def handle_write(self): + self.log_info('unhandled write event', 'warning') + + def handle_connect(self): + self.log_info('unhandled connect event', 'warning') + + def handle_accept(self): + pair = self.accept() + if pair is not None: + self.handle_accepted(*pair) + + def handle_accepted(self, sock, addr): + sock.close() + self.log_info('unhandled accepted event', 'warning') + + def handle_close(self): + self.log_info('unhandled close event', 'warning') + self.close() + +# --------------------------------------------------------------------------- +# adds simple buffered output capability, useful for simple clients. +# [for more sophisticated usage use asynchat.async_chat] +# --------------------------------------------------------------------------- + +class dispatcher_with_send(dispatcher): + + def __init__(self, sock=None, map=None): + dispatcher.__init__(self, sock, map) + self.out_buffer = b'' + + def initiate_send(self): + num_sent = 0 + num_sent = dispatcher.send(self, self.out_buffer[:65536]) + self.out_buffer = self.out_buffer[num_sent:] + + def handle_write(self): + self.initiate_send() + + def writable(self): + return (not self.connected) or len(self.out_buffer) + + def send(self, data): + if self.debug: + self.log_info('sending %s' % repr(data)) + self.out_buffer = self.out_buffer + data + self.initiate_send() + +# --------------------------------------------------------------------------- +# used for debugging. 
+# --------------------------------------------------------------------------- + +def compact_traceback(): + t, v, tb = sys.exc_info() + tbinfo = [] + if not tb: # Must have a traceback + raise AssertionError("traceback does not exist") + while tb: + tbinfo.append(( + tb.tb_frame.f_code.co_filename, + tb.tb_frame.f_code.co_name, + str(tb.tb_lineno) + )) + tb = tb.tb_next + + # just to be safe + del tb + + file, function, line = tbinfo[-1] + info = ' '.join(['[%s|%s|%s]' % x for x in tbinfo]) + return (file, function, line), t, v, info + +def close_all(map=None, ignore_all=False): + if map is None: + map = socket_map + for x in list(map.values()): + try: + x.close() + except OSError as x: + if x.errno == EBADF: + pass + elif not ignore_all: + raise + except _reraised_exceptions: + raise + except: + if not ignore_all: + raise + map.clear() + +# Asynchronous File I/O: +# +# After a little research (reading man pages on various unixen, and +# digging through the linux kernel), I've determined that select() +# isn't meant for doing asynchronous file i/o. +# Heartening, though - reading linux/mm/filemap.c shows that linux +# supports asynchronous read-ahead. So _MOST_ of the time, the data +# will be sitting in memory for us already when we go to read it. +# +# What other OS's (besides NT) support async file i/o? [VMS?] +# +# Regardless, this is useful for pipes, and stdin/stdout... + +if os.name == 'posix': + class file_wrapper: + # Here we override just enough to make a file + # look like a socket for the purposes of asyncore. + # The passed fd is automatically os.dup()'d + + def __init__(self, fd): + self.fd = os.dup(fd) + + def __del__(self): + if self.fd >= 0: + warnings.warn("unclosed file %r" % self, ResourceWarning, + source=self) + self.close() + + def recv(self, *args): + return os.read(self.fd, *args) + + def send(self, *args): + return os.write(self.fd, *args) + + def getsockopt(self, level, optname, buflen=None): + if (level == socket.SOL_SOCKET and + optname == socket.SO_ERROR and + not buflen): + return 0 + raise NotImplementedError("Only asyncore specific behaviour " + "implemented.") + + read = recv + write = send + + def close(self): + if self.fd < 0: + return + fd = self.fd + self.fd = -1 + os.close(fd) + + def fileno(self): + return self.fd + + class file_dispatcher(dispatcher): + + def __init__(self, fd, map=None): + dispatcher.__init__(self, None, map) + self.connected = True + try: + fd = fd.fileno() + except AttributeError: + pass + self.set_file(fd) + # set it to non-blocking mode + os.set_blocking(fd, False) + + def set_file(self, fd): + self.socket = file_wrapper(fd) + self._fileno = self.socket.fileno() + self.add_channel() diff --git a/python310/base64.py b/python310/base64.py new file mode 100644 index 0000000000000000000000000000000000000000..1cfca7f5c9ae9351803659e05d6206e614990c89 --- /dev/null +++ b/python310/base64.py @@ -0,0 +1,603 @@ +#! 
/usr/bin/python3.10 + +"""Base16, Base32, Base64 (RFC 3548), Base85 and Ascii85 data encodings""" + +# Modified 04-Oct-1995 by Jack Jansen to use binascii module +# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support +# Modified 22-May-2007 by Guido van Rossum to use bytes everywhere + +import re +import struct +import binascii + + +__all__ = [ + # Legacy interface exports traditional RFC 2045 Base64 encodings + 'encode', 'decode', 'encodebytes', 'decodebytes', + # Generalized interface for other encodings + 'b64encode', 'b64decode', 'b32encode', 'b32decode', + 'b32hexencode', 'b32hexdecode', 'b16encode', 'b16decode', + # Base85 and Ascii85 encodings + 'b85encode', 'b85decode', 'a85encode', 'a85decode', + # Standard Base64 encoding + 'standard_b64encode', 'standard_b64decode', + # Some common Base64 alternatives. As referenced by RFC 3458, see thread + # starting at: + # + # http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html + 'urlsafe_b64encode', 'urlsafe_b64decode', + ] + + +bytes_types = (bytes, bytearray) # Types acceptable as binary data + +def _bytes_from_decode_data(s): + if isinstance(s, str): + try: + return s.encode('ascii') + except UnicodeEncodeError: + raise ValueError('string argument should contain only ASCII characters') + if isinstance(s, bytes_types): + return s + try: + return memoryview(s).tobytes() + except TypeError: + raise TypeError("argument should be a bytes-like object or ASCII " + "string, not %r" % s.__class__.__name__) from None + + +# Base64 encoding/decoding uses binascii + +def b64encode(s, altchars=None): + """Encode the bytes-like object s using Base64 and return a bytes object. + + Optional altchars should be a byte string of length 2 which specifies an + alternative alphabet for the '+' and '/' characters. This allows an + application to e.g. generate url or filesystem safe Base64 strings. + """ + encoded = binascii.b2a_base64(s, newline=False) + if altchars is not None: + assert len(altchars) == 2, repr(altchars) + return encoded.translate(bytes.maketrans(b'+/', altchars)) + return encoded + + +def b64decode(s, altchars=None, validate=False): + """Decode the Base64 encoded bytes-like object or ASCII string s. + + Optional altchars must be a bytes-like object or ASCII string of length 2 + which specifies the alternative alphabet used instead of the '+' and '/' + characters. + + The result is returned as a bytes object. A binascii.Error is raised if + s is incorrectly padded. + + If validate is False (the default), characters that are neither in the + normal base-64 alphabet nor the alternative alphabet are discarded prior + to the padding check. If validate is True, these non-alphabet characters + in the input result in a binascii.Error. + """ + s = _bytes_from_decode_data(s) + if altchars is not None: + altchars = _bytes_from_decode_data(altchars) + assert len(altchars) == 2, repr(altchars) + s = s.translate(bytes.maketrans(altchars, b'+/')) + if validate and not re.fullmatch(b'[A-Za-z0-9+/]*={0,2}', s): + raise binascii.Error('Non-base64 digit found') + return binascii.a2b_base64(s) + + +def standard_b64encode(s): + """Encode bytes-like object s using the standard Base64 alphabet. + + The result is returned as a bytes object. + """ + return b64encode(s) + +def standard_b64decode(s): + """Decode bytes encoded with the standard Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. 
Characters that are not in the standard alphabet + are discarded prior to the padding check. + """ + return b64decode(s) + + +_urlsafe_encode_translation = bytes.maketrans(b'+/', b'-_') +_urlsafe_decode_translation = bytes.maketrans(b'-_', b'+/') + +def urlsafe_b64encode(s): + """Encode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object to encode. The result is returned as a + bytes object. The alphabet uses '-' instead of '+' and '_' instead of + '/'. + """ + return b64encode(s).translate(_urlsafe_encode_translation) + +def urlsafe_b64decode(s): + """Decode bytes using the URL- and filesystem-safe Base64 alphabet. + + Argument s is a bytes-like object or ASCII string to decode. The result + is returned as a bytes object. A binascii.Error is raised if the input + is incorrectly padded. Characters that are not in the URL-safe base-64 + alphabet, and are not a plus '+' or slash '/', are discarded prior to the + padding check. + + The alphabet uses '-' instead of '+' and '_' instead of '/'. + """ + s = _bytes_from_decode_data(s) + s = s.translate(_urlsafe_decode_translation) + return b64decode(s) + + + +# Base32 encoding/decoding must be done in Python +_B32_ENCODE_DOCSTRING = ''' +Encode the bytes-like objects using {encoding} and return a bytes object. +''' +_B32_DECODE_DOCSTRING = ''' +Decode the {encoding} encoded bytes-like object or ASCII string s. + +Optional casefold is a flag specifying whether a lowercase alphabet is +acceptable as input. For security purposes, the default is False. +{extra_args} +The result is returned as a bytes object. A binascii.Error is raised if +the input is incorrectly padded or if there are non-alphabet +characters present in the input. +''' +_B32_DECODE_MAP01_DOCSTRING = ''' +RFC 3548 allows for optional mapping of the digit 0 (zero) to the +letter O (oh), and for optional mapping of the digit 1 (one) to +either the letter I (eye) or letter L (el). The optional argument +map01 when not None, specifies which letter the digit 1 should be +mapped to (when map01 is not None, the digit 0 is always mapped to +the letter O). For security purposes the default is None, so that +0 and 1 are not allowed in the input. +''' +_b32alphabet = b'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567' +_b32hexalphabet = b'0123456789ABCDEFGHIJKLMNOPQRSTUV' +_b32tab2 = {} +_b32rev = {} + +def _b32encode(alphabet, s): + global _b32tab2 + # Delay the initialization of the table to not waste memory + # if the function is never called + if alphabet not in _b32tab2: + b32tab = [bytes((i,)) for i in alphabet] + _b32tab2[alphabet] = [a + b for a in b32tab for b in b32tab] + b32tab = None + + if not isinstance(s, bytes_types): + s = memoryview(s).tobytes() + leftover = len(s) % 5 + # Pad the last quantum with zero bits if necessary + if leftover: + s = s + b'\0' * (5 - leftover) # Don't use += ! 
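# Note on the loop below (added clarification, not in the upstream source):
# each padded 5-byte quantum is read as a single 40-bit integer and emitted as
# eight alphabet characters; _b32tab2 maps 10 bits per lookup, so the four
# indexed lookups together cover all 40 bits.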
+ encoded = bytearray() + from_bytes = int.from_bytes + b32tab2 = _b32tab2[alphabet] + for i in range(0, len(s), 5): + c = from_bytes(s[i: i + 5], 'big') + encoded += (b32tab2[c >> 30] + # bits 1 - 10 + b32tab2[(c >> 20) & 0x3ff] + # bits 11 - 20 + b32tab2[(c >> 10) & 0x3ff] + # bits 21 - 30 + b32tab2[c & 0x3ff] # bits 31 - 40 + ) + # Adjust for any leftover partial quanta + if leftover == 1: + encoded[-6:] = b'======' + elif leftover == 2: + encoded[-4:] = b'====' + elif leftover == 3: + encoded[-3:] = b'===' + elif leftover == 4: + encoded[-1:] = b'=' + return bytes(encoded) + +def _b32decode(alphabet, s, casefold=False, map01=None): + global _b32rev + # Delay the initialization of the table to not waste memory + # if the function is never called + if alphabet not in _b32rev: + _b32rev[alphabet] = {v: k for k, v in enumerate(alphabet)} + s = _bytes_from_decode_data(s) + if len(s) % 8: + raise binascii.Error('Incorrect padding') + # Handle section 2.4 zero and one mapping. The flag map01 will be either + # False, or the character to map the digit 1 (one) to. It should be + # either L (el) or I (eye). + if map01 is not None: + map01 = _bytes_from_decode_data(map01) + assert len(map01) == 1, repr(map01) + s = s.translate(bytes.maketrans(b'01', b'O' + map01)) + if casefold: + s = s.upper() + # Strip off pad characters from the right. We need to count the pad + # characters because this will tell us how many null bytes to remove from + # the end of the decoded string. + l = len(s) + s = s.rstrip(b'=') + padchars = l - len(s) + # Now decode the full quanta + decoded = bytearray() + b32rev = _b32rev[alphabet] + for i in range(0, len(s), 8): + quanta = s[i: i + 8] + acc = 0 + try: + for c in quanta: + acc = (acc << 5) + b32rev[c] + except KeyError: + raise binascii.Error('Non-base32 digit found') from None + decoded += acc.to_bytes(5, 'big') + # Process the last, partial quanta + if l % 8 or padchars not in {0, 1, 3, 4, 6}: + raise binascii.Error('Incorrect padding') + if padchars and decoded: + acc <<= 5 * padchars + last = acc.to_bytes(5, 'big') + leftover = (43 - 5 * padchars) // 8 # 1: 4, 3: 3, 4: 2, 6: 1 + decoded[-5:] = last[:leftover] + return bytes(decoded) + + +def b32encode(s): + return _b32encode(_b32alphabet, s) +b32encode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32') + +def b32decode(s, casefold=False, map01=None): + return _b32decode(_b32alphabet, s, casefold, map01) +b32decode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32', + extra_args=_B32_DECODE_MAP01_DOCSTRING) + +def b32hexencode(s): + return _b32encode(_b32hexalphabet, s) +b32hexencode.__doc__ = _B32_ENCODE_DOCSTRING.format(encoding='base32hex') + +def b32hexdecode(s, casefold=False): + # base32hex does not have the 01 mapping + return _b32decode(_b32hexalphabet, s, casefold) +b32hexdecode.__doc__ = _B32_DECODE_DOCSTRING.format(encoding='base32hex', + extra_args='') + + +# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns +# lowercase. The RFC also recommends against accepting input case +# insensitively. +def b16encode(s): + """Encode the bytes-like object s using Base16 and return a bytes object. + """ + return binascii.hexlify(s).upper() + + +def b16decode(s, casefold=False): + """Decode the Base16 encoded bytes-like object or ASCII string s. + + Optional casefold is a flag specifying whether a lowercase alphabet is + acceptable as input. For security purposes, the default is False. + + The result is returned as a bytes object. 
A binascii.Error is raised if + s is incorrectly padded or if there are non-alphabet characters present + in the input. + """ + s = _bytes_from_decode_data(s) + if casefold: + s = s.upper() + if re.search(b'[^0-9A-F]', s): + raise binascii.Error('Non-base16 digit found') + return binascii.unhexlify(s) + +# +# Ascii85 encoding/decoding +# + +_a85chars = None +_a85chars2 = None +_A85START = b"<~" +_A85END = b"~>" + +def _85encode(b, chars, chars2, pad=False, foldnuls=False, foldspaces=False): + # Helper function for a85encode and b85encode + if not isinstance(b, bytes_types): + b = memoryview(b).tobytes() + + padding = (-len(b)) % 4 + if padding: + b = b + b'\0' * padding + words = struct.Struct('!%dI' % (len(b) // 4)).unpack(b) + + chunks = [b'z' if foldnuls and not word else + b'y' if foldspaces and word == 0x20202020 else + (chars2[word // 614125] + + chars2[word // 85 % 7225] + + chars[word % 85]) + for word in words] + + if padding and not pad: + if chunks[-1] == b'z': + chunks[-1] = chars[0] * 5 + chunks[-1] = chunks[-1][:-padding] + + return b''.join(chunks) + +def a85encode(b, *, foldspaces=False, wrapcol=0, pad=False, adobe=False): + """Encode bytes-like object b using Ascii85 and return a bytes object. + + foldspaces is an optional flag that uses the special short sequence 'y' + instead of 4 consecutive spaces (ASCII 0x20) as supported by 'btoa'. This + feature is not supported by the "standard" Adobe encoding. + + wrapcol controls whether the output should have newline (b'\\n') characters + added to it. If this is non-zero, each output line will be at most this + many characters long. + + pad controls whether the input is padded to a multiple of 4 before + encoding. Note that the btoa implementation always pads. + + adobe controls whether the encoded byte sequence is framed with <~ and ~>, + which is used by the Adobe implementation. + """ + global _a85chars, _a85chars2 + # Delay the initialization of tables to not waste memory + # if the function is never called + if _a85chars2 is None: + _a85chars = [bytes((i,)) for i in range(33, 118)] + _a85chars2 = [(a + b) for a in _a85chars for b in _a85chars] + + result = _85encode(b, _a85chars, _a85chars2, pad, True, foldspaces) + + if adobe: + result = _A85START + result + if wrapcol: + wrapcol = max(2 if adobe else 1, wrapcol) + chunks = [result[i: i + wrapcol] + for i in range(0, len(result), wrapcol)] + if adobe: + if len(chunks[-1]) + 2 > wrapcol: + chunks.append(b'') + result = b'\n'.join(chunks) + if adobe: + result += _A85END + + return result + +def a85decode(b, *, foldspaces=False, adobe=False, ignorechars=b' \t\n\r\v'): + """Decode the Ascii85 encoded bytes-like object or ASCII string b. + + foldspaces is a flag that specifies whether the 'y' short sequence should be + accepted as shorthand for 4 consecutive spaces (ASCII 0x20). This feature is + not supported by the "standard" Adobe encoding. + + adobe controls whether the input sequence is in Adobe Ascii85 format (i.e. + is framed with <~ and ~>). + + ignorechars should be a byte string containing characters to ignore from the + input. This should only contain whitespace characters, and by default + contains all whitespace characters in ASCII. + + The result is returned as a bytes object. 
+ """ + b = _bytes_from_decode_data(b) + if adobe: + if not b.endswith(_A85END): + raise ValueError( + "Ascii85 encoded byte sequences must end " + "with {!r}".format(_A85END) + ) + if b.startswith(_A85START): + b = b[2:-2] # Strip off start/end markers + else: + b = b[:-2] + # + # We have to go through this stepwise, so as to ignore spaces and handle + # special short sequences + # + packI = struct.Struct('!I').pack + decoded = [] + decoded_append = decoded.append + curr = [] + curr_append = curr.append + curr_clear = curr.clear + for x in b + b'u' * 4: + if b'!'[0] <= x <= b'u'[0]: + curr_append(x) + if len(curr) == 5: + acc = 0 + for x in curr: + acc = 85 * acc + (x - 33) + try: + decoded_append(packI(acc)) + except struct.error: + raise ValueError('Ascii85 overflow') from None + curr_clear() + elif x == b'z'[0]: + if curr: + raise ValueError('z inside Ascii85 5-tuple') + decoded_append(b'\0\0\0\0') + elif foldspaces and x == b'y'[0]: + if curr: + raise ValueError('y inside Ascii85 5-tuple') + decoded_append(b'\x20\x20\x20\x20') + elif x in ignorechars: + # Skip whitespace + continue + else: + raise ValueError('Non-Ascii85 digit found: %c' % x) + + result = b''.join(decoded) + padding = 4 - len(curr) + if padding: + # Throw away the extra padding + result = result[:-padding] + return result + +# The following code is originally taken (with permission) from Mercurial + +_b85alphabet = (b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" + b"abcdefghijklmnopqrstuvwxyz!#$%&()*+-;<=>?@^_`{|}~") +_b85chars = None +_b85chars2 = None +_b85dec = None + +def b85encode(b, pad=False): + """Encode bytes-like object b in base85 format and return a bytes object. + + If pad is true, the input is padded with b'\\0' so its length is a multiple of + 4 bytes before encoding. + """ + global _b85chars, _b85chars2 + # Delay the initialization of tables to not waste memory + # if the function is never called + if _b85chars2 is None: + _b85chars = [bytes((i,)) for i in _b85alphabet] + _b85chars2 = [(a + b) for a in _b85chars for b in _b85chars] + return _85encode(b, _b85chars, _b85chars2, pad) + +def b85decode(b): + """Decode the base85-encoded bytes-like object or ASCII string b + + The result is returned as a bytes object. + """ + global _b85dec + # Delay the initialization of tables to not waste memory + # if the function is never called + if _b85dec is None: + _b85dec = [None] * 256 + for i, c in enumerate(_b85alphabet): + _b85dec[c] = i + + b = _bytes_from_decode_data(b) + padding = (-len(b)) % 5 + b = b + b'~' * padding + out = [] + packI = struct.Struct('!I').pack + for i in range(0, len(b), 5): + chunk = b[i:i + 5] + acc = 0 + try: + for c in chunk: + acc = acc * 85 + _b85dec[c] + except TypeError: + for j, c in enumerate(chunk): + if _b85dec[c] is None: + raise ValueError('bad base85 character at position %d' + % (i + j)) from None + raise + try: + out.append(packI(acc)) + except struct.error: + raise ValueError('base85 overflow in hunk starting at byte %d' + % i) from None + + result = b''.join(out) + if padding: + result = result[:-padding] + return result + +# Legacy interface. This code could be cleaned up since I don't believe +# binascii has any line length limitations. It just doesn't seem worth it +# though. The files should be opened in binary mode. 
+ +MAXLINESIZE = 76 # Excluding the CRLF +MAXBINSIZE = (MAXLINESIZE//4)*3 + +def encode(input, output): + """Encode a file; input and output are binary files.""" + while True: + s = input.read(MAXBINSIZE) + if not s: + break + while len(s) < MAXBINSIZE: + ns = input.read(MAXBINSIZE-len(s)) + if not ns: + break + s += ns + line = binascii.b2a_base64(s) + output.write(line) + + +def decode(input, output): + """Decode a file; input and output are binary files.""" + while True: + line = input.readline() + if not line: + break + s = binascii.a2b_base64(line) + output.write(s) + +def _input_type_check(s): + try: + m = memoryview(s) + except TypeError as err: + msg = "expected bytes-like object, not %s" % s.__class__.__name__ + raise TypeError(msg) from err + if m.format not in ('c', 'b', 'B'): + msg = ("expected single byte elements, not %r from %s" % + (m.format, s.__class__.__name__)) + raise TypeError(msg) + if m.ndim != 1: + msg = ("expected 1-D data, not %d-D data from %s" % + (m.ndim, s.__class__.__name__)) + raise TypeError(msg) + + +def encodebytes(s): + """Encode a bytestring into a bytes object containing multiple lines + of base-64 data.""" + _input_type_check(s) + pieces = [] + for i in range(0, len(s), MAXBINSIZE): + chunk = s[i : i + MAXBINSIZE] + pieces.append(binascii.b2a_base64(chunk)) + return b"".join(pieces) + + +def decodebytes(s): + """Decode a bytestring of base-64 data into a bytes object.""" + _input_type_check(s) + return binascii.a2b_base64(s) + + +# Usable as a script... +def main(): + """Small main program""" + import sys, getopt + try: + opts, args = getopt.getopt(sys.argv[1:], 'deut') + except getopt.error as msg: + sys.stdout = sys.stderr + print(msg) + print("""usage: %s [-d|-e|-u|-t] [file|-] + -d, -u: decode + -e: encode (default) + -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]) + sys.exit(2) + func = encode + for o, a in opts: + if o == '-e': func = encode + if o == '-d': func = decode + if o == '-u': func = decode + if o == '-t': test(); return + if args and args[0] != '-': + with open(args[0], 'rb') as f: + func(f, sys.stdout.buffer) + else: + func(sys.stdin.buffer, sys.stdout.buffer) + + +def test(): + s0 = b"Aladdin:open sesame" + print(repr(s0)) + s1 = encodebytes(s0) + print(repr(s1)) + s2 = decodebytes(s1) + print(repr(s2)) + assert s0 == s2 + + +if __name__ == '__main__': + main() diff --git a/python310/bdb.py b/python310/bdb.py new file mode 100644 index 0000000000000000000000000000000000000000..7f9b09514ffd00ea9cda0d56a014a79ea16ada21 --- /dev/null +++ b/python310/bdb.py @@ -0,0 +1,891 @@ +"""Debugger basics""" + +import fnmatch +import sys +import os +from inspect import CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR + +__all__ = ["BdbQuit", "Bdb", "Breakpoint"] + +GENERATOR_AND_COROUTINE_FLAGS = CO_GENERATOR | CO_COROUTINE | CO_ASYNC_GENERATOR + + +class BdbQuit(Exception): + """Exception to give up completely.""" + + +class Bdb: + """Generic Python debugger base class. + + This class takes care of details of the trace facility; + a derived class should implement user interaction. + The standard debugger class (pdb.Pdb) is an example. + + The optional skip argument must be an iterable of glob-style + module name patterns. The debugger will not step into frames + that originate in a module that matches one of these patterns. + Whether a frame is considered to originate in a certain module + is determined by the __name__ in the frame globals. 
+ """ + + def __init__(self, skip=None): + self.skip = set(skip) if skip else None + self.breaks = {} + self.fncache = {} + self.frame_returning = None + + self._load_breaks() + + def canonic(self, filename): + """Return canonical form of filename. + + For real filenames, the canonical form is a case-normalized (on + case insensitive filesystems) absolute path. 'Filenames' with + angle brackets, such as "", generated in interactive + mode, are returned unchanged. + """ + if filename == "<" + filename[1:-1] + ">": + return filename + canonic = self.fncache.get(filename) + if not canonic: + canonic = os.path.abspath(filename) + canonic = os.path.normcase(canonic) + self.fncache[filename] = canonic + return canonic + + def reset(self): + """Set values of attributes as ready to start debugging.""" + import linecache + linecache.checkcache() + self.botframe = None + self._set_stopinfo(None, None) + + def trace_dispatch(self, frame, event, arg): + """Dispatch a trace function for debugged frames based on the event. + + This function is installed as the trace function for debugged + frames. Its return value is the new trace function, which is + usually itself. The default implementation decides how to + dispatch a frame, depending on the type of event (passed in as a + string) that is about to be executed. + + The event can be one of the following: + line: A new line of code is going to be executed. + call: A function is about to be called or another code block + is entered. + return: A function or other code block is about to return. + exception: An exception has occurred. + c_call: A C function is about to be called. + c_return: A C function has returned. + c_exception: A C function has raised an exception. + + For the Python events, specialized functions (see the dispatch_*() + methods) are called. For the C events, no action is taken. + + The arg parameter depends on the previous event. + """ + if self.quitting: + return # None + if event == 'line': + return self.dispatch_line(frame) + if event == 'call': + return self.dispatch_call(frame, arg) + if event == 'return': + return self.dispatch_return(frame, arg) + if event == 'exception': + return self.dispatch_exception(frame, arg) + if event == 'c_call': + return self.trace_dispatch + if event == 'c_exception': + return self.trace_dispatch + if event == 'c_return': + return self.trace_dispatch + print('bdb.Bdb.dispatch: unknown debugging event:', repr(event)) + return self.trace_dispatch + + def dispatch_line(self, frame): + """Invoke user function and return trace function for line event. + + If the debugger stops on the current line, invoke + self.user_line(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + if self.stop_here(frame) or self.break_here(frame): + self.user_line(frame) + if self.quitting: raise BdbQuit + return self.trace_dispatch + + def dispatch_call(self, frame, arg): + """Invoke user function and return trace function for call event. + + If the debugger stops on this function call, invoke + self.user_call(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + # XXX 'arg' is no longer used + if self.botframe is None: + # First call of dispatch since reset() + self.botframe = frame.f_back # (CT) Note that this may also be None! 
+ return self.trace_dispatch + if not (self.stop_here(frame) or self.break_anywhere(frame)): + # No need to trace this function + return # None + # Ignore call events in generator except when stepping. + if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: + return self.trace_dispatch + self.user_call(frame, arg) + if self.quitting: raise BdbQuit + return self.trace_dispatch + + def dispatch_return(self, frame, arg): + """Invoke user function and return trace function for return event. + + If the debugger stops on this function return, invoke + self.user_return(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + if self.stop_here(frame) or frame == self.returnframe: + # Ignore return events in generator except when stepping. + if self.stopframe and frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: + return self.trace_dispatch + try: + self.frame_returning = frame + self.user_return(frame, arg) + finally: + self.frame_returning = None + if self.quitting: raise BdbQuit + # The user issued a 'next' or 'until' command. + if self.stopframe is frame and self.stoplineno != -1: + self._set_stopinfo(None, None) + return self.trace_dispatch + + def dispatch_exception(self, frame, arg): + """Invoke user function and return trace function for exception event. + + If the debugger stops on this exception, invoke + self.user_exception(). Raise BdbQuit if self.quitting is set. + Return self.trace_dispatch to continue tracing in this scope. + """ + if self.stop_here(frame): + # When stepping with next/until/return in a generator frame, skip + # the internal StopIteration exception (with no traceback) + # triggered by a subiterator run with the 'yield from' statement. + if not (frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS + and arg[0] is StopIteration and arg[2] is None): + self.user_exception(frame, arg) + if self.quitting: raise BdbQuit + # Stop at the StopIteration or GeneratorExit exception when the user + # has set stopframe in a generator by issuing a return command, or a + # next/until command at the last statement in the generator before the + # exception. + elif (self.stopframe and frame is not self.stopframe + and self.stopframe.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS + and arg[0] in (StopIteration, GeneratorExit)): + self.user_exception(frame, arg) + if self.quitting: raise BdbQuit + + return self.trace_dispatch + + # Normally derived classes don't override the following + # methods, but they may if they want to redefine the + # definition of stopping and breakpoints. + + def is_skipped_module(self, module_name): + "Return True if module_name matches any skip pattern." + if module_name is None: # some modules do not have names + return False + for pattern in self.skip: + if fnmatch.fnmatch(module_name, pattern): + return True + return False + + def stop_here(self, frame): + "Return True if frame is below the starting frame in the stack." + # (CT) stopframe may now also be None, see dispatch_call. + # (CT) the former test for None is therefore removed from here. + if self.skip and \ + self.is_skipped_module(frame.f_globals.get('__name__')): + return False + if frame is self.stopframe: + if self.stoplineno == -1: + return False + return frame.f_lineno >= self.stoplineno + if not self.stopframe: + return True + return False + + def break_here(self, frame): + """Return True if there is an effective breakpoint for this line. + + Check for line or function breakpoint and if in effect. 
+ Delete temporary breakpoints if effective() says to. + """ + filename = self.canonic(frame.f_code.co_filename) + if filename not in self.breaks: + return False + lineno = frame.f_lineno + if lineno not in self.breaks[filename]: + # The line itself has no breakpoint, but maybe the line is the + # first line of a function with breakpoint set by function name. + lineno = frame.f_code.co_firstlineno + if lineno not in self.breaks[filename]: + return False + + # flag says ok to delete temp. bp + (bp, flag) = effective(filename, lineno, frame) + if bp: + self.currentbp = bp.number + if (flag and bp.temporary): + self.do_clear(str(bp.number)) + return True + else: + return False + + def do_clear(self, arg): + """Remove temporary breakpoint. + + Must implement in derived classes or get NotImplementedError. + """ + raise NotImplementedError("subclass of bdb must implement do_clear()") + + def break_anywhere(self, frame): + """Return True if there is any breakpoint for frame's filename. + """ + return self.canonic(frame.f_code.co_filename) in self.breaks + + # Derived classes should override the user_* methods + # to gain control. + + def user_call(self, frame, argument_list): + """Called if we might stop in a function.""" + pass + + def user_line(self, frame): + """Called when we stop or break at a line.""" + pass + + def user_return(self, frame, return_value): + """Called when a return trap is set here.""" + pass + + def user_exception(self, frame, exc_info): + """Called when we stop on an exception.""" + pass + + def _set_stopinfo(self, stopframe, returnframe, stoplineno=0): + """Set the attributes for stopping. + + If stoplineno is greater than or equal to 0, then stop at line + greater than or equal to the stopline. If stoplineno is -1, then + don't stop at all. + """ + self.stopframe = stopframe + self.returnframe = returnframe + self.quitting = False + # stoplineno >= 0 means: stop at line >= the stoplineno + # stoplineno -1 means: don't stop at all + self.stoplineno = stoplineno + + # Derived classes and clients can call the following methods + # to affect the stepping state. + + def set_until(self, frame, lineno=None): + """Stop when the line with the lineno greater than the current one is + reached or when returning from current frame.""" + # the name "until" is borrowed from gdb + if lineno is None: + lineno = frame.f_lineno + 1 + self._set_stopinfo(frame, frame, lineno) + + def set_step(self): + """Stop after one line of code.""" + # Issue #13183: pdb skips frames after hitting a breakpoint and running + # step commands. + # Restore the trace function in the caller (that may not have been set + # for performance reasons) when returning from the current frame. + if self.frame_returning: + caller_frame = self.frame_returning.f_back + if caller_frame and not caller_frame.f_trace: + caller_frame.f_trace = self.trace_dispatch + self._set_stopinfo(None, None) + + def set_next(self, frame): + """Stop on the next line in or below the given frame.""" + self._set_stopinfo(frame, None) + + def set_return(self, frame): + """Stop when returning from the given frame.""" + if frame.f_code.co_flags & GENERATOR_AND_COROUTINE_FLAGS: + self._set_stopinfo(frame, None, -1) + else: + self._set_stopinfo(frame.f_back, frame) + + def set_trace(self, frame=None): + """Start debugging from frame. + + If frame is not specified, debugging starts from caller's frame. 
+ """ + if frame is None: + frame = sys._getframe().f_back + self.reset() + while frame: + frame.f_trace = self.trace_dispatch + self.botframe = frame + frame = frame.f_back + self.set_step() + sys.settrace(self.trace_dispatch) + + def set_continue(self): + """Stop only at breakpoints or when finished. + + If there are no breakpoints, set the system trace function to None. + """ + # Don't stop except at breakpoints or when finished + self._set_stopinfo(self.botframe, None, -1) + if not self.breaks: + # no breakpoints; run without debugger overhead + sys.settrace(None) + frame = sys._getframe().f_back + while frame and frame is not self.botframe: + del frame.f_trace + frame = frame.f_back + + def set_quit(self): + """Set quitting attribute to True. + + Raises BdbQuit exception in the next call to a dispatch_*() method. + """ + self.stopframe = self.botframe + self.returnframe = None + self.quitting = True + sys.settrace(None) + + # Derived classes and clients can call the following methods + # to manipulate breakpoints. These methods return an + # error message if something went wrong, None if all is well. + # Set_break prints out the breakpoint line and file:lineno. + # Call self.get_*break*() to see the breakpoints or better + # for bp in Breakpoint.bpbynumber: if bp: bp.bpprint(). + + def _add_to_breaks(self, filename, lineno): + """Add breakpoint to breaks, if not already there.""" + bp_linenos = self.breaks.setdefault(filename, []) + if lineno not in bp_linenos: + bp_linenos.append(lineno) + + def set_break(self, filename, lineno, temporary=False, cond=None, + funcname=None): + """Set a new breakpoint for filename:lineno. + + If lineno doesn't exist for the filename, return an error message. + The filename should be in canonical form. + """ + filename = self.canonic(filename) + import linecache # Import as late as possible + line = linecache.getline(filename, lineno) + if not line: + return 'Line %s:%d does not exist' % (filename, lineno) + self._add_to_breaks(filename, lineno) + bp = Breakpoint(filename, lineno, temporary, cond, funcname) + return None + + def _load_breaks(self): + """Apply all breakpoints (set in other instances) to this one. + + Populates this instance's breaks list from the Breakpoint class's + list, which can have breakpoints set by another Bdb instance. This + is necessary for interactive sessions to keep the breakpoints + active across multiple calls to run(). + """ + for (filename, lineno) in Breakpoint.bplist.keys(): + self._add_to_breaks(filename, lineno) + + def _prune_breaks(self, filename, lineno): + """Prune breakpoints for filename:lineno. + + A list of breakpoints is maintained in the Bdb instance and in + the Breakpoint class. If a breakpoint in the Bdb instance no + longer exists in the Breakpoint class, then it's removed from the + Bdb instance. + """ + if (filename, lineno) not in Breakpoint.bplist: + self.breaks[filename].remove(lineno) + if not self.breaks[filename]: + del self.breaks[filename] + + def clear_break(self, filename, lineno): + """Delete breakpoints for filename:lineno. + + If no breakpoints were set, return an error message. 
+ """ + filename = self.canonic(filename) + if filename not in self.breaks: + return 'There are no breakpoints in %s' % filename + if lineno not in self.breaks[filename]: + return 'There is no breakpoint at %s:%d' % (filename, lineno) + # If there's only one bp in the list for that file,line + # pair, then remove the breaks entry + for bp in Breakpoint.bplist[filename, lineno][:]: + bp.deleteMe() + self._prune_breaks(filename, lineno) + return None + + def clear_bpbynumber(self, arg): + """Delete a breakpoint by its index in Breakpoint.bpbynumber. + + If arg is invalid, return an error message. + """ + try: + bp = self.get_bpbynumber(arg) + except ValueError as err: + return str(err) + bp.deleteMe() + self._prune_breaks(bp.file, bp.line) + return None + + def clear_all_file_breaks(self, filename): + """Delete all breakpoints in filename. + + If none were set, return an error message. + """ + filename = self.canonic(filename) + if filename not in self.breaks: + return 'There are no breakpoints in %s' % filename + for line in self.breaks[filename]: + blist = Breakpoint.bplist[filename, line] + for bp in blist: + bp.deleteMe() + del self.breaks[filename] + return None + + def clear_all_breaks(self): + """Delete all existing breakpoints. + + If none were set, return an error message. + """ + if not self.breaks: + return 'There are no breakpoints' + for bp in Breakpoint.bpbynumber: + if bp: + bp.deleteMe() + self.breaks = {} + return None + + def get_bpbynumber(self, arg): + """Return a breakpoint by its index in Breakpoint.bybpnumber. + + For invalid arg values or if the breakpoint doesn't exist, + raise a ValueError. + """ + if not arg: + raise ValueError('Breakpoint number expected') + try: + number = int(arg) + except ValueError: + raise ValueError('Non-numeric breakpoint number %s' % arg) from None + try: + bp = Breakpoint.bpbynumber[number] + except IndexError: + raise ValueError('Breakpoint number %d out of range' % number) from None + if bp is None: + raise ValueError('Breakpoint %d already deleted' % number) + return bp + + def get_break(self, filename, lineno): + """Return True if there is a breakpoint for filename:lineno.""" + filename = self.canonic(filename) + return filename in self.breaks and \ + lineno in self.breaks[filename] + + def get_breaks(self, filename, lineno): + """Return all breakpoints for filename:lineno. + + If no breakpoints are set, return an empty list. + """ + filename = self.canonic(filename) + return filename in self.breaks and \ + lineno in self.breaks[filename] and \ + Breakpoint.bplist[filename, lineno] or [] + + def get_file_breaks(self, filename): + """Return all lines with breakpoints for filename. + + If no breakpoints are set, return an empty list. + """ + filename = self.canonic(filename) + if filename in self.breaks: + return self.breaks[filename] + else: + return [] + + def get_all_breaks(self): + """Return all breakpoints that are set.""" + return self.breaks + + # Derived classes and clients can call the following method + # to get a data structure representing a stack trace. + + def get_stack(self, f, t): + """Return a list of (frame, lineno) in a stack trace and a size. + + List starts with original calling frame, if there is one. + Size may be number of frames above or below f. 
+ """ + stack = [] + if t and t.tb_frame is f: + t = t.tb_next + while f is not None: + stack.append((f, f.f_lineno)) + if f is self.botframe: + break + f = f.f_back + stack.reverse() + i = max(0, len(stack) - 1) + while t is not None: + stack.append((t.tb_frame, t.tb_lineno)) + t = t.tb_next + if f is None: + i = max(0, len(stack) - 1) + return stack, i + + def format_stack_entry(self, frame_lineno, lprefix=': '): + """Return a string with information about a stack entry. + + The stack entry frame_lineno is a (frame, lineno) tuple. The + return string contains the canonical filename, the function name + or '', the input arguments, the return value, and the + line of code (if it exists). + + """ + import linecache, reprlib + frame, lineno = frame_lineno + filename = self.canonic(frame.f_code.co_filename) + s = '%s(%r)' % (filename, lineno) + if frame.f_code.co_name: + s += frame.f_code.co_name + else: + s += "" + s += '()' + if '__return__' in frame.f_locals: + rv = frame.f_locals['__return__'] + s += '->' + s += reprlib.repr(rv) + if lineno is not None: + line = linecache.getline(filename, lineno, frame.f_globals) + if line: + s += lprefix + line.strip() + return s + + # The following methods can be called by clients to use + # a debugger to debug a statement or an expression. + # Both can be given as a string, or a code object. + + def run(self, cmd, globals=None, locals=None): + """Debug a statement executed via the exec() function. + + globals defaults to __main__.dict; locals defaults to globals. + """ + if globals is None: + import __main__ + globals = __main__.__dict__ + if locals is None: + locals = globals + self.reset() + if isinstance(cmd, str): + cmd = compile(cmd, "", "exec") + sys.settrace(self.trace_dispatch) + try: + exec(cmd, globals, locals) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + + def runeval(self, expr, globals=None, locals=None): + """Debug an expression executed via the eval() function. + + globals defaults to __main__.dict; locals defaults to globals. + """ + if globals is None: + import __main__ + globals = __main__.__dict__ + if locals is None: + locals = globals + self.reset() + sys.settrace(self.trace_dispatch) + try: + return eval(expr, globals, locals) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + + def runctx(self, cmd, globals, locals): + """For backwards-compatibility. Defers to run().""" + # B/W compatibility + self.run(cmd, globals, locals) + + # This method is more useful to debug a single function call. + + def runcall(self, func, /, *args, **kwds): + """Debug a single function call. + + Return the result of the function call. + """ + self.reset() + sys.settrace(self.trace_dispatch) + res = None + try: + res = func(*args, **kwds) + except BdbQuit: + pass + finally: + self.quitting = True + sys.settrace(None) + return res + + +def set_trace(): + """Start debugging with a Bdb instance from the caller's frame.""" + Bdb().set_trace() + + +class Breakpoint: + """Breakpoint class. + + Implements temporary breakpoints, ignore counts, disabling and + (re)-enabling, and conditionals. + + Breakpoints are indexed by number through bpbynumber and by + the (file, line) tuple using bplist. The former points to a + single instance of class Breakpoint. The latter points to a + list of such instances since there may be more than one + breakpoint per line. + + When creating a breakpoint, its associated filename should be + in canonical form. 
If funcname is defined, a breakpoint hit will be + counted when the first line of that function is executed. A + conditional breakpoint always counts a hit. + """ + + # XXX Keeping state in the class is a mistake -- this means + # you cannot have more than one active Bdb instance. + + next = 1 # Next bp to be assigned + bplist = {} # indexed by (file, lineno) tuple + bpbynumber = [None] # Each entry is None or an instance of Bpt + # index 0 is unused, except for marking an + # effective break .... see effective() + + def __init__(self, file, line, temporary=False, cond=None, funcname=None): + self.funcname = funcname + # Needed if funcname is not None. + self.func_first_executable_line = None + self.file = file # This better be in canonical form! + self.line = line + self.temporary = temporary + self.cond = cond + self.enabled = True + self.ignore = 0 + self.hits = 0 + self.number = Breakpoint.next + Breakpoint.next += 1 + # Build the two lists + self.bpbynumber.append(self) + if (file, line) in self.bplist: + self.bplist[file, line].append(self) + else: + self.bplist[file, line] = [self] + + @staticmethod + def clearBreakpoints(): + Breakpoint.next = 1 + Breakpoint.bplist = {} + Breakpoint.bpbynumber = [None] + + def deleteMe(self): + """Delete the breakpoint from the list associated to a file:line. + + If it is the last breakpoint in that position, it also deletes + the entry for the file:line. + """ + + index = (self.file, self.line) + self.bpbynumber[self.number] = None # No longer in list + self.bplist[index].remove(self) + if not self.bplist[index]: + # No more bp for this f:l combo + del self.bplist[index] + + def enable(self): + """Mark the breakpoint as enabled.""" + self.enabled = True + + def disable(self): + """Mark the breakpoint as disabled.""" + self.enabled = False + + def bpprint(self, out=None): + """Print the output of bpformat(). + + The optional out argument directs where the output is sent + and defaults to standard output. + """ + if out is None: + out = sys.stdout + print(self.bpformat(), file=out) + + def bpformat(self): + """Return a string with information about the breakpoint. + + The information includes the breakpoint number, temporary + status, file:line position, break condition, number of times to + ignore, and number of times hit. + + """ + if self.temporary: + disp = 'del ' + else: + disp = 'keep ' + if self.enabled: + disp = disp + 'yes ' + else: + disp = disp + 'no ' + ret = '%-4dbreakpoint %s at %s:%d' % (self.number, disp, + self.file, self.line) + if self.cond: + ret += '\n\tstop only if %s' % (self.cond,) + if self.ignore: + ret += '\n\tignore next %d hits' % (self.ignore,) + if self.hits: + if self.hits > 1: + ss = 's' + else: + ss = '' + ret += '\n\tbreakpoint already hit %d time%s' % (self.hits, ss) + return ret + + def __str__(self): + "Return a condensed description of the breakpoint." + return 'breakpoint %s at %s:%s' % (self.number, self.file, self.line) + +# -----------end of Breakpoint class---------- + + +def checkfuncname(b, frame): + """Return True if break should happen here. + + Whether a break should happen depends on the way that b (the breakpoint) + was set. If it was set via line number, check if b.line is the same as + the one in the frame. If it was set via function name, check if this is + the right function and if it is on the first executable line. + """ + if not b.funcname: + # Breakpoint was set via line number. 
+ if b.line != frame.f_lineno: + # Breakpoint was set at a line with a def statement and the function + # defined is called: don't break. + return False + return True + + # Breakpoint set via function name. + if frame.f_code.co_name != b.funcname: + # It's not a function call, but rather execution of def statement. + return False + + # We are in the right frame. + if not b.func_first_executable_line: + # The function is entered for the 1st time. + b.func_first_executable_line = frame.f_lineno + + if b.func_first_executable_line != frame.f_lineno: + # But we are not at the first line number: don't break. + return False + return True + + +def effective(file, line, frame): + """Return (active breakpoint, delete temporary flag) or (None, None) as + breakpoint to act upon. + + The "active breakpoint" is the first entry in bplist[line, file] (which + must exist) that is enabled, for which checkfuncname is True, and that + has neither a False condition nor a positive ignore count. The flag, + meaning that a temporary breakpoint should be deleted, is False only + when the condiion cannot be evaluated (in which case, ignore count is + ignored). + + If no such entry exists, then (None, None) is returned. + """ + possibles = Breakpoint.bplist[file, line] + for b in possibles: + if not b.enabled: + continue + if not checkfuncname(b, frame): + continue + # Count every hit when bp is enabled + b.hits += 1 + if not b.cond: + # If unconditional, and ignoring go on to next, else break + if b.ignore > 0: + b.ignore -= 1 + continue + else: + # breakpoint and marker that it's ok to delete if temporary + return (b, True) + else: + # Conditional bp. + # Ignore count applies only to those bpt hits where the + # condition evaluates to true. + try: + val = eval(b.cond, frame.f_globals, frame.f_locals) + if val: + if b.ignore > 0: + b.ignore -= 1 + # continue + else: + return (b, True) + # else: + # continue + except: + # if eval fails, most conservative thing is to stop on + # breakpoint regardless of ignore count. Don't delete + # temporary, as another hint to user. + return (b, False) + return (None, None) + + +# -------------------- testing -------------------- + +class Tdb(Bdb): + def user_call(self, frame, args): + name = frame.f_code.co_name + if not name: name = '???' + print('+++ call', name, args) + def user_line(self, frame): + import linecache + name = frame.f_code.co_name + if not name: name = '???' + fn = self.canonic(frame.f_code.co_filename) + line = linecache.getline(fn, frame.f_lineno, frame.f_globals) + print('+++', fn, frame.f_lineno, name, ':', line.strip()) + def user_return(self, frame, retval): + print('+++ return', retval) + def user_exception(self, frame, exc_stuff): + print('+++ exception', exc_stuff) + self.set_continue() + +def foo(n): + print('foo(', n, ')') + x = bar(n*10) + print('bar returned', x) + +def bar(a): + print('bar(', a, ')') + return a/2 + +def test(): + t = Tdb() + t.run('import bdb; bdb.foo(10)') diff --git a/python310/binhex.py b/python310/binhex.py new file mode 100644 index 0000000000000000000000000000000000000000..ace5217d2713921d2b03c1a956a0f23ed0bdbccb --- /dev/null +++ b/python310/binhex.py @@ -0,0 +1,502 @@ +"""Macintosh binhex compression/decompression. + +easy interface: +binhex(inputfilename, outputfilename) +hexbin(inputfilename, outputfilename) +""" + +# +# Jack Jansen, CWI, August 1995. +# +# The module is supposed to be as compatible as possible. Especially the +# easy interface should work "as expected" on any platform. 
+# XXXX Note: currently, textfiles appear in mac-form on all platforms. +# We seem to lack a simple character-translate in python. +# (we should probably use ISO-Latin-1 on all but the mac platform). +# XXXX The simple routines are too simple: they expect to hold the complete +# files in-core. Should be fixed. +# XXXX It would be nice to handle AppleDouble format on unix +# (for servers serving macs). +# XXXX I don't understand what happens when you get 0x90 times the same byte on +# input. The resulting code (xx 90 90) would appear to be interpreted as an +# escaped *value* of 0x90. All coders I've seen appear to ignore this nicety... +# +import binascii +import contextlib +import io +import os +import struct +import warnings + +warnings.warn('the binhex module is deprecated', DeprecationWarning, + stacklevel=2) + + +__all__ = ["binhex","hexbin","Error"] + +class Error(Exception): + pass + +# States (what have we written) +_DID_HEADER = 0 +_DID_DATA = 1 + +# Various constants +REASONABLY_LARGE = 32768 # Minimal amount we pass the rle-coder +LINELEN = 64 +RUNCHAR = b"\x90" + +# +# This code is no longer byte-order dependent + + +class FInfo: + def __init__(self): + self.Type = '????' + self.Creator = '????' + self.Flags = 0 + +def getfileinfo(name): + finfo = FInfo() + with io.open(name, 'rb') as fp: + # Quick check for textfile + data = fp.read(512) + if 0 not in data: + finfo.Type = 'TEXT' + fp.seek(0, 2) + dsize = fp.tell() + dir, file = os.path.split(name) + file = file.replace(':', '-', 1) + return file, finfo, dsize, 0 + +class openrsrc: + def __init__(self, *args): + pass + + def read(self, *args): + return b'' + + def write(self, *args): + pass + + def close(self): + pass + + +# DeprecationWarning is already emitted on "import binhex". There is no need +# to repeat the warning at each call to deprecated binascii functions. 
+@contextlib.contextmanager +def _ignore_deprecation_warning(): + with warnings.catch_warnings(): + warnings.filterwarnings('ignore', '', DeprecationWarning) + yield + + +class _Hqxcoderengine: + """Write data to the coder in 3-byte chunks""" + + def __init__(self, ofp): + self.ofp = ofp + self.data = b'' + self.hqxdata = b'' + self.linelen = LINELEN - 1 + + def write(self, data): + self.data = self.data + data + datalen = len(self.data) + todo = (datalen // 3) * 3 + data = self.data[:todo] + self.data = self.data[todo:] + if not data: + return + with _ignore_deprecation_warning(): + self.hqxdata = self.hqxdata + binascii.b2a_hqx(data) + self._flush(0) + + def _flush(self, force): + first = 0 + while first <= len(self.hqxdata) - self.linelen: + last = first + self.linelen + self.ofp.write(self.hqxdata[first:last] + b'\r') + self.linelen = LINELEN + first = last + self.hqxdata = self.hqxdata[first:] + if force: + self.ofp.write(self.hqxdata + b':\r') + + def close(self): + if self.data: + with _ignore_deprecation_warning(): + self.hqxdata = self.hqxdata + binascii.b2a_hqx(self.data) + self._flush(1) + self.ofp.close() + del self.ofp + +class _Rlecoderengine: + """Write data to the RLE-coder in suitably large chunks""" + + def __init__(self, ofp): + self.ofp = ofp + self.data = b'' + + def write(self, data): + self.data = self.data + data + if len(self.data) < REASONABLY_LARGE: + return + with _ignore_deprecation_warning(): + rledata = binascii.rlecode_hqx(self.data) + self.ofp.write(rledata) + self.data = b'' + + def close(self): + if self.data: + with _ignore_deprecation_warning(): + rledata = binascii.rlecode_hqx(self.data) + self.ofp.write(rledata) + self.ofp.close() + del self.ofp + +class BinHex: + def __init__(self, name_finfo_dlen_rlen, ofp): + name, finfo, dlen, rlen = name_finfo_dlen_rlen + close_on_error = False + if isinstance(ofp, str): + ofname = ofp + ofp = io.open(ofname, 'wb') + close_on_error = True + try: + ofp.write(b'(This file must be converted with BinHex 4.0)\r\r:') + hqxer = _Hqxcoderengine(ofp) + self.ofp = _Rlecoderengine(hqxer) + self.crc = 0 + if finfo is None: + finfo = FInfo() + self.dlen = dlen + self.rlen = rlen + self._writeinfo(name, finfo) + self.state = _DID_HEADER + except: + if close_on_error: + ofp.close() + raise + + def _writeinfo(self, name, finfo): + nl = len(name) + if nl > 63: + raise Error('Filename too long') + d = bytes([nl]) + name.encode("latin-1") + b'\0' + tp, cr = finfo.Type, finfo.Creator + if isinstance(tp, str): + tp = tp.encode("latin-1") + if isinstance(cr, str): + cr = cr.encode("latin-1") + d2 = tp + cr + + # Force all structs to be packed with big-endian + d3 = struct.pack('>h', finfo.Flags) + d4 = struct.pack('>ii', self.dlen, self.rlen) + info = d + d2 + d3 + d4 + self._write(info) + self._writecrc() + + def _write(self, data): + self.crc = binascii.crc_hqx(data, self.crc) + self.ofp.write(data) + + def _writecrc(self): + # XXXX Should this be here?? 
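# Editor's illustrative sketch (not part of the patch): the two deprecated
# binascii primitives that the coder engines above wrap. rlecode_hqx() run-length
# encodes (RUNCHAR 0x90 marks a run), and b2a_hqx() emits 4 output characters per
# 3 input bytes; the warning filter mirrors _ignore_deprecation_warning().
import binascii
import warnings

with warnings.catch_warnings():
    warnings.simplefilter('ignore', DeprecationWarning)
    rle = binascii.rlecode_hqx(b'AAAAAAAAAA')    # b'A\x90\n': byte, run marker, run length
    hqx = binascii.b2a_hqx(rle)                  # 3 bytes in -> 4 hqx characters out
print(rle, hqx)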
+ # self.crc = binascii.crc_hqx('\0\0', self.crc) + if self.crc < 0: + fmt = '>h' + else: + fmt = '>H' + self.ofp.write(struct.pack(fmt, self.crc)) + self.crc = 0 + + def write(self, data): + if self.state != _DID_HEADER: + raise Error('Writing data at the wrong time') + self.dlen = self.dlen - len(data) + self._write(data) + + def close_data(self): + if self.dlen != 0: + raise Error('Incorrect data size, diff=%r' % (self.rlen,)) + self._writecrc() + self.state = _DID_DATA + + def write_rsrc(self, data): + if self.state < _DID_DATA: + self.close_data() + if self.state != _DID_DATA: + raise Error('Writing resource data at the wrong time') + self.rlen = self.rlen - len(data) + self._write(data) + + def close(self): + if self.state is None: + return + try: + if self.state < _DID_DATA: + self.close_data() + if self.state != _DID_DATA: + raise Error('Close at the wrong time') + if self.rlen != 0: + raise Error("Incorrect resource-datasize, diff=%r" % (self.rlen,)) + self._writecrc() + finally: + self.state = None + ofp = self.ofp + del self.ofp + ofp.close() + +def binhex(inp, out): + """binhex(infilename, outfilename): create binhex-encoded copy of a file""" + finfo = getfileinfo(inp) + ofp = BinHex(finfo, out) + + with io.open(inp, 'rb') as ifp: + # XXXX Do textfile translation on non-mac systems + while True: + d = ifp.read(128000) + if not d: break + ofp.write(d) + ofp.close_data() + + ifp = openrsrc(inp, 'rb') + while True: + d = ifp.read(128000) + if not d: break + ofp.write_rsrc(d) + ofp.close() + ifp.close() + +class _Hqxdecoderengine: + """Read data via the decoder in 4-byte chunks""" + + def __init__(self, ifp): + self.ifp = ifp + self.eof = 0 + + def read(self, totalwtd): + """Read at least wtd bytes (or until EOF)""" + decdata = b'' + wtd = totalwtd + # + # The loop here is convoluted, since we don't really now how + # much to decode: there may be newlines in the incoming data. + while wtd > 0: + if self.eof: return decdata + wtd = ((wtd + 2) // 3) * 4 + data = self.ifp.read(wtd) + # + # Next problem: there may not be a complete number of + # bytes in what we pass to a2b. Solve by yet another + # loop. + # + while True: + try: + with _ignore_deprecation_warning(): + decdatacur, self.eof = binascii.a2b_hqx(data) + break + except binascii.Incomplete: + pass + newdata = self.ifp.read(1) + if not newdata: + raise Error('Premature EOF on binhex file') + data = data + newdata + decdata = decdata + decdatacur + wtd = totalwtd - len(decdata) + if not decdata and not self.eof: + raise Error('Premature EOF on binhex file') + return decdata + + def close(self): + self.ifp.close() + +class _Rledecoderengine: + """Read data via the RLE-coder""" + + def __init__(self, ifp): + self.ifp = ifp + self.pre_buffer = b'' + self.post_buffer = b'' + self.eof = 0 + + def read(self, wtd): + if wtd > len(self.post_buffer): + self._fill(wtd - len(self.post_buffer)) + rv = self.post_buffer[:wtd] + self.post_buffer = self.post_buffer[wtd:] + return rv + + def _fill(self, wtd): + self.pre_buffer = self.pre_buffer + self.ifp.read(wtd + 4) + if self.ifp.eof: + with _ignore_deprecation_warning(): + self.post_buffer = self.post_buffer + \ + binascii.rledecode_hqx(self.pre_buffer) + self.pre_buffer = b'' + return + + # + # Obfuscated code ahead. We have to take care that we don't + # end up with an orphaned RUNCHAR later on. 
So, we keep a couple + # of bytes in the buffer, depending on what the end of + # the buffer looks like: + # '\220\0\220' - Keep 3 bytes: repeated \220 (escaped as \220\0) + # '?\220' - Keep 2 bytes: repeated something-else + # '\220\0' - Escaped \220: Keep 2 bytes. + # '?\220?' - Complete repeat sequence: decode all + # otherwise: keep 1 byte. + # + mark = len(self.pre_buffer) + if self.pre_buffer[-3:] == RUNCHAR + b'\0' + RUNCHAR: + mark = mark - 3 + elif self.pre_buffer[-1:] == RUNCHAR: + mark = mark - 2 + elif self.pre_buffer[-2:] == RUNCHAR + b'\0': + mark = mark - 2 + elif self.pre_buffer[-2:-1] == RUNCHAR: + pass # Decode all + else: + mark = mark - 1 + + with _ignore_deprecation_warning(): + self.post_buffer = self.post_buffer + \ + binascii.rledecode_hqx(self.pre_buffer[:mark]) + self.pre_buffer = self.pre_buffer[mark:] + + def close(self): + self.ifp.close() + +class HexBin: + def __init__(self, ifp): + if isinstance(ifp, str): + ifp = io.open(ifp, 'rb') + # + # Find initial colon. + # + while True: + ch = ifp.read(1) + if not ch: + raise Error("No binhex data found") + # Cater for \r\n terminated lines (which show up as \n\r, hence + # all lines start with \r) + if ch == b'\r': + continue + if ch == b':': + break + + hqxifp = _Hqxdecoderengine(ifp) + self.ifp = _Rledecoderengine(hqxifp) + self.crc = 0 + self._readheader() + + def _read(self, len): + data = self.ifp.read(len) + self.crc = binascii.crc_hqx(data, self.crc) + return data + + def _checkcrc(self): + filecrc = struct.unpack('>h', self.ifp.read(2))[0] & 0xffff + #self.crc = binascii.crc_hqx('\0\0', self.crc) + # XXXX Is this needed?? + self.crc = self.crc & 0xffff + if filecrc != self.crc: + raise Error('CRC error, computed %x, read %x' + % (self.crc, filecrc)) + self.crc = 0 + + def _readheader(self): + len = self._read(1) + fname = self._read(ord(len)) + rest = self._read(1 + 4 + 4 + 2 + 4 + 4) + self._checkcrc() + + type = rest[1:5] + creator = rest[5:9] + flags = struct.unpack('>h', rest[9:11])[0] + self.dlen = struct.unpack('>l', rest[11:15])[0] + self.rlen = struct.unpack('>l', rest[15:19])[0] + + self.FName = fname + self.FInfo = FInfo() + self.FInfo.Creator = creator + self.FInfo.Type = type + self.FInfo.Flags = flags + + self.state = _DID_HEADER + + def read(self, *n): + if self.state != _DID_HEADER: + raise Error('Read data at wrong time') + if n: + n = n[0] + n = min(n, self.dlen) + else: + n = self.dlen + rv = b'' + while len(rv) < n: + rv = rv + self._read(n-len(rv)) + self.dlen = self.dlen - n + return rv + + def close_data(self): + if self.state != _DID_HEADER: + raise Error('close_data at wrong time') + if self.dlen: + dummy = self._read(self.dlen) + self._checkcrc() + self.state = _DID_DATA + + def read_rsrc(self, *n): + if self.state == _DID_HEADER: + self.close_data() + if self.state != _DID_DATA: + raise Error('Read resource data at wrong time') + if n: + n = n[0] + n = min(n, self.rlen) + else: + n = self.rlen + self.rlen = self.rlen - n + return self._read(n) + + def close(self): + if self.state is None: + return + try: + if self.rlen: + dummy = self.read_rsrc(self.rlen) + self._checkcrc() + finally: + self.state = None + self.ifp.close() + +def hexbin(inp, out): + """hexbin(infilename, outfilename) - Decode binhexed file""" + ifp = HexBin(inp) + finfo = ifp.FInfo + if not out: + out = ifp.FName + + with io.open(out, 'wb') as ofp: + # XXXX Do translation on non-mac systems + while True: + d = ifp.read(128000) + if not d: break + ofp.write(d) + ifp.close_data() + + d = ifp.read_rsrc(128000) + if d: 
+ ofp = openrsrc(out, 'wb') + ofp.write(d) + while True: + d = ifp.read_rsrc(128000) + if not d: break + ofp.write(d) + ofp.close() + + ifp.close() diff --git a/python310/bisect.py b/python310/bisect.py new file mode 100644 index 0000000000000000000000000000000000000000..d37da74f7b4055c6cb476d55b22d90a9cf8769bb --- /dev/null +++ b/python310/bisect.py @@ -0,0 +1,110 @@ +"""Bisection algorithms.""" + + +def insort_right(a, x, lo=0, hi=None, *, key=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the right of the rightmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + if key is None: + lo = bisect_right(a, x, lo, hi) + else: + lo = bisect_right(a, key(x), lo, hi, key=key) + a.insert(lo, x) + + +def bisect_right(a, x, lo=0, hi=None, *, key=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e <= x, and all e in + a[i:] have e > x. So if x already appears in the list, a.insert(i, x) will + insert just after the rightmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + # Note, the comparison uses "<" to match the + # __lt__() logic in list.sort() and in heapq. + if key is None: + while lo < hi: + mid = (lo + hi) // 2 + if x < a[mid]: + hi = mid + else: + lo = mid + 1 + else: + while lo < hi: + mid = (lo + hi) // 2 + if x < key(a[mid]): + hi = mid + else: + lo = mid + 1 + return lo + + +def insort_left(a, x, lo=0, hi=None, *, key=None): + """Insert item x in list a, and keep it sorted assuming a is sorted. + + If x is already in a, insert it to the left of the leftmost x. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if key is None: + lo = bisect_left(a, x, lo, hi) + else: + lo = bisect_left(a, key(x), lo, hi, key=key) + a.insert(lo, x) + +def bisect_left(a, x, lo=0, hi=None, *, key=None): + """Return the index where to insert item x in list a, assuming a is sorted. + + The return value i is such that all e in a[:i] have e < x, and all e in + a[i:] have e >= x. So if x already appears in the list, a.insert(i, x) will + insert just before the leftmost x already there. + + Optional args lo (default 0) and hi (default len(a)) bound the + slice of a to be searched. + """ + + if lo < 0: + raise ValueError('lo must be non-negative') + if hi is None: + hi = len(a) + # Note, the comparison uses "<" to match the + # __lt__() logic in list.sort() and in heapq. + if key is None: + while lo < hi: + mid = (lo + hi) // 2 + if a[mid] < x: + lo = mid + 1 + else: + hi = mid + else: + while lo < hi: + mid = (lo + hi) // 2 + if key(a[mid]) < x: + lo = mid + 1 + else: + hi = mid + return lo + + +# Overwrite above definitions with a fast C implementation +try: + from _bisect import * +except ImportError: + pass + +# Create aliases +bisect = bisect_right +insort = insort_right diff --git a/python310/bz2.py b/python310/bz2.py new file mode 100644 index 0000000000000000000000000000000000000000..fabe4f73c8d8085cffc2792dee4c1629a87b8b35 --- /dev/null +++ b/python310/bz2.py @@ -0,0 +1,344 @@ +"""Interface to the libbzip2 compression library. + +This module provides a file interface, classes for incremental +(de)compression, and functions for one-shot (de)compression. 
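# Editor's illustrative sketch (not part of the patch): bisect_right() returns the
# insertion point that keeps a sorted list sorted, insort_right() performs the
# insert, and the key= parameter (new in 3.10) sorts by a derived value.
from bisect import bisect_right, insort_right

grades = [60, 70, 80, 90]
print(bisect_right(grades, 70))      # 2: equal items insert to the right

records = [('ann', 1), ('bob', 3)]
insort_right(records, ('carl', 2), key=lambda r: r[1])
print(records)                       # [('ann', 1), ('carl', 2), ('bob', 3)]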
+""" + +__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor", + "open", "compress", "decompress"] + +__author__ = "Nadeem Vawda " + +from builtins import open as _builtin_open +import io +import os +import _compression + +from _bz2 import BZ2Compressor, BZ2Decompressor + + +_MODE_CLOSED = 0 +_MODE_READ = 1 +# Value 2 no longer used +_MODE_WRITE = 3 + + +class BZ2File(_compression.BaseStream): + + """A file object providing transparent bzip2 (de)compression. + + A BZ2File can act as a wrapper for an existing file object, or refer + directly to a named file on disk. + + Note that BZ2File provides a *binary* file interface - data read is + returned as bytes, and data to be written should be given as bytes. + """ + + def __init__(self, filename, mode="r", *, compresslevel=9): + """Open a bzip2-compressed file. + + If filename is a str, bytes, or PathLike object, it gives the + name of the file to be opened. Otherwise, it should be a file + object, which will be used to read or write the compressed data. + + mode can be 'r' for reading (default), 'w' for (over)writing, + 'x' for creating exclusively, or 'a' for appending. These can + equivalently be given as 'rb', 'wb', 'xb', and 'ab'. + + If mode is 'w', 'x' or 'a', compresslevel can be a number between 1 + and 9 specifying the level of compression: 1 produces the least + compression, and 9 (default) produces the most compression. + + If mode is 'r', the input file may be the concatenation of + multiple compressed streams. + """ + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + + if not (1 <= compresslevel <= 9): + raise ValueError("compresslevel must be between 1 and 9") + + if mode in ("", "r", "rb"): + mode = "rb" + mode_code = _MODE_READ + elif mode in ("w", "wb"): + mode = "wb" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + elif mode in ("x", "xb"): + mode = "xb" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + elif mode in ("a", "ab"): + mode = "ab" + mode_code = _MODE_WRITE + self._compressor = BZ2Compressor(compresslevel) + else: + raise ValueError("Invalid mode: %r" % (mode,)) + + if isinstance(filename, (str, bytes, os.PathLike)): + self._fp = _builtin_open(filename, mode) + self._closefp = True + self._mode = mode_code + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + self._mode = mode_code + else: + raise TypeError("filename must be a str, bytes, file or PathLike object") + + if self._mode == _MODE_READ: + raw = _compression.DecompressReader(self._fp, + BZ2Decompressor, trailing_error=OSError) + self._buffer = io.BufferedReader(raw) + else: + self._pos = 0 + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. 
+ """ + if self._mode == _MODE_CLOSED: + return + try: + if self._mode == _MODE_READ: + self._buffer.close() + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + self._buffer = None + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + def peek(self, n=0): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + self._check_can_read() + # Relies on the undocumented fact that BufferedReader.peek() + # always returns at least one byte (except at EOF), independent + # of the value of n + return self._buffer.peek(n) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b'' if the file is already at EOF. + """ + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b'' if the file is at EOF. + """ + self._check_can_read() + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def readinto(self, b): + """Read bytes into b. + + Returns the number of bytes read (0 for EOF). + """ + self._check_can_read() + return self._buffer.readinto(b) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + if not isinstance(size, int): + if not hasattr(size, "__index__"): + raise TypeError("Integer argument expected") + size = size.__index__() + self._check_can_read() + return self._buffer.readline(size) + + def readlines(self, size=-1): + """Read a list of lines of uncompressed bytes from the file. + + size can be specified to control the number of lines read: no + further lines will be read once the total size of the lines read + so far equals or exceeds size. + """ + if not isinstance(size, int): + if not hasattr(size, "__index__"): + raise TypeError("Integer argument expected") + size = size.__index__() + self._check_can_read() + return self._buffer.readlines(size) + + def write(self, data): + """Write a byte string to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until close() + is called. 
+ """ + self._check_can_write() + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def writelines(self, seq): + """Write a sequence of byte strings to the file. + + Returns the number of uncompressed bytes written. + seq can be any iterable yielding byte strings. + + Line separators are not added between the written byte strings. + """ + return _compression.BaseStream.writelines(self, seq) + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Values for whence are: + + 0: start of stream (default); offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. + """ + self._check_can_seek() + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + return self._pos + + +def open(filename, mode="rb", compresslevel=9, + encoding=None, errors=None, newline=None): + """Open a bzip2-compressed file in binary or text mode. + + The filename argument can be an actual filename (a str, bytes, or + PathLike object), or an existing file object to read from or write + to. + + The mode argument can be "r", "rb", "w", "wb", "x", "xb", "a" or + "ab" for binary mode, or "rt", "wt", "xt" or "at" for text mode. + The default mode is "rb", and the default compresslevel is 9. + + For binary mode, this function is equivalent to the BZ2File + constructor: BZ2File(filename, mode, compresslevel). In this case, + the encoding, errors and newline arguments must not be provided. + + For text mode, a BZ2File object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if errors is not None: + raise ValueError("Argument 'errors' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + bz_mode = mode.replace("t", "") + binary_file = BZ2File(filename, bz_mode, compresslevel=compresslevel) + + if "t" in mode: + encoding = io.text_encoding(encoding) + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file + + +def compress(data, compresslevel=9): + """Compress a block of data. + + compresslevel, if given, must be a number between 1 and 9. + + For incremental compression, use a BZ2Compressor object instead. + """ + comp = BZ2Compressor(compresslevel) + return comp.compress(data) + comp.flush() + + +def decompress(data): + """Decompress a block of data. + + For incremental decompression, use a BZ2Decompressor object instead. + """ + results = [] + while data: + decomp = BZ2Decompressor() + try: + res = decomp.decompress(data) + except OSError: + if results: + break # Leftover data is not a valid bzip2 stream; ignore it. 
+ else: + raise # Error on the first iteration; bail out. + results.append(res) + if not decomp.eof: + raise ValueError("Compressed data ended before the " + "end-of-stream marker was reached") + data = decomp.unused_data + return b"".join(results) diff --git a/python310/cProfile.py b/python310/cProfile.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae1fb8859e51fefb5193127a8d8a24aa77fc92b --- /dev/null +++ b/python310/cProfile.py @@ -0,0 +1,191 @@ +#! /usr/bin/env python3 + +"""Python interface for the 'lsprof' profiler. + Compatible with the 'profile' module. +""" + +__all__ = ["run", "runctx", "Profile"] + +import _lsprof +import io +import profile as _pyprofile + +# ____________________________________________________________ +# Simple interface + +def run(statement, filename=None, sort=-1): + return _pyprofile._Utils(Profile).run(statement, filename, sort) + +def runctx(statement, globals, locals, filename=None, sort=-1): + return _pyprofile._Utils(Profile).runctx(statement, globals, locals, + filename, sort) + +run.__doc__ = _pyprofile.run.__doc__ +runctx.__doc__ = _pyprofile.runctx.__doc__ + +# ____________________________________________________________ + +class Profile(_lsprof.Profiler): + """Profile(timer=None, timeunit=None, subcalls=True, builtins=True) + + Builds a profiler object using the specified timer function. + The default timer is a fast built-in one based on real time. + For custom timer functions returning integers, timeunit can + be a float specifying a scale (i.e. how long each integer unit + is, in seconds). + """ + + # Most of the functionality is in the base class. + # This subclass only adds convenient and backward-compatible methods. + + def print_stats(self, sort=-1): + import pstats + pstats.Stats(self).strip_dirs().sort_stats(sort).print_stats() + + def dump_stats(self, file): + import marshal + with open(file, 'wb') as f: + self.create_stats() + marshal.dump(self.stats, f) + + def create_stats(self): + self.disable() + self.snapshot_stats() + + def snapshot_stats(self): + entries = self.getstats() + self.stats = {} + callersdicts = {} + # call information + for entry in entries: + func = label(entry.code) + nc = entry.callcount # ncalls column of pstats (before '/') + cc = nc - entry.reccallcount # ncalls column of pstats (after '/') + tt = entry.inlinetime # tottime column of pstats + ct = entry.totaltime # cumtime column of pstats + callers = {} + callersdicts[id(entry.code)] = callers + self.stats[func] = cc, nc, tt, ct, callers + # subcall information + for entry in entries: + if entry.calls: + func = label(entry.code) + for subentry in entry.calls: + try: + callers = callersdicts[id(subentry.code)] + except KeyError: + continue + nc = subentry.callcount + cc = nc - subentry.reccallcount + tt = subentry.inlinetime + ct = subentry.totaltime + if func in callers: + prev = callers[func] + nc += prev[0] + cc += prev[1] + tt += prev[2] + ct += prev[3] + callers[func] = nc, cc, tt, ct + + # The following two methods can be called by clients to use + # a profiler to profile a statement, given as a string. + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + return self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals, locals): + self.enable() + try: + exec(cmd, globals, locals) + finally: + self.disable() + return self + + # This method is more useful to profile a single function call. 
+ def runcall(self, func, /, *args, **kw): + self.enable() + try: + return func(*args, **kw) + finally: + self.disable() + + def __enter__(self): + self.enable() + return self + + def __exit__(self, *exc_info): + self.disable() + +# ____________________________________________________________ + +def label(code): + if isinstance(code, str): + return ('~', 0, code) # built-in functions ('~' sorts at the end) + else: + return (code.co_filename, code.co_firstlineno, code.co_name) + +# ____________________________________________________________ + +def main(): + import os + import sys + import runpy + import pstats + from optparse import OptionParser + usage = "cProfile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..." + parser = OptionParser(usage=usage) + parser.allow_interspersed_args = False + parser.add_option('-o', '--outfile', dest="outfile", + help="Save stats to ", default=None) + parser.add_option('-s', '--sort', dest="sort", + help="Sort order when printing to stdout, based on pstats.Stats class", + default=-1, + choices=sorted(pstats.Stats.sort_arg_dict_default)) + parser.add_option('-m', dest="module", action="store_true", + help="Profile a library module", default=False) + + if not sys.argv[1:]: + parser.print_usage() + sys.exit(2) + + (options, args) = parser.parse_args() + sys.argv[:] = args + + # The script that we're profiling may chdir, so capture the absolute path + # to the output file at startup. + if options.outfile is not None: + options.outfile = os.path.abspath(options.outfile) + + if len(args) > 0: + if options.module: + code = "run_module(modname, run_name='__main__')" + globs = { + 'run_module': runpy.run_module, + 'modname': args[0] + } + else: + progname = args[0] + sys.path.insert(0, os.path.dirname(progname)) + with io.open_code(progname) as fp: + code = compile(fp.read(), progname, 'exec') + globs = { + '__file__': progname, + '__name__': '__main__', + '__package__': None, + '__cached__': None, + } + try: + runctx(code, globs, None, options.outfile, options.sort) + except BrokenPipeError as exc: + # Prevent "Exception ignored" during interpreter shutdown. + sys.stdout = None + sys.exit(exc.errno) + else: + parser.print_usage() + return parser + +# When invoked as main program, invoke the profiler on a script +if __name__ == '__main__': + main() diff --git a/python310/calendar.py b/python310/calendar.py new file mode 100644 index 0000000000000000000000000000000000000000..cbea9ec99f550f3b87bfb9596493b2d7b75dc4c8 --- /dev/null +++ b/python310/calendar.py @@ -0,0 +1,759 @@ +"""Calendar printing functions + +Note when comparing these calendars to the ones printed by cal(1): By +default, these calendars have Monday as the first day of the week, and +Sunday as the last (the European convention). 
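# Editor's illustrative sketch (not part of the patch): Profile can be used as a
# context manager (enable()/disable() around the block) or via runcall();
# print_stats() accepts any pstats sort key. 'work.prof' is a hypothetical path.
import cProfile

def work():
    return sum(i * i for i in range(10_000))

with cProfile.Profile() as pr:       # enable() on entry, disable() on exit
    work()
pr.print_stats(sort='cumulative')
pr.dump_stats('work.prof')           # load later with pstats.Stats('work.prof')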
Use setfirstweekday() to +set the first day of the week (0=Monday, 6=Sunday).""" + +import sys +import datetime +import locale as _locale +from itertools import repeat + +__all__ = ["IllegalMonthError", "IllegalWeekdayError", "setfirstweekday", + "firstweekday", "isleap", "leapdays", "weekday", "monthrange", + "monthcalendar", "prmonth", "month", "prcal", "calendar", + "timegm", "month_name", "month_abbr", "day_name", "day_abbr", + "Calendar", "TextCalendar", "HTMLCalendar", "LocaleTextCalendar", + "LocaleHTMLCalendar", "weekheader", + "MONDAY", "TUESDAY", "WEDNESDAY", "THURSDAY", "FRIDAY", + "SATURDAY", "SUNDAY"] + +# Exception raised for bad input (with string parameter for details) +error = ValueError + +# Exceptions raised for bad input +class IllegalMonthError(ValueError): + def __init__(self, month): + self.month = month + def __str__(self): + return "bad month number %r; must be 1-12" % self.month + + +class IllegalWeekdayError(ValueError): + def __init__(self, weekday): + self.weekday = weekday + def __str__(self): + return "bad weekday number %r; must be 0 (Monday) to 6 (Sunday)" % self.weekday + + +# Constants for months referenced later +January = 1 +February = 2 + +# Number of days per month (except for February in leap years) +mdays = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31] + +# This module used to have hard-coded lists of day and month names, as +# English strings. The classes following emulate a read-only version of +# that, but supply localized names. Note that the values are computed +# fresh on each call, in case the user changes locale between calls. + +class _localized_month: + + _months = [datetime.date(2001, i+1, 1).strftime for i in range(12)] + _months.insert(0, lambda x: "") + + def __init__(self, format): + self.format = format + + def __getitem__(self, i): + funcs = self._months[i] + if isinstance(i, slice): + return [f(self.format) for f in funcs] + else: + return funcs(self.format) + + def __len__(self): + return 13 + + +class _localized_day: + + # January 1, 2001, was a Monday. + _days = [datetime.date(2001, 1, i+1).strftime for i in range(7)] + + def __init__(self, format): + self.format = format + + def __getitem__(self, i): + funcs = self._days[i] + if isinstance(i, slice): + return [f(self.format) for f in funcs] + else: + return funcs(self.format) + + def __len__(self): + return 7 + + +# Full and abbreviated names of weekdays +day_name = _localized_day('%A') +day_abbr = _localized_day('%a') + +# Full and abbreviated names of months (1-based arrays!!!) +month_name = _localized_month('%B') +month_abbr = _localized_month('%b') + +# Constants for weekdays +(MONDAY, TUESDAY, WEDNESDAY, THURSDAY, FRIDAY, SATURDAY, SUNDAY) = range(7) + + +def isleap(year): + """Return True for leap years, False for non-leap years.""" + return year % 4 == 0 and (year % 100 != 0 or year % 400 == 0) + + +def leapdays(y1, y2): + """Return number of leap years in range [y1, y2). 
+ Assume y1 <= y2.""" + y1 -= 1 + y2 -= 1 + return (y2//4 - y1//4) - (y2//100 - y1//100) + (y2//400 - y1//400) + + +def weekday(year, month, day): + """Return weekday (0-6 ~ Mon-Sun) for year, month (1-12), day (1-31).""" + if not datetime.MINYEAR <= year <= datetime.MAXYEAR: + year = 2000 + year % 400 + return datetime.date(year, month, day).weekday() + + +def monthrange(year, month): + """Return weekday (0-6 ~ Mon-Sun) and number of days (28-31) for + year, month.""" + if not 1 <= month <= 12: + raise IllegalMonthError(month) + day1 = weekday(year, month, 1) + ndays = mdays[month] + (month == February and isleap(year)) + return day1, ndays + + +def _monthlen(year, month): + return mdays[month] + (month == February and isleap(year)) + + +def _prevmonth(year, month): + if month == 1: + return year-1, 12 + else: + return year, month-1 + + +def _nextmonth(year, month): + if month == 12: + return year+1, 1 + else: + return year, month+1 + + +class Calendar(object): + """ + Base calendar class. This class doesn't do any formatting. It simply + provides data to subclasses. + """ + + def __init__(self, firstweekday=0): + self.firstweekday = firstweekday # 0 = Monday, 6 = Sunday + + def getfirstweekday(self): + return self._firstweekday % 7 + + def setfirstweekday(self, firstweekday): + self._firstweekday = firstweekday + + firstweekday = property(getfirstweekday, setfirstweekday) + + def iterweekdays(self): + """ + Return an iterator for one week of weekday numbers starting with the + configured first one. + """ + for i in range(self.firstweekday, self.firstweekday + 7): + yield i%7 + + def itermonthdates(self, year, month): + """ + Return an iterator for one month. The iterator will yield datetime.date + values and will always iterate through complete weeks, so it will yield + dates outside the specified month. + """ + for y, m, d in self.itermonthdays3(year, month): + yield datetime.date(y, m, d) + + def itermonthdays(self, year, month): + """ + Like itermonthdates(), but will yield day numbers. For days outside + the specified month the day number is 0. + """ + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + yield from repeat(0, days_before) + yield from range(1, ndays + 1) + days_after = (self.firstweekday - day1 - ndays) % 7 + yield from repeat(0, days_after) + + def itermonthdays2(self, year, month): + """ + Like itermonthdates(), but will yield (day number, weekday number) + tuples. For days outside the specified month the day number is 0. + """ + for i, d in enumerate(self.itermonthdays(year, month), self.firstweekday): + yield d, i % 7 + + def itermonthdays3(self, year, month): + """ + Like itermonthdates(), but will yield (year, month, day) tuples. Can be + used for dates outside of datetime.date range. + """ + day1, ndays = monthrange(year, month) + days_before = (day1 - self.firstweekday) % 7 + days_after = (self.firstweekday - day1 - ndays) % 7 + y, m = _prevmonth(year, month) + end = _monthlen(y, m) + 1 + for d in range(end-days_before, end): + yield y, m, d + for d in range(1, ndays + 1): + yield year, month, d + y, m = _nextmonth(year, month) + for d in range(1, days_after + 1): + yield y, m, d + + def itermonthdays4(self, year, month): + """ + Like itermonthdates(), but will yield (year, month, day, day_of_week) tuples. + Can be used for dates outside of datetime.date range. 
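# Editor's illustrative sketch (not part of the patch): the Calendar base class
# only produces data. itermonthdays2()/monthdays2calendar() yield (day, weekday)
# pairs with 0 for padding days, and monthdatescalendar() yields whole weeks of
# datetime.date objects.
import calendar

cal = calendar.Calendar(firstweekday=calendar.SUNDAY)
print(cal.monthdays2calendar(2024, 2)[0])    # first week as (day, weekday) pairs
print(cal.monthdatescalendar(2024, 2)[0])    # the same week as datetime.date objects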
+ """ + for i, (y, m, d) in enumerate(self.itermonthdays3(year, month)): + yield y, m, d, (self.firstweekday + i) % 7 + + def monthdatescalendar(self, year, month): + """ + Return a matrix (list of lists) representing a month's calendar. + Each row represents a week; week entries are datetime.date values. + """ + dates = list(self.itermonthdates(year, month)) + return [ dates[i:i+7] for i in range(0, len(dates), 7) ] + + def monthdays2calendar(self, year, month): + """ + Return a matrix representing a month's calendar. + Each row represents a week; week entries are + (day number, weekday number) tuples. Day numbers outside this month + are zero. + """ + days = list(self.itermonthdays2(year, month)) + return [ days[i:i+7] for i in range(0, len(days), 7) ] + + def monthdayscalendar(self, year, month): + """ + Return a matrix representing a month's calendar. + Each row represents a week; days outside this month are zero. + """ + days = list(self.itermonthdays(year, month)) + return [ days[i:i+7] for i in range(0, len(days), 7) ] + + def yeardatescalendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting. The return + value is a list of month rows. Each month row contains up to width months. + Each month contains between 4 and 6 weeks and each week contains 1-7 + days. Days are datetime.date objects. + """ + months = [ + self.monthdatescalendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + def yeardays2calendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting (similar to + yeardatescalendar()). Entries in the week lists are + (day number, weekday number) tuples. Day numbers outside this month are + zero. + """ + months = [ + self.monthdays2calendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + def yeardayscalendar(self, year, width=3): + """ + Return the data for the specified year ready for formatting (similar to + yeardatescalendar()). Entries in the week lists are day numbers. + Day numbers outside this month are zero. + """ + months = [ + self.monthdayscalendar(year, i) + for i in range(January, January+12) + ] + return [months[i:i+width] for i in range(0, len(months), width) ] + + +class TextCalendar(Calendar): + """ + Subclass of Calendar that outputs a calendar as a simple plain text + similar to the UNIX program cal. + """ + + def prweek(self, theweek, width): + """ + Print a single week (no newline). + """ + print(self.formatweek(theweek, width), end='') + + def formatday(self, day, weekday, width): + """ + Returns a formatted day. + """ + if day == 0: + s = '' + else: + s = '%2i' % day # right-align single-digit days + return s.center(width) + + def formatweek(self, theweek, width): + """ + Returns a single week in a string (no newline). + """ + return ' '.join(self.formatday(d, wd, width) for (d, wd) in theweek) + + def formatweekday(self, day, width): + """ + Returns a formatted week day name. + """ + if width >= 9: + names = day_name + else: + names = day_abbr + return names[day][:width].center(width) + + def formatweekheader(self, width): + """ + Return a header for a week. + """ + return ' '.join(self.formatweekday(i, width) for i in self.iterweekdays()) + + def formatmonthname(self, theyear, themonth, width, withyear=True): + """ + Return a formatted month name. 
+ """ + s = month_name[themonth] + if withyear: + s = "%s %r" % (s, theyear) + return s.center(width) + + def prmonth(self, theyear, themonth, w=0, l=0): + """ + Print a month's calendar. + """ + print(self.formatmonth(theyear, themonth, w, l), end='') + + def formatmonth(self, theyear, themonth, w=0, l=0): + """ + Return a month's calendar string (multi-line). + """ + w = max(2, w) + l = max(1, l) + s = self.formatmonthname(theyear, themonth, 7 * (w + 1) - 1) + s = s.rstrip() + s += '\n' * l + s += self.formatweekheader(w).rstrip() + s += '\n' * l + for week in self.monthdays2calendar(theyear, themonth): + s += self.formatweek(week, w).rstrip() + s += '\n' * l + return s + + def formatyear(self, theyear, w=2, l=1, c=6, m=3): + """ + Returns a year's calendar as a multi-line string. + """ + w = max(2, w) + l = max(1, l) + c = max(2, c) + colwidth = (w + 1) * 7 - 1 + v = [] + a = v.append + a(repr(theyear).center(colwidth*m+c*(m-1)).rstrip()) + a('\n'*l) + header = self.formatweekheader(w) + for (i, row) in enumerate(self.yeardays2calendar(theyear, m)): + # months in this row + months = range(m*i+1, min(m*(i+1)+1, 13)) + a('\n'*l) + names = (self.formatmonthname(theyear, k, colwidth, False) + for k in months) + a(formatstring(names, colwidth, c).rstrip()) + a('\n'*l) + headers = (header for k in months) + a(formatstring(headers, colwidth, c).rstrip()) + a('\n'*l) + # max number of weeks for this row + height = max(len(cal) for cal in row) + for j in range(height): + weeks = [] + for cal in row: + if j >= len(cal): + weeks.append('') + else: + weeks.append(self.formatweek(cal[j], w)) + a(formatstring(weeks, colwidth, c).rstrip()) + a('\n' * l) + return ''.join(v) + + def pryear(self, theyear, w=0, l=0, c=6, m=3): + """Print a year's calendar.""" + print(self.formatyear(theyear, w, l, c, m), end='') + + +class HTMLCalendar(Calendar): + """ + This calendar returns complete HTML pages. + """ + + # CSS classes for the day s + cssclasses = ["mon", "tue", "wed", "thu", "fri", "sat", "sun"] + + # CSS classes for the day s + cssclasses_weekday_head = cssclasses + + # CSS class for the days before and after current month + cssclass_noday = "noday" + + # CSS class for the month's head + cssclass_month_head = "month" + + # CSS class for the month + cssclass_month = "month" + + # CSS class for the year's table head + cssclass_year_head = "year" + + # CSS class for the whole year table + cssclass_year = "year" + + def formatday(self, day, weekday): + """ + Return a day as a table cell. + """ + if day == 0: + # day outside month + return ' ' % self.cssclass_noday + else: + return '%d' % (self.cssclasses[weekday], day) + + def formatweek(self, theweek): + """ + Return a complete week as a table row. + """ + s = ''.join(self.formatday(d, wd) for (d, wd) in theweek) + return '%s' % s + + def formatweekday(self, day): + """ + Return a weekday name as a table header. + """ + return '%s' % ( + self.cssclasses_weekday_head[day], day_abbr[day]) + + def formatweekheader(self): + """ + Return a header for a week as a table row. + """ + s = ''.join(self.formatweekday(i) for i in self.iterweekdays()) + return '%s' % s + + def formatmonthname(self, theyear, themonth, withyear=True): + """ + Return a month name as a table row. + """ + if withyear: + s = '%s %s' % (month_name[themonth], theyear) + else: + s = '%s' % month_name[themonth] + return '%s' % ( + self.cssclass_month_head, s) + + def formatmonth(self, theyear, themonth, withyear=True): + """ + Return a formatted month as a table. 
+ """ + v = [] + a = v.append + a('' % ( + self.cssclass_month)) + a('\n') + a(self.formatmonthname(theyear, themonth, withyear=withyear)) + a('\n') + a(self.formatweekheader()) + a('\n') + for week in self.monthdays2calendar(theyear, themonth): + a(self.formatweek(week)) + a('\n') + a('
') + a('\n') + return ''.join(v) + + def formatyear(self, theyear, width=3): + """ + Return a formatted year as a table of tables. + """ + v = [] + a = v.append + width = max(width, 1) + a('' % + self.cssclass_year) + a('\n') + a('' % ( + width, self.cssclass_year_head, theyear)) + for i in range(January, January+12, width): + # months in this row + months = range(i, min(i+width, 13)) + a('') + for m in months: + a('') + a('') + a('
%s
') + a(self.formatmonth(theyear, m, withyear=False)) + a('
') + return ''.join(v) + + def formatyearpage(self, theyear, width=3, css='calendar.css', encoding=None): + """ + Return a formatted year as a complete HTML page. + """ + if encoding is None: + encoding = sys.getdefaultencoding() + v = [] + a = v.append + a('\n' % encoding) + a('\n') + a('\n') + a('\n') + a('\n' % encoding) + if css is not None: + a('\n' % css) + a('Calendar for %d\n' % theyear) + a('\n') + a('\n') + a(self.formatyear(theyear, width)) + a('\n') + a('\n') + return ''.join(v).encode(encoding, "xmlcharrefreplace") + + +class different_locale: + def __init__(self, locale): + self.locale = locale + + def __enter__(self): + self.oldlocale = _locale.getlocale(_locale.LC_TIME) + _locale.setlocale(_locale.LC_TIME, self.locale) + + def __exit__(self, *args): + _locale.setlocale(_locale.LC_TIME, self.oldlocale) + + +class LocaleTextCalendar(TextCalendar): + """ + This class can be passed a locale name in the constructor and will return + month and weekday names in the specified locale. If this locale includes + an encoding all strings containing month and weekday names will be returned + as unicode. + """ + + def __init__(self, firstweekday=0, locale=None): + TextCalendar.__init__(self, firstweekday) + if locale is None: + locale = _locale.getdefaultlocale() + self.locale = locale + + def formatweekday(self, day, width): + with different_locale(self.locale): + return super().formatweekday(day, width) + + def formatmonthname(self, theyear, themonth, width, withyear=True): + with different_locale(self.locale): + return super().formatmonthname(theyear, themonth, width, withyear) + + +class LocaleHTMLCalendar(HTMLCalendar): + """ + This class can be passed a locale name in the constructor and will return + month and weekday names in the specified locale. If this locale includes + an encoding all strings containing month and weekday names will be returned + as unicode. 
+ """ + def __init__(self, firstweekday=0, locale=None): + HTMLCalendar.__init__(self, firstweekday) + if locale is None: + locale = _locale.getdefaultlocale() + self.locale = locale + + def formatweekday(self, day): + with different_locale(self.locale): + return super().formatweekday(day) + + def formatmonthname(self, theyear, themonth, withyear=True): + with different_locale(self.locale): + return super().formatmonthname(theyear, themonth, withyear) + +# Support for old module level interface +c = TextCalendar() + +firstweekday = c.getfirstweekday + +def setfirstweekday(firstweekday): + if not MONDAY <= firstweekday <= SUNDAY: + raise IllegalWeekdayError(firstweekday) + c.firstweekday = firstweekday + +monthcalendar = c.monthdayscalendar +prweek = c.prweek +week = c.formatweek +weekheader = c.formatweekheader +prmonth = c.prmonth +month = c.formatmonth +calendar = c.formatyear +prcal = c.pryear + + +# Spacing of month columns for multi-column year calendar +_colwidth = 7*3 - 1 # Amount printed by prweek() +_spacing = 6 # Number of spaces between columns + + +def format(cols, colwidth=_colwidth, spacing=_spacing): + """Prints multi-column formatting for year calendars""" + print(formatstring(cols, colwidth, spacing)) + + +def formatstring(cols, colwidth=_colwidth, spacing=_spacing): + """Returns a string formatted from n strings, centered within n columns.""" + spacing *= ' ' + return spacing.join(c.center(colwidth) for c in cols) + + +EPOCH = 1970 +_EPOCH_ORD = datetime.date(EPOCH, 1, 1).toordinal() + + +def timegm(tuple): + """Unrelated but handy function to calculate Unix timestamp from GMT.""" + year, month, day, hour, minute, second = tuple[:6] + days = datetime.date(year, month, 1).toordinal() - _EPOCH_ORD + day - 1 + hours = days*24 + hour + minutes = hours*60 + minute + seconds = minutes*60 + second + return seconds + + +def main(args): + import argparse + parser = argparse.ArgumentParser() + textgroup = parser.add_argument_group('text only arguments') + htmlgroup = parser.add_argument_group('html only arguments') + textgroup.add_argument( + "-w", "--width", + type=int, default=2, + help="width of date column (default 2)" + ) + textgroup.add_argument( + "-l", "--lines", + type=int, default=1, + help="number of lines for each week (default 1)" + ) + textgroup.add_argument( + "-s", "--spacing", + type=int, default=6, + help="spacing between months (default 6)" + ) + textgroup.add_argument( + "-m", "--months", + type=int, default=3, + help="months per row (default 3)" + ) + htmlgroup.add_argument( + "-c", "--css", + default="calendar.css", + help="CSS to use for page" + ) + parser.add_argument( + "-L", "--locale", + default=None, + help="locale to be used from month and weekday names" + ) + parser.add_argument( + "-e", "--encoding", + default=None, + help="encoding to use for output" + ) + parser.add_argument( + "-t", "--type", + default="text", + choices=("text", "html"), + help="output type (text or html)" + ) + parser.add_argument( + "year", + nargs='?', type=int, + help="year number (1-9999)" + ) + parser.add_argument( + "month", + nargs='?', type=int, + help="month number (1-12, text only)" + ) + + options = parser.parse_args(args[1:]) + + if options.locale and not options.encoding: + parser.error("if --locale is specified --encoding is required") + sys.exit(1) + + locale = options.locale, options.encoding + + if options.type == "html": + if options.locale: + cal = LocaleHTMLCalendar(locale=locale) + else: + cal = HTMLCalendar() + encoding = options.encoding + if encoding is 
None: + encoding = sys.getdefaultencoding() + optdict = dict(encoding=encoding, css=options.css) + write = sys.stdout.buffer.write + if options.year is None: + write(cal.formatyearpage(datetime.date.today().year, **optdict)) + elif options.month is None: + write(cal.formatyearpage(options.year, **optdict)) + else: + parser.error("incorrect number of arguments") + sys.exit(1) + else: + if options.locale: + cal = LocaleTextCalendar(locale=locale) + else: + cal = TextCalendar() + optdict = dict(w=options.width, l=options.lines) + if options.month is None: + optdict["c"] = options.spacing + optdict["m"] = options.months + if options.year is None: + result = cal.formatyear(datetime.date.today().year, **optdict) + elif options.month is None: + result = cal.formatyear(options.year, **optdict) + else: + result = cal.formatmonth(options.year, options.month, **optdict) + write = sys.stdout.write + if options.encoding: + result = result.encode(options.encoding) + write = sys.stdout.buffer.write + write(result) + + +if __name__ == "__main__": + main(sys.argv) diff --git a/python310/cgi.py b/python310/cgi.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb8cf28bd66457ef05a8cc19bb53b8f2afbb780 --- /dev/null +++ b/python310/cgi.py @@ -0,0 +1,1004 @@ +#! /usr/local/bin/python + +# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is +# intentionally NOT "/usr/bin/env python". On many systems +# (e.g. Solaris), /usr/local/bin is not in $PATH as passed to CGI +# scripts, and /usr/local/bin is the default directory where Python is +# installed, so /usr/bin/env would be unable to find python. Granted, +# binary installations by Linux vendors often install Python in +# /usr/bin. So let those vendors patch cgi.py to match their choice +# of installation. + +"""Support module for CGI (Common Gateway Interface) scripts. + +This module defines a number of utilities for use by CGI scripts +written in Python. +""" + +# History +# ------- +# +# Michael McLay started this module. Steve Majewski changed the +# interface to SvFormContentDict and FormContentDict. The multipart +# parsing was inspired by code submitted by Andreas Paepcke. Guido van +# Rossum rewrote, reformatted and documented the module and is currently +# responsible for its maintenance. +# + +__version__ = "2.6" + + +# Imports +# ======= + +from io import StringIO, BytesIO, TextIOWrapper +from collections.abc import Mapping +import sys +import os +import urllib.parse +from email.parser import FeedParser +from email.message import Message +import html +import locale +import tempfile +import warnings + +__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart", + "parse_header", "test", "print_exception", "print_environ", + "print_form", "print_directory", "print_arguments", + "print_environ_usage"] + +# Logging support +# =============== + +logfile = "" # Filename to log to, if not empty +logfp = None # File object to log to, if not None + +def initlog(*allargs): + """Write a log message, if there is a log file. + + Even though this function is called initlog(), you should always + use log(); log is a variable that is set either to initlog + (initially), to dolog (once the log file has been opened), or to + nolog (when logging is disabled). + + The first argument is a format string; the remaining arguments (if + any) are arguments to the % operator, so e.g. + log("%s: %s", "a", "b") + will write "a: b" to the log file, followed by a newline. 
+ + If the global logfp is not None, it should be a file object to + which log data is written. + + If the global logfp is None, the global logfile may be a string + giving a filename to open, in append mode. This file should be + world writable!!! If the file can't be opened, logging is + silently disabled (since there is no safe place where we could + send an error message). + + """ + global log, logfile, logfp + warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead", + DeprecationWarning, stacklevel=2) + if logfile and not logfp: + try: + logfp = open(logfile, "a", encoding="locale") + except OSError: + pass + if not logfp: + log = nolog + else: + log = dolog + log(*allargs) + +def dolog(fmt, *args): + """Write a log message to the log file. See initlog() for docs.""" + logfp.write(fmt%args + "\n") + +def nolog(*allargs): + """Dummy function, assigned to log when logging is disabled.""" + pass + +def closelog(): + """Close the log file.""" + global log, logfile, logfp + logfile = '' + if logfp: + logfp.close() + logfp = None + log = initlog + +log = initlog # The current logging function + + +# Parsing functions +# ================= + +# Maximum input we will accept when REQUEST_METHOD is POST +# 0 ==> unlimited input +maxlen = 0 + +def parse(fp=None, environ=os.environ, keep_blank_values=0, + strict_parsing=0, separator='&'): + """Parse a query in the environment or from a file (default stdin) + + Arguments, all optional: + + fp : file pointer; default: sys.stdin.buffer + + environ : environment dictionary; default: os.environ + + keep_blank_values: flag indicating whether blank values in + percent-encoded forms should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. 
+ """ + if fp is None: + fp = sys.stdin + + # field keys and values (except for files) are returned as strings + # an encoding is required to decode the bytes read from self.fp + if hasattr(fp,'encoding'): + encoding = fp.encoding + else: + encoding = 'latin-1' + + # fp.read() must return bytes + if isinstance(fp, TextIOWrapper): + fp = fp.buffer + + if not 'REQUEST_METHOD' in environ: + environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone + if environ['REQUEST_METHOD'] == 'POST': + ctype, pdict = parse_header(environ['CONTENT_TYPE']) + if ctype == 'multipart/form-data': + return parse_multipart(fp, pdict, separator=separator) + elif ctype == 'application/x-www-form-urlencoded': + clength = int(environ['CONTENT_LENGTH']) + if maxlen and clength > maxlen: + raise ValueError('Maximum content length exceeded') + qs = fp.read(clength).decode(encoding) + else: + qs = '' # Unknown content-type + if 'QUERY_STRING' in environ: + if qs: qs = qs + '&' + qs = qs + environ['QUERY_STRING'] + elif sys.argv[1:]: + if qs: qs = qs + '&' + qs = qs + sys.argv[1] + environ['QUERY_STRING'] = qs # XXX Shouldn't, really + elif 'QUERY_STRING' in environ: + qs = environ['QUERY_STRING'] + else: + if sys.argv[1:]: + qs = sys.argv[1] + else: + qs = "" + environ['QUERY_STRING'] = qs # XXX Shouldn't, really + return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing, + encoding=encoding, separator=separator) + + +def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'): + """Parse multipart input. + + Arguments: + fp : input file + pdict: dictionary containing other parameters of content-type header + encoding, errors: request encoding and error handler, passed to + FieldStorage + + Returns a dictionary just like parse_qs(): keys are the field names, each + value is a list of values for that field. For non-file fields, the value + is a list of strings. + """ + # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always + # represented as 7bit US-ASCII. + boundary = pdict['boundary'].decode('ascii') + ctype = "multipart/form-data; boundary={}".format(boundary) + headers = Message() + headers.set_type(ctype) + try: + headers['Content-Length'] = pdict['CONTENT-LENGTH'] + except KeyError: + pass + fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors, + environ={'REQUEST_METHOD': 'POST'}, separator=separator) + return {k: fs.getlist(k) for k in fs} + +def _parseparam(s): + while s[:1] == ';': + s = s[1:] + end = s.find(';') + while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: + end = s.find(';', end + 1) + if end < 0: + end = len(s) + f = s[:end] + yield f.strip() + s = s[end:] + +def parse_header(line): + """Parse a Content-type like header. + + Return the main content-type and a dictionary of options. 
+ + """ + parts = _parseparam(';' + line) + key = parts.__next__() + pdict = {} + for p in parts: + i = p.find('=') + if i >= 0: + name = p[:i].strip().lower() + value = p[i+1:].strip() + if len(value) >= 2 and value[0] == value[-1] == '"': + value = value[1:-1] + value = value.replace('\\\\', '\\').replace('\\"', '"') + pdict[name] = value + return key, pdict + + +# Classes for field storage +# ========================= + +class MiniFieldStorage: + + """Like FieldStorage, for use when no file uploads are possible.""" + + # Dummy attributes + filename = None + list = None + type = None + file = None + type_options = {} + disposition = None + disposition_options = {} + headers = {} + + def __init__(self, name, value): + """Constructor from field name and value.""" + self.name = name + self.value = value + # self.file = StringIO(value) + + def __repr__(self): + """Return printable representation.""" + return "MiniFieldStorage(%r, %r)" % (self.name, self.value) + + +class FieldStorage: + + """Store a sequence of fields, reading multipart/form-data. + + This class provides naming, typing, files stored on disk, and + more. At the top level, it is accessible like a dictionary, whose + keys are the field names. (Note: None can occur as a field name.) + The items are either a Python list (if there's multiple values) or + another FieldStorage or MiniFieldStorage object. If it's a single + object, it has the following attributes: + + name: the field name, if specified; otherwise None + + filename: the filename, if specified; otherwise None; this is the + client side filename, *not* the file name on which it is + stored (that's a temporary file you don't deal with) + + value: the value as a *string*; for file uploads, this + transparently reads the file every time you request the value + and returns *bytes* + + file: the file(-like) object from which you can read the data *as + bytes* ; None if the data is stored a simple string + + type: the content-type, or None if not specified + + type_options: dictionary of options specified on the content-type + line + + disposition: content-disposition, or None if not specified + + disposition_options: dictionary of corresponding options + + headers: a dictionary(-like) object (sometimes email.message.Message or a + subclass thereof) containing *all* headers + + The class is subclassable, mostly for the purpose of overriding + the make_file() method, which is called internally to come up with + a file open for reading and writing. This makes it possible to + override the default choice of storing all files in a temporary + directory and unlinking them as soon as they have been opened. + + """ + def __init__(self, fp=None, headers=None, outerboundary=b'', + environ=os.environ, keep_blank_values=0, strict_parsing=0, + limit=None, encoding='utf-8', errors='replace', + max_num_fields=None, separator='&'): + """Constructor. Read multipart/* until last part. + + Arguments, all optional: + + fp : file pointer; default: sys.stdin.buffer + (not used when the request method is GET) + Can be : + 1. a TextIOWrapper object + 2. an object whose read() and readline() methods return bytes + + headers : header dictionary-like object; default: + taken from environ as per CGI spec + + outerboundary : terminating multipart boundary + (for internal use only) + + environ : environment dictionary; default: os.environ + + keep_blank_values: flag indicating whether blank values in + percent-encoded forms should be treated as blank strings. 
+ A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + limit : used internally to read parts of multipart/form-data forms, + to exit from the reading loop when reached. It is the difference + between the form content-length and the number of bytes already + read + + encoding, errors : the encoding and error handler used to decode the + binary stream to strings. Must be the same as the charset defined + for the page sending the form (content-type : meta http-equiv or + header) + + max_num_fields: int. If set, then __init__ throws a ValueError + if there are more than n fields read by parse_qsl(). + + """ + method = 'GET' + self.keep_blank_values = keep_blank_values + self.strict_parsing = strict_parsing + self.max_num_fields = max_num_fields + self.separator = separator + if 'REQUEST_METHOD' in environ: + method = environ['REQUEST_METHOD'].upper() + self.qs_on_post = None + if method == 'GET' or method == 'HEAD': + if 'QUERY_STRING' in environ: + qs = environ['QUERY_STRING'] + elif sys.argv[1:]: + qs = sys.argv[1] + else: + qs = "" + qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape') + fp = BytesIO(qs) + if headers is None: + headers = {'content-type': + "application/x-www-form-urlencoded"} + if headers is None: + headers = {} + if method == 'POST': + # Set default content-type for POST to what's traditional + headers['content-type'] = "application/x-www-form-urlencoded" + if 'CONTENT_TYPE' in environ: + headers['content-type'] = environ['CONTENT_TYPE'] + if 'QUERY_STRING' in environ: + self.qs_on_post = environ['QUERY_STRING'] + if 'CONTENT_LENGTH' in environ: + headers['content-length'] = environ['CONTENT_LENGTH'] + else: + if not (isinstance(headers, (Mapping, Message))): + raise TypeError("headers must be mapping or an instance of " + "email.message.Message") + self.headers = headers + if fp is None: + self.fp = sys.stdin.buffer + # self.fp.read() must return bytes + elif isinstance(fp, TextIOWrapper): + self.fp = fp.buffer + else: + if not (hasattr(fp, 'read') and hasattr(fp, 'readline')): + raise TypeError("fp must be file pointer") + self.fp = fp + + self.encoding = encoding + self.errors = errors + + if not isinstance(outerboundary, bytes): + raise TypeError('outerboundary must be bytes, not %s' + % type(outerboundary).__name__) + self.outerboundary = outerboundary + + self.bytes_read = 0 + self.limit = limit + + # Process content-disposition header + cdisp, pdict = "", {} + if 'content-disposition' in self.headers: + cdisp, pdict = parse_header(self.headers['content-disposition']) + self.disposition = cdisp + self.disposition_options = pdict + self.name = None + if 'name' in pdict: + self.name = pdict['name'] + self.filename = None + if 'filename' in pdict: + self.filename = pdict['filename'] + self._binary_file = self.filename is not None + + # Process content-type header + # + # Honor any existing content-type header. But if there is no + # content-type header, use some sensible defaults. Assume + # outerboundary is "" at the outer level, but something non-false + # inside a multi-part. The default for an inner part is text/plain, + # but for an outer part it should be urlencoded. 
This should catch + # bogus clients which erroneously forget to include a content-type + # header. + # + # See below for what we do if there does exist a content-type header, + # but it happens to be something we don't understand. + if 'content-type' in self.headers: + ctype, pdict = parse_header(self.headers['content-type']) + elif self.outerboundary or method != 'POST': + ctype, pdict = "text/plain", {} + else: + ctype, pdict = 'application/x-www-form-urlencoded', {} + self.type = ctype + self.type_options = pdict + if 'boundary' in pdict: + self.innerboundary = pdict['boundary'].encode(self.encoding, + self.errors) + else: + self.innerboundary = b"" + + clen = -1 + if 'content-length' in self.headers: + try: + clen = int(self.headers['content-length']) + except ValueError: + pass + if maxlen and clen > maxlen: + raise ValueError('Maximum content length exceeded') + self.length = clen + if self.limit is None and clen >= 0: + self.limit = clen + + self.list = self.file = None + self.done = 0 + if ctype == 'application/x-www-form-urlencoded': + self.read_urlencoded() + elif ctype[:10] == 'multipart/': + self.read_multi(environ, keep_blank_values, strict_parsing) + else: + self.read_single() + + def __del__(self): + try: + self.file.close() + except AttributeError: + pass + + def __enter__(self): + return self + + def __exit__(self, *args): + self.file.close() + + def __repr__(self): + """Return a printable representation.""" + return "FieldStorage(%r, %r, %r)" % ( + self.name, self.filename, self.value) + + def __iter__(self): + return iter(self.keys()) + + def __getattr__(self, name): + if name != 'value': + raise AttributeError(name) + if self.file: + self.file.seek(0) + value = self.file.read() + self.file.seek(0) + elif self.list is not None: + value = self.list + else: + value = None + return value + + def __getitem__(self, key): + """Dictionary style indexing.""" + if self.list is None: + raise TypeError("not indexable") + found = [] + for item in self.list: + if item.name == key: found.append(item) + if not found: + raise KeyError(key) + if len(found) == 1: + return found[0] + else: + return found + + def getvalue(self, key, default=None): + """Dictionary style get() method, including 'value' lookup.""" + if key in self: + value = self[key] + if isinstance(value, list): + return [x.value for x in value] + else: + return value.value + else: + return default + + def getfirst(self, key, default=None): + """ Return the first value received.""" + if key in self: + value = self[key] + if isinstance(value, list): + return value[0].value + else: + return value.value + else: + return default + + def getlist(self, key): + """ Return list of received values.""" + if key in self: + value = self[key] + if isinstance(value, list): + return [x.value for x in value] + else: + return [value.value] + else: + return [] + + def keys(self): + """Dictionary style keys() method.""" + if self.list is None: + raise TypeError("not indexable") + return list(set(item.name for item in self.list)) + + def __contains__(self, key): + """Dictionary style __contains__ method.""" + if self.list is None: + raise TypeError("not indexable") + return any(item.name == key for item in self.list) + + def __len__(self): + """Dictionary style len(x) support.""" + return len(self.keys()) + + def __bool__(self): + if self.list is None: + raise TypeError("Cannot be converted to bool.") + return bool(self.list) + + def read_urlencoded(self): + """Internal: read data in query string format.""" + qs = self.fp.read(self.length) + if 
not isinstance(qs, bytes): + raise ValueError("%s should return bytes, got %s" \ + % (self.fp, type(qs).__name__)) + qs = qs.decode(self.encoding, self.errors) + if self.qs_on_post: + qs += '&' + self.qs_on_post + query = urllib.parse.parse_qsl( + qs, self.keep_blank_values, self.strict_parsing, + encoding=self.encoding, errors=self.errors, + max_num_fields=self.max_num_fields, separator=self.separator) + self.list = [MiniFieldStorage(key, value) for key, value in query] + self.skip_lines() + + FieldStorageClass = None + + def read_multi(self, environ, keep_blank_values, strict_parsing): + """Internal: read a part that is itself multipart.""" + ib = self.innerboundary + if not valid_boundary(ib): + raise ValueError('Invalid boundary in multipart form: %r' % (ib,)) + self.list = [] + if self.qs_on_post: + query = urllib.parse.parse_qsl( + self.qs_on_post, self.keep_blank_values, self.strict_parsing, + encoding=self.encoding, errors=self.errors, + max_num_fields=self.max_num_fields, separator=self.separator) + self.list.extend(MiniFieldStorage(key, value) for key, value in query) + + klass = self.FieldStorageClass or self.__class__ + first_line = self.fp.readline() # bytes + if not isinstance(first_line, bytes): + raise ValueError("%s should return bytes, got %s" \ + % (self.fp, type(first_line).__name__)) + self.bytes_read += len(first_line) + + # Ensure that we consume the file until we've hit our inner boundary + while (first_line.strip() != (b"--" + self.innerboundary) and + first_line): + first_line = self.fp.readline() + self.bytes_read += len(first_line) + + # Propagate max_num_fields into the sub class appropriately + max_num_fields = self.max_num_fields + if max_num_fields is not None: + max_num_fields -= len(self.list) + + while True: + parser = FeedParser() + hdr_text = b"" + while True: + data = self.fp.readline() + hdr_text += data + if not data.strip(): + break + if not hdr_text: + break + # parser takes strings, not bytes + self.bytes_read += len(hdr_text) + parser.feed(hdr_text.decode(self.encoding, self.errors)) + headers = parser.close() + + # Some clients add Content-Length for part headers, ignore them + if 'content-length' in headers: + del headers['content-length'] + + limit = None if self.limit is None \ + else self.limit - self.bytes_read + part = klass(self.fp, headers, ib, environ, keep_blank_values, + strict_parsing, limit, + self.encoding, self.errors, max_num_fields, self.separator) + + if max_num_fields is not None: + max_num_fields -= 1 + if part.list: + max_num_fields -= len(part.list) + if max_num_fields < 0: + raise ValueError('Max number of fields exceeded') + + self.bytes_read += part.bytes_read + self.list.append(part) + if part.done or self.bytes_read >= self.length > 0: + break + self.skip_lines() + + def read_single(self): + """Internal: read an atomic part.""" + if self.length >= 0: + self.read_binary() + self.skip_lines() + else: + self.read_lines() + self.file.seek(0) + + bufsize = 8*1024 # I/O buffering size for copy to file + + def read_binary(self): + """Internal: read binary data.""" + self.file = self.make_file() + todo = self.length + if todo >= 0: + while todo > 0: + data = self.fp.read(min(todo, self.bufsize)) # bytes + if not isinstance(data, bytes): + raise ValueError("%s should return bytes, got %s" + % (self.fp, type(data).__name__)) + self.bytes_read += len(data) + if not data: + self.done = -1 + break + self.file.write(data) + todo = todo - len(data) + + def read_lines(self): + """Internal: read lines until EOF or outerboundary.""" + 
if self._binary_file: + self.file = self.__file = BytesIO() # store data as bytes for files + else: + self.file = self.__file = StringIO() # as strings for other fields + if self.outerboundary: + self.read_lines_to_outerboundary() + else: + self.read_lines_to_eof() + + def __write(self, line): + """line is always bytes, not string""" + if self.__file is not None: + if self.__file.tell() + len(line) > 1000: + self.file = self.make_file() + data = self.__file.getvalue() + self.file.write(data) + self.__file = None + if self._binary_file: + # keep bytes + self.file.write(line) + else: + # decode to string + self.file.write(line.decode(self.encoding, self.errors)) + + def read_lines_to_eof(self): + """Internal: read lines until EOF.""" + while 1: + line = self.fp.readline(1<<16) # bytes + self.bytes_read += len(line) + if not line: + self.done = -1 + break + self.__write(line) + + def read_lines_to_outerboundary(self): + """Internal: read lines until outerboundary. + Data is read as bytes: boundaries and line ends must be converted + to bytes for comparisons. + """ + next_boundary = b"--" + self.outerboundary + last_boundary = next_boundary + b"--" + delim = b"" + last_line_lfend = True + _read = 0 + while 1: + + if self.limit is not None and 0 <= self.limit <= _read: + break + line = self.fp.readline(1<<16) # bytes + self.bytes_read += len(line) + _read += len(line) + if not line: + self.done = -1 + break + if delim == b"\r": + line = delim + line + delim = b"" + if line.startswith(b"--") and last_line_lfend: + strippedline = line.rstrip() + if strippedline == next_boundary: + break + if strippedline == last_boundary: + self.done = 1 + break + odelim = delim + if line.endswith(b"\r\n"): + delim = b"\r\n" + line = line[:-2] + last_line_lfend = True + elif line.endswith(b"\n"): + delim = b"\n" + line = line[:-1] + last_line_lfend = True + elif line.endswith(b"\r"): + # We may interrupt \r\n sequences if they span the 2**16 + # byte boundary + delim = b"\r" + line = line[:-1] + last_line_lfend = False + else: + delim = b"" + last_line_lfend = False + self.__write(odelim + line) + + def skip_lines(self): + """Internal: skip lines until outer boundary if defined.""" + if not self.outerboundary or self.done: + return + next_boundary = b"--" + self.outerboundary + last_boundary = next_boundary + b"--" + last_line_lfend = True + while True: + line = self.fp.readline(1<<16) + self.bytes_read += len(line) + if not line: + self.done = -1 + break + if line.endswith(b"--") and last_line_lfend: + strippedline = line.strip() + if strippedline == next_boundary: + break + if strippedline == last_boundary: + self.done = 1 + break + last_line_lfend = line.endswith(b'\n') + + def make_file(self): + """Overridable: return a readable & writable file. + + The file will be used as follows: + - data is written to it + - seek(0) + - data is read from it + + The file is opened in binary mode for files, in text mode + for other fields + + This version opens a temporary file for reading and writing, + and immediately deletes (unlinks) it. The trick (on Unix!) is + that the file can still be used, but it can't be opened by + another process, and it will automatically be deleted when it + is closed or when the current process terminates. + + If you want a more permanent file, you derive a class which + overrides this method. 
If you want a visible temporary file + that is nevertheless automatically deleted when the script + terminates, try defining a __del__ method in a derived class + which unlinks the temporary files you have created. + + """ + if self._binary_file: + return tempfile.TemporaryFile("wb+") + else: + return tempfile.TemporaryFile("w+", + encoding=self.encoding, newline = '\n') + + +# Test/debug code +# =============== + +def test(environ=os.environ): + """Robust test CGI script, usable as main program. + + Write minimal HTTP headers and dump all information provided to + the script in HTML form. + + """ + print("Content-type: text/html") + print() + sys.stderr = sys.stdout + try: + form = FieldStorage() # Replace with other classes to test those + print_directory() + print_arguments() + print_form(form) + print_environ(environ) + print_environ_usage() + def f(): + exec("testing print_exception() -- italics?") + def g(f=f): + f() + print("

<H3>What follows is a test, not an actual exception:</H3>")
+        g()
+    except:
+        print_exception()
+
+    print("<H1>Second try with a small maxlen...</H1>")
+
+    global maxlen
+    maxlen = 50
+    try:
+        form = FieldStorage()   # Replace with other classes to test those
+        print_directory()
+        print_arguments()
+        print_form(form)
+        print_environ(environ)
+    except:
+        print_exception()
+
+def print_exception(type=None, value=None, tb=None, limit=None):
+    if type is None:
+        type, value, tb = sys.exc_info()
+    import traceback
+    print()
+    print("<H3>Traceback (most recent call last):</H3>")
+    list = traceback.format_tb(tb, limit) + \
+           traceback.format_exception_only(type, value)
+    print("<PRE>%s<B>%s</B></PRE>" % (
+        html.escape("".join(list[:-1])),
+        html.escape(list[-1]),
+        ))
+    del tb
+
+def print_environ(environ=os.environ):
+    """Dump the shell environment as HTML."""
+    keys = sorted(environ.keys())
+    print()
+    print("<H3>Shell Environment:</H3>")
+    print("<DL>")
+    for key in keys:
+        print("<DT>", html.escape(key), "<DD>", html.escape(environ[key]))
+    print("</DL>")
+    print()
+
+def print_form(form):
+    """Dump the contents of a form as HTML."""
+    keys = sorted(form.keys())
+    print()
+    print("<H3>Form Contents:</H3>")
+    if not keys:
+        print("<P>No form fields.")
+    print("<DL>")
+    for key in keys:
+        print("<DT>" + html.escape(key) + ":", end=' ')
+        value = form[key]
+        print("<i>" + html.escape(repr(type(value))) + "</i>")
+        print("<DD>" + html.escape(repr(value)))
+    print("</DL>")
+    print()
+
+def print_directory():
+    """Dump the current directory as HTML."""
+    print()
+    print("<H3>Current Working Directory:</H3>")
+    try:
+        pwd = os.getcwd()
+    except OSError as msg:
+        print("OSError:", html.escape(str(msg)))
+    else:
+        print(html.escape(pwd))
+    print()
+
+def print_arguments():
+    print()
+    print("<H3>Command Line Arguments:</H3>")
+    print()
+    print(sys.argv)
+    print()
+
+def print_environ_usage():
+    """Dump a list of environment variables used by CGI as HTML."""
+    print("""
+<H3>These environment variables could have been set:</H3>
+<UL>
+<LI>AUTH_TYPE
+<LI>CONTENT_LENGTH
+<LI>CONTENT_TYPE
+<LI>DATE_GMT
+<LI>DATE_LOCAL
+<LI>DOCUMENT_NAME
+<LI>DOCUMENT_ROOT
+<LI>DOCUMENT_URI
+<LI>GATEWAY_INTERFACE
+<LI>LAST_MODIFIED
+<LI>PATH
+<LI>PATH_INFO
+<LI>PATH_TRANSLATED
+<LI>QUERY_STRING
+<LI>REMOTE_ADDR
+<LI>REMOTE_HOST
+<LI>REMOTE_IDENT
+<LI>REMOTE_USER
+<LI>REQUEST_METHOD
+<LI>SCRIPT_NAME
+<LI>SERVER_NAME
+<LI>SERVER_PORT
+<LI>SERVER_PROTOCOL
+<LI>SERVER_ROOT
+<LI>SERVER_SOFTWARE
+</UL>
+In addition, HTTP headers sent by the server may be passed in the
+environment as well.  Here are some common variable names:
+<UL>
+<LI>HTTP_ACCEPT
+<LI>HTTP_CONNECTION
+<LI>HTTP_HOST
+<LI>HTTP_PRAGMA
+<LI>HTTP_REFERER
+<LI>HTTP_USER_AGENT
+</UL>
+""") + + +# Utilities +# ========= + +def valid_boundary(s): + import re + if isinstance(s, bytes): + _vb_pattern = b"^[ -~]{0,200}[!-~]$" + else: + _vb_pattern = "^[ -~]{0,200}[!-~]$" + return re.match(_vb_pattern, s) + +# Invoke mainline +# =============== + +# Call test() when this file is run as a script (not imported as a module) +if __name__ == '__main__': + test() diff --git a/python310/cgitb.py b/python310/cgitb.py new file mode 100644 index 0000000000000000000000000000000000000000..17ddda376884dfb6ba2a1e7a315e333463644c25 --- /dev/null +++ b/python310/cgitb.py @@ -0,0 +1,321 @@ +"""More comprehensive traceback formatting for Python scripts. + +To enable this module, do: + + import cgitb; cgitb.enable() + +at the top of your script. The optional arguments to enable() are: + + display - if true, tracebacks are displayed in the web browser + logdir - if set, tracebacks are written to files in this directory + context - number of lines of source code to show for each stack frame + format - 'text' or 'html' controls the output format + +By default, tracebacks are displayed but not saved, the context is 5 lines +and the output format is 'html' (for backwards compatibility with the +original use of this module) + +Alternatively, if you have caught an exception and want cgitb to display it +for you, call cgitb.handler(). The optional argument to handler() is a +3-item tuple (etype, evalue, etb) just like the value of sys.exc_info(). +The default handler displays output as HTML. + +""" +import inspect +import keyword +import linecache +import os +import pydoc +import sys +import tempfile +import time +import tokenize +import traceback + +def reset(): + """Return a string that resets the CGI and browser to a known state.""" + return ''' + --> --> + + ''' + +__UNDEF__ = [] # a special sentinel object +def small(text): + if text: + return '' + text + '' + else: + return '' + +def strong(text): + if text: + return '' + text + '' + else: + return '' + +def grey(text): + if text: + return '' + text + '' + else: + return '' + +def lookup(name, frame, locals): + """Find the value for a given name in the given environment.""" + if name in locals: + return 'local', locals[name] + if name in frame.f_globals: + return 'global', frame.f_globals[name] + if '__builtins__' in frame.f_globals: + builtins = frame.f_globals['__builtins__'] + if type(builtins) is type({}): + if name in builtins: + return 'builtin', builtins[name] + else: + if hasattr(builtins, name): + return 'builtin', getattr(builtins, name) + return None, __UNDEF__ + +def scanvars(reader, frame, locals): + """Scan one logical line of Python and look up values of variables used.""" + vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__ + for ttype, token, start, end, line in tokenize.generate_tokens(reader): + if ttype == tokenize.NEWLINE: break + if ttype == tokenize.NAME and token not in keyword.kwlist: + if lasttoken == '.': + if parent is not __UNDEF__: + value = getattr(parent, token, __UNDEF__) + vars.append((prefix + token, prefix, value)) + else: + where, value = lookup(token, frame, locals) + vars.append((token, where, value)) + elif token == '.': + prefix += lasttoken + '.' 
+ parent = value + else: + parent, prefix = None, '' + lasttoken = token + return vars + +def html(einfo, context=5): + """Return a nice HTML document describing a given traceback.""" + etype, evalue, etb = einfo + if isinstance(etype, type): + etype = etype.__name__ + pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + date = time.ctime(time.time()) + head = '' + pydoc.html.heading( + '%s' % + strong(pydoc.html.escape(str(etype))), + '#ffffff', '#6622aa', pyver + '
<br>' + date) + '''
+<p>A problem occurred in a Python script.  Here is the sequence of
+function calls leading up to the error, in the order they occurred.</p>
''' + + indent = '' + small(' ' * 5) + ' ' + frames = [] + records = inspect.getinnerframes(etb, context) + for frame, file, lnum, func, lines, index in records: + if file: + file = os.path.abspath(file) + link = '%s' % (file, pydoc.html.escape(file)) + else: + file = link = '?' + args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = 'in ' + strong(pydoc.html.escape(func)) + if func != "": + call += inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.html.repr(value)) + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) + + rows = ['%s%s %s' % + (' ', link, call)] + if index is not None: + i = lnum - index + for line in lines: + num = small(' ' * (5-len(str(i))) + str(i)) + ' ' + if i in highlight: + line = '=>%s%s' % (num, pydoc.html.preformat(line)) + rows.append('%s' % line) + else: + line = '  %s%s' % (num, pydoc.html.preformat(line)) + rows.append('%s' % grey(line)) + i += 1 + + done, dump = {}, [] + for name, where, value in vars: + if name in done: continue + done[name] = 1 + if value is not __UNDEF__: + if where in ('global', 'builtin'): + name = ('%s ' % where) + strong(name) + elif where == 'local': + name = strong(name) + else: + name = where + strong(name.split('.')[-1]) + dump.append('%s = %s' % (name, pydoc.html.repr(value))) + else: + dump.append(name + ' undefined') + + rows.append('%s' % small(grey(', '.join(dump)))) + frames.append(''' + +%s
''' % '\n'.join(rows)) + + exception = ['

%s: %s' % (strong(pydoc.html.escape(str(etype))), + pydoc.html.escape(str(evalue)))] + for name in dir(evalue): + if name[:1] == '_': continue + value = pydoc.html.repr(getattr(evalue, name)) + exception.append('\n
%s%s =\n%s' % (indent, name, value)) + + return head + ''.join(frames) + ''.join(exception) + ''' + + + +''' % pydoc.html.escape( + ''.join(traceback.format_exception(etype, evalue, etb))) + +def text(einfo, context=5): + """Return a plain text document describing a given traceback.""" + etype, evalue, etb = einfo + if isinstance(etype, type): + etype = etype.__name__ + pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + date = time.ctime(time.time()) + head = "%s\n%s\n%s\n" % (str(etype), pyver, date) + ''' +A problem occurred in a Python script. Here is the sequence of +function calls leading up to the error, in the order they occurred. +''' + + frames = [] + records = inspect.getinnerframes(etb, context) + for frame, file, lnum, func, lines, index in records: + file = file and os.path.abspath(file) or '?' + args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = 'in ' + func + if func != "": + call += inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.text.repr(value)) + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = scanvars(reader, frame, locals) + + rows = [' %s %s' % (file, call)] + if index is not None: + i = lnum - index + for line in lines: + num = '%5d ' % i + rows.append(num+line.rstrip()) + i += 1 + + done, dump = {}, [] + for name, where, value in vars: + if name in done: continue + done[name] = 1 + if value is not __UNDEF__: + if where == 'global': name = 'global ' + name + elif where != 'local': name = where + name.split('.')[-1] + dump.append('%s = %s' % (name, pydoc.text.repr(value))) + else: + dump.append(name + ' undefined') + + rows.append('\n'.join(dump)) + frames.append('\n%s\n' % '\n'.join(rows)) + + exception = ['%s: %s' % (str(etype), str(evalue))] + for name in dir(evalue): + value = pydoc.text.repr(getattr(evalue, name)) + exception.append('\n%s%s = %s' % (" "*4, name, value)) + + return head + ''.join(frames) + ''.join(exception) + ''' + +The above is a description of an error in a Python program. Here is +the original traceback: + +%s +''' % ''.join(traceback.format_exception(etype, evalue, etb)) + +class Hook: + """A hook to replace sys.excepthook that shows tracebacks in HTML.""" + + def __init__(self, display=1, logdir=None, context=5, file=None, + format="html"): + self.display = display # send tracebacks to browser if true + self.logdir = logdir # log tracebacks to files if not None + self.context = context # number of source code lines per frame + self.file = file or sys.stdout # place to send the output + self.format = format + + def __call__(self, etype, evalue, etb): + self.handle((etype, evalue, etb)) + + def handle(self, info=None): + info = info or sys.exc_info() + if self.format == "html": + self.file.write(reset()) + + formatter = (self.format=="html") and html or text + plain = False + try: + doc = formatter(info, self.context) + except: # just in case something goes wrong + doc = ''.join(traceback.format_exception(*info)) + plain = True + + if self.display: + if plain: + doc = pydoc.html.escape(doc) + self.file.write('

<pre>' + doc + '</pre>\n')
+            else:
+                self.file.write(doc + '\n')
+        else:
+            self.file.write('<p>A problem occurred in a Python script.\n')
+
+        if self.logdir is not None:
+            suffix = ['.txt', '.html'][self.format=="html"]
+            (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)
+
+            try:
+                with os.fdopen(fd, 'w') as file:
+                    file.write(doc)
+                msg = '%s contains the description of this error.' % path
+            except:
+                msg = 'Tried to save traceback to %s, but failed.' % path
+
+            if self.format == 'html':
+                self.file.write('<p>%s</p>
\n' % msg) + else: + self.file.write(msg + '\n') + try: + self.file.flush() + except: pass + +handler = Hook().handle +def enable(display=1, logdir=None, context=5, format="html"): + """Install an exception handler that formats tracebacks as HTML. + + The optional argument 'display' can be set to 0 to suppress sending the + traceback to the browser, and 'logdir' can be set to a directory to cause + tracebacks to be written to files there.""" + sys.excepthook = Hook(display=display, logdir=logdir, + context=context, format=format) diff --git a/python310/chunk.py b/python310/chunk.py new file mode 100644 index 0000000000000000000000000000000000000000..870c39fe7f5037152dd5b430e6b6f3f7e5d7a8de --- /dev/null +++ b/python310/chunk.py @@ -0,0 +1,169 @@ +"""Simple class to read IFF chunks. + +An IFF chunk (used in formats such as AIFF, TIFF, RMFF (RealMedia File +Format)) has the following structure: + ++----------------+ +| ID (4 bytes) | ++----------------+ +| size (4 bytes) | ++----------------+ +| data | +| ... | ++----------------+ + +The ID is a 4-byte string which identifies the type of chunk. + +The size field (a 32-bit value, encoded using big-endian byte order) +gives the size of the whole chunk, including the 8-byte header. + +Usually an IFF-type file consists of one or more chunks. The proposed +usage of the Chunk class defined here is to instantiate an instance at +the start of each chunk and read from the instance until it reaches +the end, after which a new instance can be instantiated. At the end +of the file, creating a new instance will fail with an EOFError +exception. + +Usage: +while True: + try: + chunk = Chunk(file) + except EOFError: + break + chunktype = chunk.getname() + while True: + data = chunk.read(nbytes) + if not data: + pass + # do something with data + +The interface is file-like. The implemented methods are: +read, close, seek, tell, isatty. +Extra methods are: skip() (called by close, skips to the end of the chunk), +getname() (returns the name (ID) of the chunk) + +The __init__ method has one required argument, a file-like object +(including a chunk instance), and one optional argument, a flag which +specifies whether or not chunks are aligned on 2-byte boundaries. The +default is 1, i.e. aligned. +""" + +class Chunk: + def __init__(self, file, align=True, bigendian=True, inclheader=False): + import struct + self.closed = False + self.align = align # whether to align to word (2-byte) boundaries + if bigendian: + strflag = '>' + else: + strflag = '<' + self.file = file + self.chunkname = file.read(4) + if len(self.chunkname) < 4: + raise EOFError + try: + self.chunksize = struct.unpack_from(strflag+'L', file.read(4))[0] + except struct.error: + raise EOFError from None + if inclheader: + self.chunksize = self.chunksize - 8 # subtract header + self.size_read = 0 + try: + self.offset = self.file.tell() + except (AttributeError, OSError): + self.seekable = False + else: + self.seekable = True + + def getname(self): + """Return the name (ID) of the current chunk.""" + return self.chunkname + + def getsize(self): + """Return the size of the current chunk.""" + return self.chunksize + + def close(self): + if not self.closed: + try: + self.skip() + finally: + self.closed = True + + def isatty(self): + if self.closed: + raise ValueError("I/O operation on closed file") + return False + + def seek(self, pos, whence=0): + """Seek to specified position into the chunk. + Default position is 0 (start of chunk). + If the file is not seekable, this will result in an error. 
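+
+        For illustration (an added note): given a Chunk ck over a seekable
+        file, ck.seek(0) rewinds to the start of the chunk's data,
+        ck.seek(0, 2) moves to its end, and ck.seek(4, 1) skips four bytes
+        forward; positions outside the chunk raise an error.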
+ """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if not self.seekable: + raise OSError("cannot seek") + if whence == 1: + pos = pos + self.size_read + elif whence == 2: + pos = pos + self.chunksize + if pos < 0 or pos > self.chunksize: + raise RuntimeError + self.file.seek(self.offset + pos, 0) + self.size_read = pos + + def tell(self): + if self.closed: + raise ValueError("I/O operation on closed file") + return self.size_read + + def read(self, size=-1): + """Read at most size bytes from the chunk. + If size is omitted or negative, read until the end + of the chunk. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if self.size_read >= self.chunksize: + return b'' + if size < 0: + size = self.chunksize - self.size_read + if size > self.chunksize - self.size_read: + size = self.chunksize - self.size_read + data = self.file.read(size) + self.size_read = self.size_read + len(data) + if self.size_read == self.chunksize and \ + self.align and \ + (self.chunksize & 1): + dummy = self.file.read(1) + self.size_read = self.size_read + len(dummy) + return data + + def skip(self): + """Skip the rest of the chunk. + If you are not interested in the contents of the chunk, + this method should be called so that the file points to + the start of the next chunk. + """ + + if self.closed: + raise ValueError("I/O operation on closed file") + if self.seekable: + try: + n = self.chunksize - self.size_read + # maybe fix alignment + if self.align and (self.chunksize & 1): + n = n + 1 + self.file.seek(n, 1) + self.size_read = self.size_read + n + return + except OSError: + pass + while self.size_read < self.chunksize: + n = min(8192, self.chunksize - self.size_read) + dummy = self.read(n) + if not dummy: + raise EOFError diff --git a/python310/cmd.py b/python310/cmd.py new file mode 100644 index 0000000000000000000000000000000000000000..859e91096d8f57d906c00023ef1a1c0e663178d6 --- /dev/null +++ b/python310/cmd.py @@ -0,0 +1,401 @@ +"""A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. 
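+
+A minimal illustrative interpreter (an added sketch; the class name and its
+commands are invented for this example):
+
+    import cmd
+
+    class Greeter(cmd.Cmd):
+        prompt = '(greet) '
+        def do_hello(self, arg):
+            'Greet someone: hello [NAME]'
+            print('Hello,', arg or 'world')
+        def do_EOF(self, arg):
+            return True        # a true result stops cmdloop()
+
+    # Greeter().cmdloop() starts the prompt/dispatch loop described above.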
+ +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. + +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +""" + +import string, sys + +__all__ = ["Cmd"] + +PROMPT = '(Cmd) ' +IDENTCHARS = string.ascii_letters + string.digits + '_' + +class Cmd: + """A simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. + + """ + prompt = PROMPT + identchars = IDENTCHARS + ruler = '=' + lastcmd = '' + intro = None + doc_leader = "" + doc_header = "Documented commands (type help ):" + misc_header = "Miscellaneous help topics:" + undoc_header = "Undocumented commands:" + nohelp = "*** No help on %s" + use_rawinput = 1 + + def __init__(self, completekey='tab', stdin=None, stdout=None): + """Instantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + """ + if stdin is not None: + self.stdin = stdin + else: + self.stdin = sys.stdin + if stdout is not None: + self.stdout = stdout + else: + self.stdout = sys.stdout + self.cmdqueue = [] + self.completekey = completekey + + def cmdloop(self, intro=None): + """Repeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. 
+ + """ + + self.preloop() + if self.use_rawinput and self.completekey: + try: + import readline + self.old_completer = readline.get_completer() + readline.set_completer(self.complete) + readline.parse_and_bind(self.completekey+": complete") + except ImportError: + pass + try: + if intro is not None: + self.intro = intro + if self.intro: + self.stdout.write(str(self.intro)+"\n") + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue.pop(0) + else: + if self.use_rawinput: + try: + line = input(self.prompt) + except EOFError: + line = 'EOF' + else: + self.stdout.write(self.prompt) + self.stdout.flush() + line = self.stdin.readline() + if not len(line): + line = 'EOF' + else: + line = line.rstrip('\r\n') + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + finally: + if self.use_rawinput and self.completekey: + try: + import readline + readline.set_completer(self.old_completer) + except ImportError: + pass + + + def precmd(self, line): + """Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + """ + return line + + def postcmd(self, stop, line): + """Hook method executed just after a command dispatch is finished.""" + return stop + + def preloop(self): + """Hook method executed once when the cmdloop() method is called.""" + pass + + def postloop(self): + """Hook method executed once when the cmdloop() method is about to + return. + + """ + pass + + def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed. + """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + """ + cmd, arg, line = self.parseline(line) + if not line: + return self.emptyline() + if cmd is None: + return self.default(line) + self.lastcmd = line + if line == 'EOF' : + self.lastcmd = '' + if cmd == '': + return self.default(line) + else: + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + return self.default(line) + return func(arg) + + def emptyline(self): + """Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + """ + if self.lastcmd: + return self.onecmd(self.lastcmd) + + def default(self, line): + """Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + """ + self.stdout.write('*** Unknown syntax: %s\n'%line) + + def completedefault(self, *ignored): + """Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. 
+ + """ + return [] + + def completenames(self, text, *ignored): + dotext = 'do_'+text + return [a[3:] for a in self.get_names() if a.startswith(dotext)] + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + """ + if state == 0: + import readline + origline = readline.get_line_buffer() + line = origline.lstrip() + stripped = len(origline) - len(line) + begidx = readline.get_begidx() - stripped + endidx = readline.get_endidx() - stripped + if begidx>0: + cmd, args, foo = self.parseline(line) + if cmd == '': + compfunc = self.completedefault + else: + try: + compfunc = getattr(self, 'complete_' + cmd) + except AttributeError: + compfunc = self.completedefault + else: + compfunc = self.completenames + self.completion_matches = compfunc(text, line, begidx, endidx) + try: + return self.completion_matches[state] + except IndexError: + return None + + def get_names(self): + # This method used to pull in base class attributes + # at a time dir() didn't do it yet. + return dir(self.__class__) + + def complete_help(self, *args): + commands = set(self.completenames(*args)) + topics = set(a[5:] for a in self.get_names() + if a.startswith('help_' + args[0])) + return list(commands | topics) + + def do_help(self, arg): + 'List available commands with "help" or detailed help with "help cmd".' + if arg: + # XXX check arg syntax + try: + func = getattr(self, 'help_' + arg) + except AttributeError: + try: + doc=getattr(self, 'do_' + arg).__doc__ + if doc: + self.stdout.write("%s\n"%str(doc)) + return + except AttributeError: + pass + self.stdout.write("%s\n"%str(self.nohelp % (arg,))) + return + func() + else: + names = self.get_names() + cmds_doc = [] + cmds_undoc = [] + help = {} + for name in names: + if name[:5] == 'help_': + help[name[5:]]=1 + names.sort() + # There can be duplicates if routines overridden + prevname = '' + for name in names: + if name[:3] == 'do_': + if name == prevname: + continue + prevname = name + cmd=name[3:] + if cmd in help: + cmds_doc.append(cmd) + del help[cmd] + elif getattr(self, name).__doc__: + cmds_doc.append(cmd) + else: + cmds_undoc.append(cmd) + self.stdout.write("%s\n"%str(self.doc_leader)) + self.print_topics(self.doc_header, cmds_doc, 15,80) + self.print_topics(self.misc_header, list(help.keys()),15,80) + self.print_topics(self.undoc_header, cmds_undoc, 15,80) + + def print_topics(self, header, cmds, cmdlen, maxcol): + if cmds: + self.stdout.write("%s\n"%str(header)) + if self.ruler: + self.stdout.write("%s\n"%str(self.ruler * len(header))) + self.columnize(cmds, maxcol-1) + self.stdout.write("\n") + + def columnize(self, list, displaywidth=80): + """Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). 
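+
+        For illustration (an added example),
+        columnize(['alpha', 'beta', 'gamma', 'delta'], 20) would print:
+
+            alpha  gamma
+            beta   delta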
+ """ + if not list: + self.stdout.write("\n") + return + + nonstrings = [i for i in range(len(list)) + if not isinstance(list[i], str)] + if nonstrings: + raise TypeError("list[i] not a string for i in %s" + % ", ".join(map(str, nonstrings))) + size = len(list) + if size == 1: + self.stdout.write('%s\n'%str(list[0])) + return + # Try every row count from 1 upwards + for nrows in range(1, len(list)): + ncols = (size+nrows-1) // nrows + colwidths = [] + totwidth = -2 + for col in range(ncols): + colwidth = 0 + for row in range(nrows): + i = row + nrows*col + if i >= size: + break + x = list[i] + colwidth = max(colwidth, len(x)) + colwidths.append(colwidth) + totwidth += colwidth + 2 + if totwidth > displaywidth: + break + if totwidth <= displaywidth: + break + else: + nrows = len(list) + ncols = 1 + colwidths = [0] + for row in range(nrows): + texts = [] + for col in range(ncols): + i = row + nrows*col + if i >= size: + x = "" + else: + x = list[i] + texts.append(x) + while texts and not texts[-1]: + del texts[-1] + for col in range(len(texts)): + texts[col] = texts[col].ljust(colwidths[col]) + self.stdout.write("%s\n"%str(" ".join(texts))) diff --git a/python310/code.py b/python310/code.py new file mode 100644 index 0000000000000000000000000000000000000000..76000f8c8b2c1e1c98f8fb4c831c2ea3e2de268d --- /dev/null +++ b/python310/code.py @@ -0,0 +1,315 @@ +"""Utilities needed to emulate Python's interactive interpreter. + +""" + +# Inspired by similar code by Jeff Epler and Fredrik Lundh. + + +import sys +import traceback +from codeop import CommandCompiler, compile_command + +__all__ = ["InteractiveInterpreter", "InteractiveConsole", "interact", + "compile_command"] + +class InteractiveInterpreter: + """Base class for InteractiveConsole. + + This class deals with parsing and interpreter state (the user's + namespace); it doesn't deal with input buffering or prompting or + input file naming (the filename is always passed in explicitly). + + """ + + def __init__(self, locals=None): + """Constructor. + + The optional 'locals' argument specifies the dictionary in + which code will be executed; it defaults to a newly created + dictionary with key "__name__" set to "__console__" and key + "__doc__" set to None. + + """ + if locals is None: + locals = {"__name__": "__console__", "__doc__": None} + self.locals = locals + self.compile = CommandCompiler() + + def runsource(self, source, filename="", symbol="single"): + """Compile and run some source in the interpreter. + + Arguments are as for compile_command(). + + One of several things can happen: + + 1) The input is incorrect; compile_command() raised an + exception (SyntaxError or OverflowError). A syntax traceback + will be printed by calling the showsyntaxerror() method. + + 2) The input is incomplete, and more input is required; + compile_command() returned None. Nothing happens. + + 3) The input is complete; compile_command() returned a code + object. The code is executed by calling self.runcode() (which + also handles run-time exceptions, except for SystemExit). + + The return value is True in case 2, False in the other cases (unless + an exception is raised). The return value can be used to + decide whether to use sys.ps1 or sys.ps2 to prompt the next + line. 
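+
+        For illustration (an added sketch):
+
+            interp = InteractiveInterpreter()
+            interp.runsource("x = 40 + 2")   # complete: runs, returns False
+            interp.runsource("if x:")        # incomplete: returns True
+            interp.runsource("x +* 1")       # invalid: traceback shown, returns False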
+ + """ + try: + code = self.compile(source, filename, symbol) + except (OverflowError, SyntaxError, ValueError): + # Case 1 + self.showsyntaxerror(filename) + return False + + if code is None: + # Case 2 + return True + + # Case 3 + self.runcode(code) + return False + + def runcode(self, code): + """Execute a code object. + + When an exception occurs, self.showtraceback() is called to + display a traceback. All exceptions are caught except + SystemExit, which is reraised. + + A note about KeyboardInterrupt: this exception may occur + elsewhere in this code, and may not always be caught. The + caller should be prepared to deal with it. + + """ + try: + exec(code, self.locals) + except SystemExit: + raise + except: + self.showtraceback() + + def showsyntaxerror(self, filename=None): + """Display the syntax error that just occurred. + + This doesn't display a stack trace because there isn't one. + + If a filename is given, it is stuffed in the exception instead + of what was there before (because Python's parser always uses + "" when reading from a string). + + The output is written by self.write(), below. + + """ + type, value, tb = sys.exc_info() + sys.last_type = type + sys.last_value = value + sys.last_traceback = tb + if filename and type is SyntaxError: + # Work hard to stuff the correct filename in the exception + try: + msg, (dummy_filename, lineno, offset, line) = value.args + except ValueError: + # Not the format we expect; leave it alone + pass + else: + # Stuff in the right filename + value = SyntaxError(msg, (filename, lineno, offset, line)) + sys.last_value = value + if sys.excepthook is sys.__excepthook__: + lines = traceback.format_exception_only(type, value) + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(type, value, tb) + + def showtraceback(self): + """Display the exception that just occurred. + + We remove the first stack item because it is our own code. + + The output is written by self.write(), below. + + """ + sys.last_type, sys.last_value, last_tb = ei = sys.exc_info() + sys.last_traceback = last_tb + try: + lines = traceback.format_exception(ei[0], ei[1], last_tb.tb_next) + if sys.excepthook is sys.__excepthook__: + self.write(''.join(lines)) + else: + # If someone has set sys.excepthook, we let that take precedence + # over self.write + sys.excepthook(ei[0], ei[1], last_tb) + finally: + last_tb = ei = None + + def write(self, data): + """Write a string. + + The base implementation writes to sys.stderr; a subclass may + replace this with a different implementation. + + """ + sys.stderr.write(data) + + +class InteractiveConsole(InteractiveInterpreter): + """Closely emulate the behavior of the interactive Python interpreter. + + This class builds on InteractiveInterpreter and adds prompting + using the familiar sys.ps1 and sys.ps2, and input buffering. + + """ + + def __init__(self, locals=None, filename=""): + """Constructor. + + The optional locals argument will be passed to the + InteractiveInterpreter base class. + + The optional filename argument should specify the (file)name + of the input stream; it will show up in tracebacks. + + """ + InteractiveInterpreter.__init__(self, locals) + self.filename = filename + self.resetbuffer() + + def resetbuffer(self): + """Reset the input buffer.""" + self.buffer = [] + + def interact(self, banner=None, exitmsg=None): + """Closely emulate the interactive Python console. 
+ + The optional banner argument specifies the banner to print + before the first interaction; by default it prints a banner + similar to the one printed by the real Python interpreter, + followed by the current class name in parentheses (so as not + to confuse this with the real interpreter -- since it's so + close!). + + The optional exitmsg argument specifies the exit message + printed when exiting. Pass the empty string to suppress + printing an exit message. If exitmsg is not given or None, + a default message is printed. + + """ + try: + sys.ps1 + except AttributeError: + sys.ps1 = ">>> " + try: + sys.ps2 + except AttributeError: + sys.ps2 = "... " + cprt = 'Type "help", "copyright", "credits" or "license" for more information.' + if banner is None: + self.write("Python %s on %s\n%s\n(%s)\n" % + (sys.version, sys.platform, cprt, + self.__class__.__name__)) + elif banner: + self.write("%s\n" % str(banner)) + more = 0 + while 1: + try: + if more: + prompt = sys.ps2 + else: + prompt = sys.ps1 + try: + line = self.raw_input(prompt) + except EOFError: + self.write("\n") + break + else: + more = self.push(line) + except KeyboardInterrupt: + self.write("\nKeyboardInterrupt\n") + self.resetbuffer() + more = 0 + if exitmsg is None: + self.write('now exiting %s...\n' % self.__class__.__name__) + elif exitmsg != '': + self.write('%s\n' % exitmsg) + + def push(self, line): + """Push a line to the interpreter. + + The line should not have a trailing newline; it may have + internal newlines. The line is appended to a buffer and the + interpreter's runsource() method is called with the + concatenated contents of the buffer as source. If this + indicates that the command was executed or invalid, the buffer + is reset; otherwise, the command is incomplete, and the buffer + is left as it was after the line was appended. The return + value is 1 if more input is required, 0 if the line was dealt + with in some way (this is the same as runsource()). + + """ + self.buffer.append(line) + source = "\n".join(self.buffer) + more = self.runsource(source, self.filename) + if not more: + self.resetbuffer() + return more + + def raw_input(self, prompt=""): + """Write a prompt and read a line. + + The returned line does not include the trailing newline. + When the user enters the EOF key sequence, EOFError is raised. + + The base implementation uses the built-in function + input(); a subclass may replace this with a different + implementation. + + """ + return input(prompt) + + + +def interact(banner=None, readfunc=None, local=None, exitmsg=None): + """Closely emulate the interactive Python interpreter. + + This is a backwards compatible interface to the InteractiveConsole + class. When readfunc is not specified, it attempts to import the + readline module to enable GNU readline if it is available. 
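+
+    A typical illustrative call (an added example) drops into a REPL that
+    can see the caller's local variables:
+
+        import code
+        code.interact(banner="debug console", local=locals())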
+ + Arguments (all optional, all default to None): + + banner -- passed to InteractiveConsole.interact() + readfunc -- if not None, replaces InteractiveConsole.raw_input() + local -- passed to InteractiveInterpreter.__init__() + exitmsg -- passed to InteractiveConsole.interact() + + """ + console = InteractiveConsole(local) + if readfunc is not None: + console.raw_input = readfunc + else: + try: + import readline + except ImportError: + pass + console.interact(banner, exitmsg) + + +if __name__ == "__main__": + import argparse + + parser = argparse.ArgumentParser() + parser.add_argument('-q', action='store_true', + help="don't print version and copyright messages") + args = parser.parse_args() + if args.q or sys.flags.quiet: + banner = '' + else: + banner = None + interact(banner) diff --git a/python310/codecs.py b/python310/codecs.py new file mode 100644 index 0000000000000000000000000000000000000000..3b173b612101e7c635303a2129e3fe363791e2eb --- /dev/null +++ b/python310/codecs.py @@ -0,0 +1,1127 @@ +""" codecs -- Python Codec Registry, API and helpers. + + +Written by Marc-Andre Lemburg (mal@lemburg.com). + +(c) Copyright CNRI, All Rights Reserved. NO WARRANTY. + +""" + +import builtins +import sys + +### Registry and builtin stateless codec functions + +try: + from _codecs import * +except ImportError as why: + raise SystemError('Failed to load the builtin codecs: %s' % why) + +__all__ = ["register", "lookup", "open", "EncodedFile", "BOM", "BOM_BE", + "BOM_LE", "BOM32_BE", "BOM32_LE", "BOM64_BE", "BOM64_LE", + "BOM_UTF8", "BOM_UTF16", "BOM_UTF16_LE", "BOM_UTF16_BE", + "BOM_UTF32", "BOM_UTF32_LE", "BOM_UTF32_BE", + "CodecInfo", "Codec", "IncrementalEncoder", "IncrementalDecoder", + "StreamReader", "StreamWriter", + "StreamReaderWriter", "StreamRecoder", + "getencoder", "getdecoder", "getincrementalencoder", + "getincrementaldecoder", "getreader", "getwriter", + "encode", "decode", "iterencode", "iterdecode", + "strict_errors", "ignore_errors", "replace_errors", + "xmlcharrefreplace_errors", + "backslashreplace_errors", "namereplace_errors", + "register_error", "lookup_error"] + +### Constants + +# +# Byte Order Mark (BOM = ZERO WIDTH NO-BREAK SPACE = U+FEFF) +# and its possible byte string values +# for UTF8/UTF16/UTF32 output and little/big endian machines +# + +# UTF-8 +BOM_UTF8 = b'\xef\xbb\xbf' + +# UTF-16, little endian +BOM_LE = BOM_UTF16_LE = b'\xff\xfe' + +# UTF-16, big endian +BOM_BE = BOM_UTF16_BE = b'\xfe\xff' + +# UTF-32, little endian +BOM_UTF32_LE = b'\xff\xfe\x00\x00' + +# UTF-32, big endian +BOM_UTF32_BE = b'\x00\x00\xfe\xff' + +if sys.byteorder == 'little': + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_LE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_LE + +else: + + # UTF-16, native endianness + BOM = BOM_UTF16 = BOM_UTF16_BE + + # UTF-32, native endianness + BOM_UTF32 = BOM_UTF32_BE + +# Old broken names (don't use in new code) +BOM32_LE = BOM_UTF16_LE +BOM32_BE = BOM_UTF16_BE +BOM64_LE = BOM_UTF32_LE +BOM64_BE = BOM_UTF32_BE + + +### Codec base classes (defining the API) + +class CodecInfo(tuple): + """Codec details when looking up the codec registry""" + + # Private API to allow Python 3.4 to denylist the known non-Unicode + # codecs in the standard library. 
A more general mechanism to + # reliably distinguish test encodings from other codecs will hopefully + # be defined for Python 3.5 + # + # See http://bugs.python.org/issue19619 + _is_text_encoding = True # Assume codecs are text encodings by default + + def __new__(cls, encode, decode, streamreader=None, streamwriter=None, + incrementalencoder=None, incrementaldecoder=None, name=None, + *, _is_text_encoding=None): + self = tuple.__new__(cls, (encode, decode, streamreader, streamwriter)) + self.name = name + self.encode = encode + self.decode = decode + self.incrementalencoder = incrementalencoder + self.incrementaldecoder = incrementaldecoder + self.streamwriter = streamwriter + self.streamreader = streamreader + if _is_text_encoding is not None: + self._is_text_encoding = _is_text_encoding + return self + + def __repr__(self): + return "<%s.%s object for encoding %s at %#x>" % \ + (self.__class__.__module__, self.__class__.__qualname__, + self.name, id(self)) + +class Codec: + + """ Defines the interface for stateless encoders/decoders. + + The .encode()/.decode() methods may use different error + handling schemes by providing the errors argument. These + string values are predefined: + + 'strict' - raise a ValueError error (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace' - replace with a suitable replacement character; + Python will use the official U+FFFD REPLACEMENT + CHARACTER for the builtin Unicode codecs on + decoding and '?' on encoding. + 'surrogateescape' - replace with private code points U+DCnn. + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference (only for encoding). + 'backslashreplace' - Replace with backslashed escape sequences. + 'namereplace' - Replace with \\N{...} escape sequences + (only for encoding). + + The set of allowed values can be extended via register_error. + + """ + def encode(self, input, errors='strict'): + + """ Encodes the object input and returns a tuple (output + object, length consumed). + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamWriter for codecs which have to keep state in order to + make encoding efficient. + + The encoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + + def decode(self, input, errors='strict'): + + """ Decodes the object input and returns a tuple (output + object, length consumed). + + input must be an object which provides the bf_getreadbuf + buffer slot. Python strings, buffer objects and memory + mapped files are examples of objects providing this slot. + + errors defines the error handling to apply. It defaults to + 'strict' handling. + + The method may not store state in the Codec instance. Use + StreamReader for codecs which have to keep state in order to + make decoding efficient. + + The decoder must be able to handle zero length input and + return an empty object of the output object type in this + situation. + + """ + raise NotImplementedError + +class IncrementalEncoder(object): + """ + An IncrementalEncoder encodes an input in multiple steps. The input can + be passed piece by piece to the encode() method. The IncrementalEncoder + remembers the state of the encoding process between calls to encode(). + """ + def __init__(self, errors='strict'): + """ + Creates an IncrementalEncoder instance. 
+ + The IncrementalEncoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + self.buffer = "" + + def encode(self, input, final=False): + """ + Encodes input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Resets the encoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the encoder. + """ + return 0 + + def setstate(self, state): + """ + Set the current state of the encoder. state must have been + returned by getstate(). + """ + +class BufferedIncrementalEncoder(IncrementalEncoder): + """ + This subclass of IncrementalEncoder can be used as the baseclass for an + incremental encoder if the encoder must keep some of the output in a + buffer between calls to encode(). + """ + def __init__(self, errors='strict'): + IncrementalEncoder.__init__(self, errors) + # unencoded input that is kept between calls to encode() + self.buffer = "" + + def _buffer_encode(self, input, errors, final): + # Overwrite this method in subclasses: It must encode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def encode(self, input, final=False): + # encode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_encode(data, self.errors, final) + # keep unencoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalEncoder.reset(self) + self.buffer = "" + + def getstate(self): + return self.buffer or 0 + + def setstate(self, state): + self.buffer = state or "" + +class IncrementalDecoder(object): + """ + An IncrementalDecoder decodes an input in multiple steps. The input can + be passed piece by piece to the decode() method. The IncrementalDecoder + remembers the state of the decoding process between calls to decode(). + """ + def __init__(self, errors='strict'): + """ + Create an IncrementalDecoder instance. + + The IncrementalDecoder may use different error handling schemes by + providing the errors keyword argument. See the module docstring + for a list of possible values. + """ + self.errors = errors + + def decode(self, input, final=False): + """ + Decode input and returns the resulting object. + """ + raise NotImplementedError + + def reset(self): + """ + Reset the decoder to the initial state. + """ + + def getstate(self): + """ + Return the current state of the decoder. + + This must be a (buffered_input, additional_state_info) tuple. + buffered_input must be a bytes object containing bytes that + were passed to decode() that have not yet been converted. + additional_state_info must be a non-negative integer + representing the state of the decoder WITHOUT yet having + processed the contents of buffered_input. In the initial state + and after reset(), getstate() must return (b"", 0). + """ + return (b"", 0) + + def setstate(self, state): + """ + Set the current state of the decoder. + + state must have been returned by getstate(). The effect of + setstate((b"", 0)) must be equivalent to reset(). + """ + +class BufferedIncrementalDecoder(IncrementalDecoder): + """ + This subclass of IncrementalDecoder can be used as the baseclass for an + incremental decoder if the decoder must be able to handle incomplete + byte sequences. 
+ """ + def __init__(self, errors='strict'): + IncrementalDecoder.__init__(self, errors) + # undecoded input that is kept between calls to decode() + self.buffer = b"" + + def _buffer_decode(self, input, errors, final): + # Overwrite this method in subclasses: It must decode input + # and return an (output, length consumed) tuple + raise NotImplementedError + + def decode(self, input, final=False): + # decode input (taking the buffer into account) + data = self.buffer + input + (result, consumed) = self._buffer_decode(data, self.errors, final) + # keep undecoded input until the next call + self.buffer = data[consumed:] + return result + + def reset(self): + IncrementalDecoder.reset(self) + self.buffer = b"" + + def getstate(self): + # additional state info is always 0 + return (self.buffer, 0) + + def setstate(self, state): + # ignore additional state info + self.buffer = state[0] + +# +# The StreamWriter and StreamReader class provide generic working +# interfaces which can be used to implement new encoding submodules +# very easily. See encodings/utf_8.py for an example on how this is +# done. +# + +class StreamWriter(Codec): + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamWriter instance. + + stream must be a file-like object open for writing. + + The StreamWriter may use different error handling + schemes by providing the errors keyword argument. These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'xmlcharrefreplace' - Replace with the appropriate XML + character reference. + 'backslashreplace' - Replace with backslashed escape + sequences. + 'namereplace' - Replace with \\N{...} escape sequences. + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + + def write(self, object): + + """ Writes the object's contents encoded to self.stream. + """ + data, consumed = self.encode(object, self.errors) + self.stream.write(data) + + def writelines(self, list): + + """ Writes the concatenated list of strings to the stream + using .write(). + """ + self.write(''.join(list)) + + def reset(self): + + """ Resets the codec buffers used for keeping internal state. + + Calling this method should ensure that the data on the + output is put into a clean state, that allows appending + of new fresh data without having to rescan the whole + stream to recover state. + + """ + pass + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + if whence == 0 and offset == 0: + self.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReader(Codec): + + charbuffertype = str + + def __init__(self, stream, errors='strict'): + + """ Creates a StreamReader instance. + + stream must be a file-like object open for reading. + + The StreamReader may use different error handling + schemes by providing the errors keyword argument. 
These + parameters are predefined: + + 'strict' - raise a ValueError (or a subclass) + 'ignore' - ignore the character and continue with the next + 'replace'- replace with a suitable replacement character + 'backslashreplace' - Replace with backslashed escape sequences; + + The set of allowed parameter values can be extended via + register_error. + """ + self.stream = stream + self.errors = errors + self.bytebuffer = b"" + self._empty_charbuffer = self.charbuffertype() + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def decode(self, input, errors='strict'): + raise NotImplementedError + + def read(self, size=-1, chars=-1, firstline=False): + + """ Decodes data from the stream self.stream and returns the + resulting object. + + chars indicates the number of decoded code points or bytes to + return. read() will never return more data than requested, + but it might return less, if there is not enough available. + + size indicates the approximate maximum number of decoded + bytes or code points to read for decoding. The decoder + can modify this setting as appropriate. The default value + -1 indicates to read and decode as much as possible. size + is intended to prevent having to decode huge files in one + step. + + If firstline is true, and a UnicodeDecodeError happens + after the first line terminator in the input only the first line + will be returned, the rest of the input will be kept until the + next call to read(). + + The method should use a greedy read strategy, meaning that + it should read as much data as is allowed within the + definition of the encoding and the given size, e.g. if + optional encoding endings or state markers are available + on the stream, these should be read too. + """ + # If we have lines cached, first merge them back into characters + if self.linebuffer: + self.charbuffer = self._empty_charbuffer.join(self.linebuffer) + self.linebuffer = None + + if chars < 0: + # For compatibility with other read() methods that take a + # single argument + chars = size + + # read until we get the required number of characters (if available) + while True: + # can the request be satisfied from the character buffer? + if chars >= 0: + if len(self.charbuffer) >= chars: + break + # we need more data + if size < 0: + newdata = self.stream.read() + else: + newdata = self.stream.read(size) + # decode bytes (those remaining from the last call included) + data = self.bytebuffer + newdata + if not data: + break + try: + newchars, decodedbytes = self.decode(data, self.errors) + except UnicodeDecodeError as exc: + if firstline: + newchars, decodedbytes = \ + self.decode(data[:exc.start], self.errors) + lines = newchars.splitlines(keepends=True) + if len(lines)<=1: + raise + else: + raise + # keep undecoded bytes until the next call + self.bytebuffer = data[decodedbytes:] + # put new characters in the character buffer + self.charbuffer += newchars + # there was no data available + if not newdata: + break + if chars < 0: + # Return everything we've got + result = self.charbuffer + self.charbuffer = self._empty_charbuffer + else: + # Return the first chars characters + result = self.charbuffer[:chars] + self.charbuffer = self.charbuffer[chars:] + return result + + def readline(self, size=None, keepends=True): + + """ Read one line from the input stream and return the + decoded data. + + size, if given, is passed as size argument to the + read() method. 
+ + """ + # If we have lines cached from an earlier read, return + # them unconditionally + if self.linebuffer: + line = self.linebuffer[0] + del self.linebuffer[0] + if len(self.linebuffer) == 1: + # revert to charbuffer mode; we might need more data + # next time + self.charbuffer = self.linebuffer[0] + self.linebuffer = None + if not keepends: + line = line.splitlines(keepends=False)[0] + return line + + readsize = size or 72 + line = self._empty_charbuffer + # If size is given, we call read() only once + while True: + data = self.read(readsize, firstline=True) + if data: + # If we're at a "\r" read one extra character (which might + # be a "\n") to get a proper line ending. If the stream is + # temporarily exhausted we return the wrong line ending. + if (isinstance(data, str) and data.endswith("\r")) or \ + (isinstance(data, bytes) and data.endswith(b"\r")): + data += self.read(size=1, chars=1) + + line += data + lines = line.splitlines(keepends=True) + if lines: + if len(lines) > 1: + # More than one line result; the first line is a full line + # to return + line = lines[0] + del lines[0] + if len(lines) > 1: + # cache the remaining lines + lines[-1] += self.charbuffer + self.linebuffer = lines + self.charbuffer = None + else: + # only one remaining line, put it back into charbuffer + self.charbuffer = lines[0] + self.charbuffer + if not keepends: + line = line.splitlines(keepends=False)[0] + break + line0withend = lines[0] + line0withoutend = lines[0].splitlines(keepends=False)[0] + if line0withend != line0withoutend: # We really have a line end + # Put the rest back together and keep it until the next call + self.charbuffer = self._empty_charbuffer.join(lines[1:]) + \ + self.charbuffer + if keepends: + line = line0withend + else: + line = line0withoutend + break + # we didn't get anything or this was our only try + if not data or size is not None: + if line and not keepends: + line = line.splitlines(keepends=False)[0] + break + if readsize < 8000: + readsize *= 2 + return line + + def readlines(self, sizehint=None, keepends=True): + + """ Read all lines available on the input stream + and return them as a list. + + Line breaks are implemented using the codec's decoder + method and are included in the list entries. + + sizehint, if given, is ignored since there is no efficient + way to finding the true end-of-line. + + """ + data = self.read() + return data.splitlines(keepends) + + def reset(self): + + """ Resets the codec buffers used for keeping internal state. + + Note that no stream repositioning should take place. + This method is primarily intended to be able to recover + from decoding errors. + + """ + self.bytebuffer = b"" + self.charbuffer = self._empty_charbuffer + self.linebuffer = None + + def seek(self, offset, whence=0): + """ Set the input stream's current position. + + Resets the codec buffers used for keeping state. + """ + self.stream.seek(offset, whence) + self.reset() + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + line = self.readline() + if line: + return line + raise StopIteration + + def __iter__(self): + return self + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamReaderWriter: + + """ StreamReaderWriter instances allow wrapping streams which + work in both read and write modes. 
+ + The design is such that one can use the factory functions + returned by the codec.lookup() function to construct the + instance. + + """ + # Optional attributes set by the file wrappers below + encoding = 'unknown' + + def __init__(self, stream, Reader, Writer, errors='strict'): + + """ Creates a StreamReaderWriter instance. + + stream must be a Stream-like object. + + Reader, Writer must be factory functions or classes + providing the StreamReader, StreamWriter interface resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. + + """ + self.stream = stream + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + return self.reader.read(size) + + def readline(self, size=None): + + return self.reader.readline(size) + + def readlines(self, sizehint=None): + + return self.reader.readlines(sizehint) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + return next(self.reader) + + def __iter__(self): + return self + + def write(self, data): + + return self.writer.write(data) + + def writelines(self, list): + + return self.writer.writelines(list) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + self.stream.seek(offset, whence) + self.reader.reset() + if whence == 0 and offset == 0: + self.writer.reset() + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + # these are needed to make "with StreamReaderWriter(...)" work properly + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### + +class StreamRecoder: + + """ StreamRecoder instances translate data from one encoding to another. + + They use the complete set of APIs returned by the + codecs.lookup() function to implement their task. + + Data written to the StreamRecoder is first decoded into an + intermediate format (depending on the "decode" codec) and then + written to the underlying stream using an instance of the provided + Writer class. + + In the other direction, data is read from the underlying stream using + a Reader instance and then encoded and returned to the caller. + + """ + # Optional attributes set by the file wrappers below + data_encoding = 'unknown' + file_encoding = 'unknown' + + def __init__(self, stream, encode, decode, Reader, Writer, + errors='strict'): + + """ Creates a StreamRecoder instance which implements a two-way + conversion: encode and decode work on the frontend (the + data visible to .read() and .write()) while Reader and Writer + work on the backend (the data in stream). + + You can use these objects to do transparent + transcodings from e.g. latin-1 to utf-8 and back. + + stream must be a file-like object. + + encode and decode must adhere to the Codec interface; Reader and + Writer must be factory functions or classes providing the + StreamReader and StreamWriter interfaces resp. + + Error handling is done in the same way as defined for the + StreamWriter/Readers. 
+ + """ + self.stream = stream + self.encode = encode + self.decode = decode + self.reader = Reader(stream, errors) + self.writer = Writer(stream, errors) + self.errors = errors + + def read(self, size=-1): + + data = self.reader.read(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readline(self, size=None): + + if size is None: + data = self.reader.readline() + else: + data = self.reader.readline(size) + data, bytesencoded = self.encode(data, self.errors) + return data + + def readlines(self, sizehint=None): + + data = self.reader.read() + data, bytesencoded = self.encode(data, self.errors) + return data.splitlines(keepends=True) + + def __next__(self): + + """ Return the next decoded line from the input stream.""" + data = next(self.reader) + data, bytesencoded = self.encode(data, self.errors) + return data + + def __iter__(self): + return self + + def write(self, data): + + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def writelines(self, list): + + data = b''.join(list) + data, bytesdecoded = self.decode(data, self.errors) + return self.writer.write(data) + + def reset(self): + + self.reader.reset() + self.writer.reset() + + def seek(self, offset, whence=0): + # Seeks must be propagated to both the readers and writers + # as they might need to reset their internal buffers. + self.reader.seek(offset, whence) + self.writer.seek(offset, whence) + + def __getattr__(self, name, + getattr=getattr): + + """ Inherit all other methods from the underlying stream. + """ + return getattr(self.stream, name) + + def __enter__(self): + return self + + def __exit__(self, type, value, tb): + self.stream.close() + +### Shortcuts + +def open(filename, mode='r', encoding=None, errors='strict', buffering=-1): + + """ Open an encoded file using the given mode and return + a wrapped version providing transparent encoding/decoding. + + Note: The wrapped version will only accept the object format + defined by the codecs, i.e. Unicode objects for most builtin + codecs. Output is also codec dependent and will usually be + Unicode as well. + + If encoding is not None, then the + underlying encoded files are always opened in binary mode. + The default file mode is 'r', meaning to open the file in read mode. + + encoding specifies the encoding which is to be used for the + file. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + buffering has the same meaning as for the builtin open() API. + It defaults to -1 which means that the default buffer size will + be used. + + The returned wrapped file object provides an extra attribute + .encoding which allows querying the used encoding. This + attribute is only available if an encoding was specified as + parameter. + + """ + if encoding is not None and \ + 'b' not in mode: + # Force opening of the file in binary mode + mode = mode + 'b' + file = builtins.open(filename, mode, buffering) + if encoding is None: + return file + + try: + info = lookup(encoding) + srw = StreamReaderWriter(file, info.streamreader, info.streamwriter, errors) + # Add attributes to simplify introspection + srw.encoding = encoding + return srw + except: + file.close() + raise + +def EncodedFile(file, data_encoding, file_encoding=None, errors='strict'): + + """ Return a wrapped version of file which provides transparent + encoding translation. 
+ + Data written to the wrapped file is decoded according + to the given data_encoding and then encoded to the underlying + file using file_encoding. The intermediate data type + will usually be Unicode but depends on the specified codecs. + + Bytes read from the file are decoded using file_encoding and then + passed back to the caller encoded using data_encoding. + + If file_encoding is not given, it defaults to data_encoding. + + errors may be given to define the error handling. It defaults + to 'strict' which causes ValueErrors to be raised in case an + encoding error occurs. + + The returned wrapped file object provides two extra attributes + .data_encoding and .file_encoding which reflect the given + parameters of the same name. The attributes can be used for + introspection by Python programs. + + """ + if file_encoding is None: + file_encoding = data_encoding + data_info = lookup(data_encoding) + file_info = lookup(file_encoding) + sr = StreamRecoder(file, data_info.encode, data_info.decode, + file_info.streamreader, file_info.streamwriter, errors) + # Add attributes to simplify introspection + sr.data_encoding = data_encoding + sr.file_encoding = file_encoding + return sr + +### Helpers for codec lookup + +def getencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its encoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).encode + +def getdecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its decoder function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).decode + +def getincrementalencoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalEncoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental encoder. + + """ + encoder = lookup(encoding).incrementalencoder + if encoder is None: + raise LookupError(encoding) + return encoder + +def getincrementaldecoder(encoding): + + """ Lookup up the codec for the given encoding and return + its IncrementalDecoder class or factory function. + + Raises a LookupError in case the encoding cannot be found + or the codecs doesn't provide an incremental decoder. + + """ + decoder = lookup(encoding).incrementaldecoder + if decoder is None: + raise LookupError(encoding) + return decoder + +def getreader(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamReader class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamreader + +def getwriter(encoding): + + """ Lookup up the codec for the given encoding and return + its StreamWriter class or factory function. + + Raises a LookupError in case the encoding cannot be found. + + """ + return lookup(encoding).streamwriter + +def iterencode(iterator, encoding, errors='strict', **kwargs): + """ + Encoding iterator. + + Encodes the input strings from the iterator using an IncrementalEncoder. + + errors and kwargs are passed through to the IncrementalEncoder + constructor. + """ + encoder = getincrementalencoder(encoding)(errors, **kwargs) + for input in iterator: + output = encoder.encode(input) + if output: + yield output + output = encoder.encode("", True) + if output: + yield output + +def iterdecode(iterator, encoding, errors='strict', **kwargs): + """ + Decoding iterator. 
+ + Decodes the input strings from the iterator using an IncrementalDecoder. + + errors and kwargs are passed through to the IncrementalDecoder + constructor. + """ + decoder = getincrementaldecoder(encoding)(errors, **kwargs) + for input in iterator: + output = decoder.decode(input) + if output: + yield output + output = decoder.decode(b"", True) + if output: + yield output + +### Helpers for charmap-based codecs + +def make_identity_dict(rng): + + """ make_identity_dict(rng) -> dict + + Return a dictionary where elements of the rng sequence are + mapped to themselves. + + """ + return {i:i for i in rng} + +def make_encoding_map(decoding_map): + + """ Creates an encoding map from a decoding map. + + If a target mapping in the decoding map occurs multiple + times, then that target is mapped to None (undefined mapping), + causing an exception when encountered by the charmap codec + during translation. + + One example where this happens is cp875.py which decodes + multiple character to \\u001a. + + """ + m = {} + for k,v in decoding_map.items(): + if not v in m: + m[v] = k + else: + m[v] = None + return m + +### error handlers + +try: + strict_errors = lookup_error("strict") + ignore_errors = lookup_error("ignore") + replace_errors = lookup_error("replace") + xmlcharrefreplace_errors = lookup_error("xmlcharrefreplace") + backslashreplace_errors = lookup_error("backslashreplace") + namereplace_errors = lookup_error("namereplace") +except LookupError: + # In --disable-unicode builds, these error handler are missing + strict_errors = None + ignore_errors = None + replace_errors = None + xmlcharrefreplace_errors = None + backslashreplace_errors = None + namereplace_errors = None + +# Tell modulefinder that using codecs probably needs the encodings +# package +_false = 0 +if _false: + import encodings + +### Tests + +if __name__ == '__main__': + + # Make stdout translate Latin-1 output into UTF-8 output + sys.stdout = EncodedFile(sys.stdout, 'latin-1', 'utf-8') + + # Have stdin translate Latin-1 input into UTF-8 input + sys.stdin = EncodedFile(sys.stdin, 'utf-8', 'latin-1') diff --git a/python310/codeop.py b/python310/codeop.py new file mode 100644 index 0000000000000000000000000000000000000000..fe8713ecf46bfa4a60bed8e110e4c0ab3b8c8d73 --- /dev/null +++ b/python310/codeop.py @@ -0,0 +1,153 @@ +r"""Utilities to compile possibly incomplete Python source code. + +This module provides two interfaces, broadly similar to the builtin +function compile(), which take program text, a filename and a 'mode' +and: + +- Return code object if the command is complete and valid +- Return None if the command is incomplete +- Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + +The two interfaces are: + +compile_command(source, filename, symbol): + + Compiles a single command in the manner described above. + +CommandCompiler(): + + Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force. + +The module also provides another class: + +Compile(): + + Instances of this class act like the built-in function compile, + but with 'memory' in the sense described above. 
+""" + +import __future__ +import warnings + +_features = [getattr(__future__, fname) + for fname in __future__.all_feature_names] + +__all__ = ["compile_command", "Compile", "CommandCompiler"] + +# The following flags match the values from Include/cpython/compile.h +# Caveat emptor: These flags are undocumented on purpose and depending +# on their effect outside the standard library is **unsupported**. +PyCF_DONT_IMPLY_DEDENT = 0x200 +PyCF_ALLOW_INCOMPLETE_INPUT = 0x4000 + +def _maybe_compile(compiler, source, filename, symbol): + # Check for source consisting of only blank lines and comments. + for line in source.split("\n"): + line = line.strip() + if line and line[0] != '#': + break # Leave it alone. + else: + if symbol != "eval": + source = "pass" # Replace it with a 'pass' statement + + # Disable compiler warnings when checking for incomplete input. + with warnings.catch_warnings(): + warnings.simplefilter("ignore", (SyntaxWarning, DeprecationWarning)) + try: + compiler(source, filename, symbol) + except SyntaxError: # Let other compile() errors propagate. + try: + compiler(source + "\n", filename, symbol) + return None + except SyntaxError as e: + if "incomplete input" in str(e): + return None + # fallthrough + + return compiler(source, filename, symbol) + + +def _is_syntax_error(err1, err2): + rep1 = repr(err1) + rep2 = repr(err2) + if "was never closed" in rep1 and "was never closed" in rep2: + return False + if rep1 == rep2: + return True + return False + +def _compile(source, filename, symbol): + return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT) + +def compile_command(source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. + + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; default + "" + symbol -- optional grammar start symbol; "single" (default), "exec" + or "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(_compile, source, filename, symbol) + +class Compile: + """Instances of this class behave much like the built-in compile + function, but if one is used to compile text containing a future + statement, it "remembers" and compiles all subsequent program texts + with the statement in force.""" + def __init__(self): + self.flags = PyCF_DONT_IMPLY_DEDENT | PyCF_ALLOW_INCOMPLETE_INPUT + + def __call__(self, source, filename, symbol): + codeob = compile(source, filename, symbol, self.flags, True) + for feature in _features: + if codeob.co_flags & feature.compiler_flag: + self.flags |= feature.compiler_flag + return codeob + +class CommandCompiler: + """Instances of this class have __call__ methods identical in + signature to compile_command; the difference is that if the + instance compiles program text containing a __future__ statement, + the instance 'remembers' and compiles all subsequent program texts + with the statement in force.""" + + def __init__(self,): + self.compiler = Compile() + + def __call__(self, source, filename="", symbol="single"): + r"""Compile a command and determine whether it is incomplete. 
+ + Arguments: + + source -- the source string; may contain \n characters + filename -- optional filename from which source was read; + default "" + symbol -- optional grammar start symbol; "single" (default) or + "eval" + + Return value / exceptions raised: + + - Return a code object if the command is complete and valid + - Return None if the command is incomplete + - Raise SyntaxError, ValueError or OverflowError if the command is a + syntax error (OverflowError and ValueError can be produced by + malformed literals). + """ + return _maybe_compile(self.compiler, source, filename, symbol) diff --git a/python310/collections/__init__.py b/python310/collections/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..818588f7403182b1bf747b3f0d3c9b79b14ef1b3 --- /dev/null +++ b/python310/collections/__init__.py @@ -0,0 +1,1556 @@ +'''This module implements specialized container datatypes providing +alternatives to Python's general purpose built-in containers, dict, +list, set, and tuple. + +* namedtuple factory function for creating tuple subclasses with named fields +* deque list-like container with fast appends and pops on either end +* ChainMap dict-like class for creating a single view of multiple mappings +* Counter dict subclass for counting hashable objects +* OrderedDict dict subclass that remembers the order entries were added +* defaultdict dict subclass that calls a factory function to supply missing values +* UserDict wrapper around dictionary objects for easier dict subclassing +* UserList wrapper around list objects for easier list subclassing +* UserString wrapper around string objects for easier string subclassing + +''' + +__all__ = [ + 'ChainMap', + 'Counter', + 'OrderedDict', + 'UserDict', + 'UserList', + 'UserString', + 'defaultdict', + 'deque', + 'namedtuple', +] + +import _collections_abc +import sys as _sys + +from itertools import chain as _chain +from itertools import repeat as _repeat +from itertools import starmap as _starmap +from keyword import iskeyword as _iskeyword +from operator import eq as _eq +from operator import itemgetter as _itemgetter +from reprlib import recursive_repr as _recursive_repr +from _weakref import proxy as _proxy + +try: + from _collections import deque +except ImportError: + pass +else: + _collections_abc.MutableSequence.register(deque) + +try: + from _collections import defaultdict +except ImportError: + pass + + +################################################################################ +### OrderedDict +################################################################################ + +class _OrderedDictKeysView(_collections_abc.KeysView): + + def __reversed__(self): + yield from reversed(self._mapping) + +class _OrderedDictItemsView(_collections_abc.ItemsView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield (key, self._mapping[key]) + +class _OrderedDictValuesView(_collections_abc.ValuesView): + + def __reversed__(self): + for key in reversed(self._mapping): + yield self._mapping[key] + +class _Link(object): + __slots__ = 'prev', 'next', 'key', '__weakref__' + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. 
+ # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # The sentinel is in self.__hardroot with a weakref proxy in self.__root. + # The prev links are weakref proxies (to prevent circular references). + # Individual links are kept alive by the hard reference in self.__map. + # Those hard references disappear when a key is deleted from an OrderedDict. + + def __init__(self, other=(), /, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries. Keyword argument order is preserved. + ''' + try: + self.__root + except AttributeError: + self.__hardroot = _Link() + self.__root = root = _proxy(self.__hardroot) + root.prev = root.next = root + self.__map = {} + self.__update(other, **kwds) + + def __setitem__(self, key, value, + dict_setitem=dict.__setitem__, proxy=_proxy, Link=_Link): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. + if key not in self: + self.__map[key] = link = Link() + root = self.__root + last = root.prev + link.prev, link.next, link.key = last, root, key + last.next = link + root.prev = proxy(link) + dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link = self.__map.pop(key) + link_prev = link.prev + link_next = link.next + link_prev.next = link_next + link_next.prev = link_prev + link.prev = None + link.next = None + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + curr = root.next + while curr is not root: + yield curr.key + curr = curr.next + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + curr = root.prev + while curr is not root: + yield curr.key + curr = curr.prev + + def clear(self): + 'od.clear() -> None. Remove all items from od.' + root = self.__root + root.prev = root.next = root + self.__map.clear() + dict.clear(self) + + def popitem(self, last=True): + '''Remove and return a (key, value) pair from the dictionary. + + Pairs are returned in LIFO order if last is true or FIFO order if false. + ''' + if not self: + raise KeyError('dictionary is empty') + root = self.__root + if last: + link = root.prev + link_prev = link.prev + link_prev.next = root + root.prev = link_prev + else: + link = root.next + link_next = link.next + root.next = link_next + link_next.prev = root + key = link.key + del self.__map[key] + value = dict.pop(self, key) + return key, value + + def move_to_end(self, key, last=True): + '''Move an existing element to the end (or beginning if last is false). + + Raise KeyError if the element does not exist. 
+ ''' + link = self.__map[key] + link_prev = link.prev + link_next = link.next + soft_link = link_next.prev + link_prev.next = link_next + link_next.prev = link_prev + root = self.__root + if last: + last = root.prev + link.prev = last + link.next = root + root.prev = soft_link + last.next = link + else: + first = root.next + link.prev = root + link.next = first + first.prev = soft_link + root.next = link + + def __sizeof__(self): + sizeof = _sys.getsizeof + n = len(self) + 1 # number of links including root + size = sizeof(self.__dict__) # instance dictionary + size += sizeof(self.__map) * 2 # internal dict and inherited dict + size += sizeof(self.__hardroot) * n # link objects + size += sizeof(self.__root) * n # proxy objects + return size + + update = __update = _collections_abc.MutableMapping.update + + def keys(self): + "D.keys() -> a set-like object providing a view on D's keys" + return _OrderedDictKeysView(self) + + def items(self): + "D.items() -> a set-like object providing a view on D's items" + return _OrderedDictItemsView(self) + + def values(self): + "D.values() -> an object providing a view on D's values" + return _OrderedDictValuesView(self) + + __ne__ = _collections_abc.MutableMapping.__ne__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + '''Insert key with a value of default if key is not in the dictionary. + + Return the value for key if key is in the dictionary, else default. + ''' + if key in self: + return self[key] + self[key] = default + return default + + @_recursive_repr() + def __repr__(self): + 'od.__repr__() <==> repr(od)' + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, list(self.items())) + + def __reduce__(self): + 'Return state information for pickling' + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + return self.__class__, (), inst_dict or None, None, iter(self.items()) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''Create a new ordered dictionary with keys from iterable and values set to value. + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(map(_eq, self, other)) + return dict.__eq__(self, other) + + def __ior__(self, other): + self.update(other) + return self + + def __or__(self, other): + if not isinstance(other, dict): + return NotImplemented + new = self.__class__(self) + new.update(other) + return new + + def __ror__(self, other): + if not isinstance(other, dict): + return NotImplemented + new = self.__class__(other) + new.update(self) + return new + + +try: + from _collections import OrderedDict +except ImportError: + # Leave the pure Python version in place. 
+ pass + + +################################################################################ +### namedtuple +################################################################################ + +try: + from _collections import _tuplegetter +except ImportError: + _tuplegetter = lambda index, doc: property(_itemgetter(index), doc=doc) + +def namedtuple(typename, field_names, *, rename=False, defaults=None, module=None): + """Returns a new subclass of tuple with named fields. + + >>> Point = namedtuple('Point', ['x', 'y']) + >>> Point.__doc__ # docstring for the new class + 'Point(x, y)' + >>> p = Point(11, y=22) # instantiate with positional args or keywords + >>> p[0] + p[1] # indexable like a plain tuple + 33 + >>> x, y = p # unpack like a regular tuple + >>> x, y + (11, 22) + >>> p.x + p.y # fields also accessible by name + 33 + >>> d = p._asdict() # convert to a dictionary + >>> d['x'] + 11 + >>> Point(**d) # convert from a dictionary + Point(x=11, y=22) + >>> p._replace(x=100) # _replace() is like str.replace() but targets named fields + Point(x=100, y=22) + + """ + + # Validate the field names. At the user's option, either generate an error + # message or automatically replace the field name with a valid name. + if isinstance(field_names, str): + field_names = field_names.replace(',', ' ').split() + field_names = list(map(str, field_names)) + typename = _sys.intern(str(typename)) + + if rename: + seen = set() + for index, name in enumerate(field_names): + if (not name.isidentifier() + or _iskeyword(name) + or name.startswith('_') + or name in seen): + field_names[index] = f'_{index}' + seen.add(name) + + for name in [typename] + field_names: + if type(name) is not str: + raise TypeError('Type names and field names must be strings') + if not name.isidentifier(): + raise ValueError('Type names and field names must be valid ' + f'identifiers: {name!r}') + if _iskeyword(name): + raise ValueError('Type names and field names cannot be a ' + f'keyword: {name!r}') + + seen = set() + for name in field_names: + if name.startswith('_') and not rename: + raise ValueError('Field names cannot start with an underscore: ' + f'{name!r}') + if name in seen: + raise ValueError(f'Encountered duplicate field name: {name!r}') + seen.add(name) + + field_defaults = {} + if defaults is not None: + defaults = tuple(defaults) + if len(defaults) > len(field_names): + raise TypeError('Got more default values than field names') + field_defaults = dict(reversed(list(zip(reversed(field_names), + reversed(defaults))))) + + # Variables used in the methods and docstrings + field_names = tuple(map(_sys.intern, field_names)) + num_fields = len(field_names) + arg_list = ', '.join(field_names) + if num_fields == 1: + arg_list += ',' + repr_fmt = '(' + ', '.join(f'{name}=%r' for name in field_names) + ')' + tuple_new = tuple.__new__ + _dict, _tuple, _len, _map, _zip = dict, tuple, len, map, zip + + # Create all the named tuple methods to be added to the class namespace + + namespace = { + '_tuple_new': tuple_new, + '__builtins__': {}, + '__name__': f'namedtuple_{typename}', + } + code = f'lambda _cls, {arg_list}: _tuple_new(_cls, ({arg_list}))' + __new__ = eval(code, namespace) + __new__.__name__ = '__new__' + __new__.__doc__ = f'Create new instance of {typename}({arg_list})' + if defaults is not None: + __new__.__defaults__ = defaults + + @classmethod + def _make(cls, iterable): + result = tuple_new(cls, iterable) + if _len(result) != num_fields: + raise TypeError(f'Expected {num_fields} arguments, got {len(result)}') + 
return result + + _make.__func__.__doc__ = (f'Make a new {typename} object from a sequence ' + 'or iterable') + + def _replace(self, /, **kwds): + result = self._make(_map(kwds.pop, field_names, self)) + if kwds: + raise ValueError(f'Got unexpected field names: {list(kwds)!r}') + return result + + _replace.__doc__ = (f'Return a new {typename} object replacing specified ' + 'fields with new values') + + def __repr__(self): + 'Return a nicely formatted representation string' + return self.__class__.__name__ + repr_fmt % self + + def _asdict(self): + 'Return a new dict which maps field names to their values.' + return _dict(_zip(self._fields, self)) + + def __getnewargs__(self): + 'Return self as a plain tuple. Used by copy and pickle.' + return _tuple(self) + + # Modify function metadata to help with introspection and debugging + for method in ( + __new__, + _make.__func__, + _replace, + __repr__, + _asdict, + __getnewargs__, + ): + method.__qualname__ = f'{typename}.{method.__name__}' + + # Build-up the class namespace dictionary + # and use type() to build the result class + class_namespace = { + '__doc__': f'{typename}({arg_list})', + '__slots__': (), + '_fields': field_names, + '_field_defaults': field_defaults, + '__new__': __new__, + '_make': _make, + '_replace': _replace, + '__repr__': __repr__, + '_asdict': _asdict, + '__getnewargs__': __getnewargs__, + '__match_args__': field_names, + } + for index, name in enumerate(field_names): + doc = _sys.intern(f'Alias for field number {index}') + class_namespace[name] = _tuplegetter(index, doc) + + result = type(typename, (tuple,), class_namespace) + + # For pickling to work, the __module__ variable needs to be set to the frame + # where the named tuple is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython), or where the user has + # specified a particular module. + if module is None: + try: + module = _sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + if module is not None: + result.__module__ = module + + return result + + +######################################################################## +### Counter +######################################################################## + +def _count_elements(mapping, iterable): + 'Tally elements from the iterable.' + mapping_get = mapping.get + for elem in iterable: + mapping[elem] = mapping_get(elem, 0) + 1 + +try: # Load C helper function if available + from _collections import _count_elements +except ImportError: + pass + +class Counter(dict): + '''Dict subclass for counting hashable items. Sometimes called a bag + or multiset. Elements are stored as dictionary keys and their counts + are stored as dictionary values. + + >>> c = Counter('abcdeabcdabcaba') # count elements from a string + + >>> c.most_common(3) # three most common elements + [('a', 5), ('b', 4), ('c', 3)] + >>> sorted(c) # list all unique elements + ['a', 'b', 'c', 'd', 'e'] + >>> ''.join(sorted(c.elements())) # list elements with repetitions + 'aaaaabbbbcccdde' + >>> sum(c.values()) # total of all counts + 15 + + >>> c['a'] # count of letter 'a' + 5 + >>> for elem in 'shazam': # update counts from an iterable + ... 
c[elem] += 1 # by adding 1 to each element's count + >>> c['a'] # now there are seven 'a' + 7 + >>> del c['b'] # remove all 'b' + >>> c['b'] # now there are zero 'b' + 0 + + >>> d = Counter('simsalabim') # make another counter + >>> c.update(d) # add in the second counter + >>> c['a'] # now there are nine 'a' + 9 + + >>> c.clear() # empty the counter + >>> c + Counter() + + Note: If a count is set to zero or reduced to zero, it will remain + in the counter until the entry is deleted or the counter is cleared: + + >>> c = Counter('aaabbc') + >>> c['b'] -= 2 # reduce the count of 'b' by two + >>> c.most_common() # 'b' is still in, but its count is zero + [('a', 3), ('c', 1), ('b', 0)] + + ''' + # References: + # http://en.wikipedia.org/wiki/Multiset + # http://www.gnu.org/software/smalltalk/manual-base/html_node/Bag.html + # http://www.demo2s.com/Tutorial/Cpp/0380__set-multiset/Catalog0380__set-multiset.htm + # http://code.activestate.com/recipes/259174/ + # Knuth, TAOCP Vol. II section 4.6.3 + + def __init__(self, iterable=None, /, **kwds): + '''Create a new, empty Counter object. And if given, count elements + from an input iterable. Or, initialize the count from another mapping + of elements to their counts. + + >>> c = Counter() # a new, empty counter + >>> c = Counter('gallahad') # a new counter from an iterable + >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping + >>> c = Counter(a=4, b=2) # a new counter from keyword args + + ''' + super().__init__() + self.update(iterable, **kwds) + + def __missing__(self, key): + 'The count of elements not in the Counter is zero.' + # Needed so that self[missing_item] does not raise KeyError + return 0 + + def total(self): + 'Sum of the counts' + return sum(self.values()) + + def most_common(self, n=None): + '''List the n most common elements and their counts from the most + common to the least. If n is None, then list all element counts. + + >>> Counter('abracadabra').most_common(3) + [('a', 5), ('b', 2), ('r', 2)] + + ''' + # Emulate Bag.sortedByCount from Smalltalk + if n is None: + return sorted(self.items(), key=_itemgetter(1), reverse=True) + + # Lazy import to speedup Python startup time + import heapq + return heapq.nlargest(n, self.items(), key=_itemgetter(1)) + + def elements(self): + '''Iterator over elements repeating each as many times as its count. + + >>> c = Counter('ABCABC') + >>> sorted(c.elements()) + ['A', 'A', 'B', 'B', 'C', 'C'] + + # Knuth's example for prime factors of 1836: 2**2 * 3**3 * 17**1 + >>> prime_factors = Counter({2: 2, 3: 3, 17: 1}) + >>> product = 1 + >>> for factor in prime_factors.elements(): # loop over factors + ... product *= factor # and multiply them + >>> product + 1836 + + Note, if an element's count has been set to zero or is a negative + number, elements() will ignore it. + + ''' + # Emulate Bag.do from Smalltalk and Multiset.begin from C++. + return _chain.from_iterable(_starmap(_repeat, self.items())) + + # Override dict methods where necessary + + @classmethod + def fromkeys(cls, iterable, v=None): + # There is no equivalent method for counters because the semantics + # would be ambiguous in cases such as Counter.fromkeys('aaabbc', v=2). + # Initializing counters to zero values isn't necessary because zero + # is already the default value for counter lookups. Initializing + # to one is easily accomplished with Counter(set(iterable)). For + # more exotic cases, create a dictionary first using a dictionary + # comprehension or dict.fromkeys(). 
+ raise NotImplementedError( + 'Counter.fromkeys() is undefined. Use Counter(iterable) instead.') + + def update(self, iterable=None, /, **kwds): + '''Like dict.update() but add counts instead of replacing them. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.update('witch') # add elements from another iterable + >>> d = Counter('watch') + >>> c.update(d) # add elements from another counter + >>> c['h'] # four 'h' in which, witch, and watch + 4 + + ''' + # The regular dict.update() operation makes no sense here because the + # replace behavior results in the some of original untouched counts + # being mixed-in with all of the other counts for a mismash that + # doesn't have a straight-forward interpretation in most counting + # contexts. Instead, we implement straight-addition. Both the inputs + # and outputs are allowed to contain zero and negative counts. + + if iterable is not None: + if isinstance(iterable, _collections_abc.Mapping): + if self: + self_get = self.get + for elem, count in iterable.items(): + self[elem] = count + self_get(elem, 0) + else: + # fast path when counter is empty + super().update(iterable) + else: + _count_elements(self, iterable) + if kwds: + self.update(kwds) + + def subtract(self, iterable=None, /, **kwds): + '''Like dict.update() but subtracts counts instead of replacing them. + Counts can be reduced below zero. Both the inputs and outputs are + allowed to contain zero and negative counts. + + Source can be an iterable, a dictionary, or another Counter instance. + + >>> c = Counter('which') + >>> c.subtract('witch') # subtract elements from another iterable + >>> c.subtract(Counter('watch')) # subtract elements from another counter + >>> c['h'] # 2 in which, minus 1 in witch, minus 1 in watch + 0 + >>> c['w'] # 1 in which, minus 1 in witch, minus 1 in watch + -1 + + ''' + if iterable is not None: + self_get = self.get + if isinstance(iterable, _collections_abc.Mapping): + for elem, count in iterable.items(): + self[elem] = self_get(elem, 0) - count + else: + for elem in iterable: + self[elem] = self_get(elem, 0) - 1 + if kwds: + self.subtract(kwds) + + def copy(self): + 'Return a shallow copy.' + return self.__class__(self) + + def __reduce__(self): + return self.__class__, (dict(self),) + + def __delitem__(self, elem): + 'Like dict.__delitem__() but does not raise KeyError for missing values.' + if elem in self: + super().__delitem__(elem) + + def __eq__(self, other): + 'True if all counts agree. Missing counts are treated as zero.' + if not isinstance(other, Counter): + return NotImplemented + return all(self[e] == other[e] for c in (self, other) for e in c) + + def __ne__(self, other): + 'True if any counts disagree. Missing counts are treated as zero.' + if not isinstance(other, Counter): + return NotImplemented + return not self == other + + def __le__(self, other): + 'True if all counts in self are a subset of those in other.' + if not isinstance(other, Counter): + return NotImplemented + return all(self[e] <= other[e] for c in (self, other) for e in c) + + def __lt__(self, other): + 'True if all counts in self are a proper subset of those in other.' + if not isinstance(other, Counter): + return NotImplemented + return self <= other and self != other + + def __ge__(self, other): + 'True if all counts in self are a superset of those in other.' 
+ if not isinstance(other, Counter): + return NotImplemented + return all(self[e] >= other[e] for c in (self, other) for e in c) + + def __gt__(self, other): + 'True if all counts in self are a proper superset of those in other.' + if not isinstance(other, Counter): + return NotImplemented + return self >= other and self != other + + def __repr__(self): + if not self: + return f'{self.__class__.__name__}()' + try: + # dict() preserves the ordering returned by most_common() + d = dict(self.most_common()) + except TypeError: + # handle case where values are not orderable + d = dict(self) + return f'{self.__class__.__name__}({d!r})' + + # Multiset-style mathematical operations discussed in: + # Knuth TAOCP Volume II section 4.6.3 exercise 19 + # and at http://en.wikipedia.org/wiki/Multiset + # + # Outputs guaranteed to only include positive counts. + # + # To strip negative and zero counts, add-in an empty counter: + # c += Counter() + # + # Results are ordered according to when an element is first + # encountered in the left operand and then by the order + # encountered in the right operand. + # + # When the multiplicities are all zero or one, multiset operations + # are guaranteed to be equivalent to the corresponding operations + # for regular sets. + # Given counter multisets such as: + # cp = Counter(a=1, b=0, c=1) + # cq = Counter(c=1, d=0, e=1) + # The corresponding regular sets would be: + # sp = {'a', 'c'} + # sq = {'c', 'e'} + # All of the following relations would hold: + # set(cp + cq) == sp | sq + # set(cp - cq) == sp - sq + # set(cp | cq) == sp | sq + # set(cp & cq) == sp & sq + # (cp == cq) == (sp == sq) + # (cp != cq) == (sp != sq) + # (cp <= cq) == (sp <= sq) + # (cp < cq) == (sp < sq) + # (cp >= cq) == (sp >= sq) + # (cp > cq) == (sp > sq) + + def __add__(self, other): + '''Add counts from two counters. + + >>> Counter('abbb') + Counter('bcc') + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count + other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __sub__(self, other): + ''' Subtract count, but keep only results with positive counts. + + >>> Counter('abbbc') - Counter('bccd') + Counter({'b': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + newcount = count - other[elem] + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count < 0: + result[elem] = 0 - count + return result + + def __or__(self, other): + '''Union is the maximum of value in either of the input counters. + + >>> Counter('abbb') | Counter('bcc') + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = other_count if count < other_count else count + if newcount > 0: + result[elem] = newcount + for elem, count in other.items(): + if elem not in self and count > 0: + result[elem] = count + return result + + def __and__(self, other): + ''' Intersection is the minimum of corresponding counts. 
+ + >>> Counter('abbb') & Counter('bcc') + Counter({'b': 1}) + + ''' + if not isinstance(other, Counter): + return NotImplemented + result = Counter() + for elem, count in self.items(): + other_count = other[elem] + newcount = count if count < other_count else other_count + if newcount > 0: + result[elem] = newcount + return result + + def __pos__(self): + 'Adds an empty counter, effectively stripping negative and zero counts' + result = Counter() + for elem, count in self.items(): + if count > 0: + result[elem] = count + return result + + def __neg__(self): + '''Subtracts from an empty counter. Strips positive and zero counts, + and flips the sign on negative counts. + + ''' + result = Counter() + for elem, count in self.items(): + if count < 0: + result[elem] = 0 - count + return result + + def _keep_positive(self): + '''Internal method to strip elements with a negative or zero count''' + nonpositive = [elem for elem, count in self.items() if not count > 0] + for elem in nonpositive: + del self[elem] + return self + + def __iadd__(self, other): + '''Inplace add from another counter, keeping only positive counts. + + >>> c = Counter('abbb') + >>> c += Counter('bcc') + >>> c + Counter({'b': 4, 'c': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] += count + return self._keep_positive() + + def __isub__(self, other): + '''Inplace subtract counter, but keep only results with positive counts. + + >>> c = Counter('abbbc') + >>> c -= Counter('bccd') + >>> c + Counter({'b': 2, 'a': 1}) + + ''' + for elem, count in other.items(): + self[elem] -= count + return self._keep_positive() + + def __ior__(self, other): + '''Inplace union is the maximum of value from either counter. + + >>> c = Counter('abbb') + >>> c |= Counter('bcc') + >>> c + Counter({'b': 3, 'c': 2, 'a': 1}) + + ''' + for elem, other_count in other.items(): + count = self[elem] + if other_count > count: + self[elem] = other_count + return self._keep_positive() + + def __iand__(self, other): + '''Inplace intersection is the minimum of corresponding counts. + + >>> c = Counter('abbb') + >>> c &= Counter('bcc') + >>> c + Counter({'b': 1}) + + ''' + for elem, count in self.items(): + other_count = other[elem] + if other_count < count: + self[elem] = other_count + return self._keep_positive() + + +######################################################################## +### ChainMap +######################################################################## + +class ChainMap(_collections_abc.MutableMapping): + ''' A ChainMap groups multiple dicts (or other mappings) together + to create a single, updateable view. + + The underlying mappings are stored in a list. That list is public and can + be accessed or updated using the *maps* attribute. There is no other + state. + + Lookups search the underlying mappings successively until a key is found. + In contrast, writes, updates, and deletions only operate on the first + mapping. + + ''' + + def __init__(self, *maps): + '''Initialize a ChainMap by setting *maps* to the given mappings. + If no mappings are provided, a single empty dictionary is used. 
+ + ''' + self.maps = list(maps) or [{}] # always at least one map + + def __missing__(self, key): + raise KeyError(key) + + def __getitem__(self, key): + for mapping in self.maps: + try: + return mapping[key] # can't use 'key in mapping' with defaultdict + except KeyError: + pass + return self.__missing__(key) # support subclasses that define __missing__ + + def get(self, key, default=None): + return self[key] if key in self else default + + def __len__(self): + return len(set().union(*self.maps)) # reuses stored hash values if possible + + def __iter__(self): + d = {} + for mapping in reversed(self.maps): + d.update(dict.fromkeys(mapping)) # reuses stored hash values if possible + return iter(d) + + def __contains__(self, key): + return any(key in m for m in self.maps) + + def __bool__(self): + return any(self.maps) + + @_recursive_repr() + def __repr__(self): + return f'{self.__class__.__name__}({", ".join(map(repr, self.maps))})' + + @classmethod + def fromkeys(cls, iterable, *args): + 'Create a ChainMap with a single dict created from the iterable.' + return cls(dict.fromkeys(iterable, *args)) + + def copy(self): + 'New ChainMap or subclass with a new copy of maps[0] and refs to maps[1:]' + return self.__class__(self.maps[0].copy(), *self.maps[1:]) + + __copy__ = copy + + def new_child(self, m=None, **kwargs): # like Django's Context.push() + '''New ChainMap with a new map followed by all previous maps. + If no map is provided, an empty dict is used. + Keyword arguments update the map or new empty dict. + ''' + if m is None: + m = kwargs + elif kwargs: + m.update(kwargs) + return self.__class__(m, *self.maps) + + @property + def parents(self): # like Django's Context.pop() + 'New ChainMap from maps[1:].' + return self.__class__(*self.maps[1:]) + + def __setitem__(self, key, value): + self.maps[0][key] = value + + def __delitem__(self, key): + try: + del self.maps[0][key] + except KeyError: + raise KeyError(f'Key not found in the first mapping: {key!r}') + + def popitem(self): + 'Remove and return an item pair from maps[0]. Raise KeyError is maps[0] is empty.' + try: + return self.maps[0].popitem() + except KeyError: + raise KeyError('No keys found in the first mapping.') + + def pop(self, key, *args): + 'Remove *key* from maps[0] and return its value. Raise KeyError if *key* not in maps[0].' + try: + return self.maps[0].pop(key, *args) + except KeyError: + raise KeyError(f'Key not found in the first mapping: {key!r}') + + def clear(self): + 'Clear maps[0], leaving maps[1:] intact.' 
+ self.maps[0].clear() + + def __ior__(self, other): + self.maps[0].update(other) + return self + + def __or__(self, other): + if not isinstance(other, _collections_abc.Mapping): + return NotImplemented + m = self.copy() + m.maps[0].update(other) + return m + + def __ror__(self, other): + if not isinstance(other, _collections_abc.Mapping): + return NotImplemented + m = dict(other) + for child in reversed(self.maps): + m.update(child) + return self.__class__(m) + + +################################################################################ +### UserDict +################################################################################ + +class UserDict(_collections_abc.MutableMapping): + + # Start by filling-out the abstract methods + def __init__(self, dict=None, /, **kwargs): + self.data = {} + if dict is not None: + self.update(dict) + if kwargs: + self.update(kwargs) + + def __len__(self): + return len(self.data) + + def __getitem__(self, key): + if key in self.data: + return self.data[key] + if hasattr(self.__class__, "__missing__"): + return self.__class__.__missing__(self, key) + raise KeyError(key) + + def __setitem__(self, key, item): + self.data[key] = item + + def __delitem__(self, key): + del self.data[key] + + def __iter__(self): + return iter(self.data) + + # Modify __contains__ to work correctly when __missing__ is present + def __contains__(self, key): + return key in self.data + + # Now, add the methods in dicts but not in MutableMapping + def __repr__(self): + return repr(self.data) + + def __or__(self, other): + if isinstance(other, UserDict): + return self.__class__(self.data | other.data) + if isinstance(other, dict): + return self.__class__(self.data | other) + return NotImplemented + + def __ror__(self, other): + if isinstance(other, UserDict): + return self.__class__(other.data | self.data) + if isinstance(other, dict): + return self.__class__(other | self.data) + return NotImplemented + + def __ior__(self, other): + if isinstance(other, UserDict): + self.data |= other.data + else: + self.data |= other + return self + + def __copy__(self): + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"].copy() + return inst + + def copy(self): + if self.__class__ is UserDict: + return UserDict(self.data.copy()) + import copy + data = self.data + try: + self.data = {} + c = copy.copy(self) + finally: + self.data = data + c.update(self) + return c + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + +################################################################################ +### UserList +################################################################################ + +class UserList(_collections_abc.MutableSequence): + """A more or less complete user-defined wrapper around list objects.""" + + def __init__(self, initlist=None): + self.data = [] + if initlist is not None: + # XXX should this accept an arbitrary sequence? 
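+            # Three cases: slice-copy from a plain list, copy the wrapped
+            # data from another UserList, or fall back to list() for any
+            # other iterable.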
+ if type(initlist) == type(self.data): + self.data[:] = initlist + elif isinstance(initlist, UserList): + self.data[:] = initlist.data[:] + else: + self.data = list(initlist) + + def __repr__(self): + return repr(self.data) + + def __lt__(self, other): + return self.data < self.__cast(other) + + def __le__(self, other): + return self.data <= self.__cast(other) + + def __eq__(self, other): + return self.data == self.__cast(other) + + def __gt__(self, other): + return self.data > self.__cast(other) + + def __ge__(self, other): + return self.data >= self.__cast(other) + + def __cast(self, other): + return other.data if isinstance(other, UserList) else other + + def __contains__(self, item): + return item in self.data + + def __len__(self): + return len(self.data) + + def __getitem__(self, i): + if isinstance(i, slice): + return self.__class__(self.data[i]) + else: + return self.data[i] + + def __setitem__(self, i, item): + self.data[i] = item + + def __delitem__(self, i): + del self.data[i] + + def __add__(self, other): + if isinstance(other, UserList): + return self.__class__(self.data + other.data) + elif isinstance(other, type(self.data)): + return self.__class__(self.data + other) + return self.__class__(self.data + list(other)) + + def __radd__(self, other): + if isinstance(other, UserList): + return self.__class__(other.data + self.data) + elif isinstance(other, type(self.data)): + return self.__class__(other + self.data) + return self.__class__(list(other) + self.data) + + def __iadd__(self, other): + if isinstance(other, UserList): + self.data += other.data + elif isinstance(other, type(self.data)): + self.data += other + else: + self.data += list(other) + return self + + def __mul__(self, n): + return self.__class__(self.data * n) + + __rmul__ = __mul__ + + def __imul__(self, n): + self.data *= n + return self + + def __copy__(self): + inst = self.__class__.__new__(self.__class__) + inst.__dict__.update(self.__dict__) + # Create a copy and avoid triggering descriptors + inst.__dict__["data"] = self.__dict__["data"][:] + return inst + + def append(self, item): + self.data.append(item) + + def insert(self, i, item): + self.data.insert(i, item) + + def pop(self, i=-1): + return self.data.pop(i) + + def remove(self, item): + self.data.remove(item) + + def clear(self): + self.data.clear() + + def copy(self): + return self.__class__(self) + + def count(self, item): + return self.data.count(item) + + def index(self, item, *args): + return self.data.index(item, *args) + + def reverse(self): + self.data.reverse() + + def sort(self, /, *args, **kwds): + self.data.sort(*args, **kwds) + + def extend(self, other): + if isinstance(other, UserList): + self.data.extend(other.data) + else: + self.data.extend(other) + + +################################################################################ +### UserString +################################################################################ + +class UserString(_collections_abc.Sequence): + + def __init__(self, seq): + if isinstance(seq, str): + self.data = seq + elif isinstance(seq, UserString): + self.data = seq.data[:] + else: + self.data = str(seq) + + def __str__(self): + return str(self.data) + + def __repr__(self): + return repr(self.data) + + def __int__(self): + return int(self.data) + + def __float__(self): + return float(self.data) + + def __complex__(self): + return complex(self.data) + + def __hash__(self): + return hash(self.data) + + def __getnewargs__(self): + return (self.data[:],) + + def __eq__(self, string): + if 
isinstance(string, UserString): + return self.data == string.data + return self.data == string + + def __lt__(self, string): + if isinstance(string, UserString): + return self.data < string.data + return self.data < string + + def __le__(self, string): + if isinstance(string, UserString): + return self.data <= string.data + return self.data <= string + + def __gt__(self, string): + if isinstance(string, UserString): + return self.data > string.data + return self.data > string + + def __ge__(self, string): + if isinstance(string, UserString): + return self.data >= string.data + return self.data >= string + + def __contains__(self, char): + if isinstance(char, UserString): + char = char.data + return char in self.data + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + return self.__class__(self.data[index]) + + def __add__(self, other): + if isinstance(other, UserString): + return self.__class__(self.data + other.data) + elif isinstance(other, str): + return self.__class__(self.data + other) + return self.__class__(self.data + str(other)) + + def __radd__(self, other): + if isinstance(other, str): + return self.__class__(other + self.data) + return self.__class__(str(other) + self.data) + + def __mul__(self, n): + return self.__class__(self.data * n) + + __rmul__ = __mul__ + + def __mod__(self, args): + return self.__class__(self.data % args) + + def __rmod__(self, template): + return self.__class__(str(template) % self) + + # the following methods are defined in alphabetical order: + def capitalize(self): + return self.__class__(self.data.capitalize()) + + def casefold(self): + return self.__class__(self.data.casefold()) + + def center(self, width, *args): + return self.__class__(self.data.center(width, *args)) + + def count(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.count(sub, start, end) + + def removeprefix(self, prefix, /): + if isinstance(prefix, UserString): + prefix = prefix.data + return self.__class__(self.data.removeprefix(prefix)) + + def removesuffix(self, suffix, /): + if isinstance(suffix, UserString): + suffix = suffix.data + return self.__class__(self.data.removesuffix(suffix)) + + def encode(self, encoding='utf-8', errors='strict'): + encoding = 'utf-8' if encoding is None else encoding + errors = 'strict' if errors is None else errors + return self.data.encode(encoding, errors) + + def endswith(self, suffix, start=0, end=_sys.maxsize): + return self.data.endswith(suffix, start, end) + + def expandtabs(self, tabsize=8): + return self.__class__(self.data.expandtabs(tabsize)) + + def find(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.find(sub, start, end) + + def format(self, /, *args, **kwds): + return self.data.format(*args, **kwds) + + def format_map(self, mapping): + return self.data.format_map(mapping) + + def index(self, sub, start=0, end=_sys.maxsize): + return self.data.index(sub, start, end) + + def isalpha(self): + return self.data.isalpha() + + def isalnum(self): + return self.data.isalnum() + + def isascii(self): + return self.data.isascii() + + def isdecimal(self): + return self.data.isdecimal() + + def isdigit(self): + return self.data.isdigit() + + def isidentifier(self): + return self.data.isidentifier() + + def islower(self): + return self.data.islower() + + def isnumeric(self): + return self.data.isnumeric() + + def isprintable(self): + return self.data.isprintable() + + def isspace(self): + return 
self.data.isspace() + + def istitle(self): + return self.data.istitle() + + def isupper(self): + return self.data.isupper() + + def join(self, seq): + return self.data.join(seq) + + def ljust(self, width, *args): + return self.__class__(self.data.ljust(width, *args)) + + def lower(self): + return self.__class__(self.data.lower()) + + def lstrip(self, chars=None): + return self.__class__(self.data.lstrip(chars)) + + maketrans = str.maketrans + + def partition(self, sep): + return self.data.partition(sep) + + def replace(self, old, new, maxsplit=-1): + if isinstance(old, UserString): + old = old.data + if isinstance(new, UserString): + new = new.data + return self.__class__(self.data.replace(old, new, maxsplit)) + + def rfind(self, sub, start=0, end=_sys.maxsize): + if isinstance(sub, UserString): + sub = sub.data + return self.data.rfind(sub, start, end) + + def rindex(self, sub, start=0, end=_sys.maxsize): + return self.data.rindex(sub, start, end) + + def rjust(self, width, *args): + return self.__class__(self.data.rjust(width, *args)) + + def rpartition(self, sep): + return self.data.rpartition(sep) + + def rstrip(self, chars=None): + return self.__class__(self.data.rstrip(chars)) + + def split(self, sep=None, maxsplit=-1): + return self.data.split(sep, maxsplit) + + def rsplit(self, sep=None, maxsplit=-1): + return self.data.rsplit(sep, maxsplit) + + def splitlines(self, keepends=False): + return self.data.splitlines(keepends) + + def startswith(self, prefix, start=0, end=_sys.maxsize): + return self.data.startswith(prefix, start, end) + + def strip(self, chars=None): + return self.__class__(self.data.strip(chars)) + + def swapcase(self): + return self.__class__(self.data.swapcase()) + + def title(self): + return self.__class__(self.data.title()) + + def translate(self, *args): + return self.__class__(self.data.translate(*args)) + + def upper(self): + return self.__class__(self.data.upper()) + + def zfill(self, width): + return self.__class__(self.data.zfill(width)) diff --git a/python310/collections/__pycache__/__init__.cpython-310.pyc b/python310/collections/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12f3155d20d5470be44e2ac35105c0441a7d2eff Binary files /dev/null and b/python310/collections/__pycache__/__init__.cpython-310.pyc differ diff --git a/python310/collections/__pycache__/abc.cpython-310.pyc b/python310/collections/__pycache__/abc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3cbbfdc0c2a5d66f8cc4f637dbb7571efcfe01d Binary files /dev/null and b/python310/collections/__pycache__/abc.cpython-310.pyc differ diff --git a/python310/collections/abc.py b/python310/collections/abc.py new file mode 100644 index 0000000000000000000000000000000000000000..86ca8b8a8414b3860aa80599394e806e156f4b5b --- /dev/null +++ b/python310/collections/abc.py @@ -0,0 +1,3 @@ +from _collections_abc import * +from _collections_abc import __all__ +from _collections_abc import _CallableGenericAlias diff --git a/python310/colorsys.py b/python310/colorsys.py new file mode 100644 index 0000000000000000000000000000000000000000..0f52512a67d87c571835467b411ec8ec4e691230 --- /dev/null +++ b/python310/colorsys.py @@ -0,0 +1,165 @@ +"""Conversion functions between RGB and other color systems. 
+ +This modules provides two functions for each color system ABC: + + rgb_to_abc(r, g, b) --> a, b, c + abc_to_rgb(a, b, c) --> r, g, b + +All inputs and outputs are triples of floats in the range [0.0...1.0] +(with the exception of I and Q, which covers a slightly larger range). +Inputs outside the valid range may cause exceptions or invalid outputs. + +Supported color systems: +RGB: Red, Green, Blue components +YIQ: Luminance, Chrominance (used by composite video signals) +HLS: Hue, Luminance, Saturation +HSV: Hue, Saturation, Value +""" + +# References: +# http://en.wikipedia.org/wiki/YIQ +# http://en.wikipedia.org/wiki/HLS_color_space +# http://en.wikipedia.org/wiki/HSV_color_space + +__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb", + "rgb_to_hsv","hsv_to_rgb"] + +# Some floating point constants + +ONE_THIRD = 1.0/3.0 +ONE_SIXTH = 1.0/6.0 +TWO_THIRD = 2.0/3.0 + +# YIQ: used by composite video signals (linear combinations of RGB) +# Y: perceived grey level (0.0 == black, 1.0 == white) +# I, Q: color components +# +# There are a great many versions of the constants used in these formulae. +# The ones in this library uses constants from the FCC version of NTSC. + +def rgb_to_yiq(r, g, b): + y = 0.30*r + 0.59*g + 0.11*b + i = 0.74*(r-y) - 0.27*(b-y) + q = 0.48*(r-y) + 0.41*(b-y) + return (y, i, q) + +def yiq_to_rgb(y, i, q): + # r = y + (0.27*q + 0.41*i) / (0.74*0.41 + 0.27*0.48) + # b = y + (0.74*q - 0.48*i) / (0.74*0.41 + 0.27*0.48) + # g = y - (0.30*(r-y) + 0.11*(b-y)) / 0.59 + + r = y + 0.9468822170900693*i + 0.6235565819861433*q + g = y - 0.27478764629897834*i - 0.6356910791873801*q + b = y - 1.1085450346420322*i + 1.7090069284064666*q + + if r < 0.0: + r = 0.0 + if g < 0.0: + g = 0.0 + if b < 0.0: + b = 0.0 + if r > 1.0: + r = 1.0 + if g > 1.0: + g = 1.0 + if b > 1.0: + b = 1.0 + return (r, g, b) + + +# HLS: Hue, Luminance, Saturation +# H: position in the spectrum +# L: color lightness +# S: color saturation + +def rgb_to_hls(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + sumc = (maxc+minc) + rangec = (maxc-minc) + l = sumc/2.0 + if minc == maxc: + return 0.0, l, 0.0 + if l <= 0.5: + s = rangec / sumc + else: + s = rangec / (2.0-sumc) + rc = (maxc-r) / rangec + gc = (maxc-g) / rangec + bc = (maxc-b) / rangec + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, l, s + +def hls_to_rgb(h, l, s): + if s == 0.0: + return l, l, l + if l <= 0.5: + m2 = l * (1.0+s) + else: + m2 = l+s-(l*s) + m1 = 2.0*l - m2 + return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD)) + +def _v(m1, m2, hue): + hue = hue % 1.0 + if hue < ONE_SIXTH: + return m1 + (m2-m1)*hue*6.0 + if hue < 0.5: + return m2 + if hue < TWO_THIRD: + return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0 + return m1 + + +# HSV: Hue, Saturation, Value +# H: position in the spectrum +# S: color saturation ("purity") +# V: color brightness + +def rgb_to_hsv(r, g, b): + maxc = max(r, g, b) + minc = min(r, g, b) + v = maxc + if minc == maxc: + return 0.0, 0.0, v + s = (maxc-minc) / maxc + rc = (maxc-r) / (maxc-minc) + gc = (maxc-g) / (maxc-minc) + bc = (maxc-b) / (maxc-minc) + if r == maxc: + h = bc-gc + elif g == maxc: + h = 2.0+rc-bc + else: + h = 4.0+gc-rc + h = (h/6.0) % 1.0 + return h, s, v + +def hsv_to_rgb(h, s, v): + if s == 0.0: + return v, v, v + i = int(h*6.0) # XXX assume int() truncates! 
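+    # i selects one of the six 60-degree hue sectors; the values computed
+    # below (f, p, q, t) blend the three RGB channels within that sector.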
+ f = (h*6.0) - i + p = v*(1.0 - s) + q = v*(1.0 - s*f) + t = v*(1.0 - s*(1.0-f)) + i = i%6 + if i == 0: + return v, t, p + if i == 1: + return q, v, p + if i == 2: + return p, v, t + if i == 3: + return p, q, v + if i == 4: + return t, p, v + if i == 5: + return v, p, q + # Cannot get here diff --git a/python310/compileall.py b/python310/compileall.py new file mode 100644 index 0000000000000000000000000000000000000000..50183ea85468aa74c367576a6666c490e4d4de2f --- /dev/null +++ b/python310/compileall.py @@ -0,0 +1,463 @@ +"""Module/script to byte-compile all .py files to .pyc files. + +When called as a script with arguments, this compiles the directories +given as arguments recursively; the -l option prevents it from +recursing into directories. + +Without arguments, if compiles all modules on sys.path, without +recursing into subdirectories. (Even though it should do so for +packages -- for now, you'll have to deal with packages separately.) + +See module py_compile for details of the actual byte-compilation. +""" +import os +import sys +import importlib.util +import py_compile +import struct +import filecmp + +from functools import partial +from pathlib import Path + +__all__ = ["compile_dir","compile_file","compile_path"] + +def _walk_dir(dir, maxlevels, quiet=0): + if quiet < 2 and isinstance(dir, os.PathLike): + dir = os.fspath(dir) + if not quiet: + print('Listing {!r}...'.format(dir)) + try: + names = os.listdir(dir) + except OSError: + if quiet < 2: + print("Can't list {!r}".format(dir)) + names = [] + names.sort() + for name in names: + if name == '__pycache__': + continue + fullname = os.path.join(dir, name) + if not os.path.isdir(fullname): + yield fullname + elif (maxlevels > 0 and name != os.curdir and name != os.pardir and + os.path.isdir(fullname) and not os.path.islink(fullname)): + yield from _walk_dir(fullname, maxlevels=maxlevels - 1, + quiet=quiet) + +def compile_dir(dir, maxlevels=None, ddir=None, force=False, + rx=None, quiet=0, legacy=False, optimize=-1, workers=1, + invalidation_mode=None, *, stripdir=None, + prependdir=None, limit_sl_dest=None, hardlink_dupes=False): + """Byte-compile all modules in the given directory tree. + + Arguments (only dir is required): + + dir: the directory to byte-compile + maxlevels: maximum recursion level (default `sys.getrecursionlimit()`) + ddir: the directory that will be prepended to the path to the + file as it is compiled into each byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: int or list of optimization levels or -1 for level of + the interpreter. Multiple levels leads to multiple compiled + files each with one optimization level. 
+ workers: maximum number of parallel workers + invalidation_mode: how the up-to-dateness of the pyc will be checked + stripdir: part of path to left-strip from source file path + prependdir: path to prepend to beginning of original file path, applied + after stripdir + limit_sl_dest: ignore symlinks if they are pointing outside of + the defined path + hardlink_dupes: hardlink duplicated pyc files + """ + ProcessPoolExecutor = None + if ddir is not None and (stripdir is not None or prependdir is not None): + raise ValueError(("Destination dir (ddir) cannot be used " + "in combination with stripdir or prependdir")) + if ddir is not None: + stripdir = dir + prependdir = ddir + ddir = None + if workers < 0: + raise ValueError('workers must be greater or equal to 0') + if workers != 1: + # Check if this is a system where ProcessPoolExecutor can function. + from concurrent.futures.process import _check_system_limits + try: + _check_system_limits() + except NotImplementedError: + workers = 1 + else: + from concurrent.futures import ProcessPoolExecutor + if maxlevels is None: + maxlevels = sys.getrecursionlimit() + files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels) + success = True + if workers != 1 and ProcessPoolExecutor is not None: + # If workers == 0, let ProcessPoolExecutor choose + workers = workers or None + with ProcessPoolExecutor(max_workers=workers) as executor: + results = executor.map(partial(compile_file, + ddir=ddir, force=force, + rx=rx, quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + stripdir=stripdir, + prependdir=prependdir, + limit_sl_dest=limit_sl_dest, + hardlink_dupes=hardlink_dupes), + files) + success = min(results, default=True) + else: + for file in files: + if not compile_file(file, ddir, force, rx, quiet, + legacy, optimize, invalidation_mode, + stripdir=stripdir, prependdir=prependdir, + limit_sl_dest=limit_sl_dest, + hardlink_dupes=hardlink_dupes): + success = False + return success + +def compile_file(fullname, ddir=None, force=False, rx=None, quiet=0, + legacy=False, optimize=-1, + invalidation_mode=None, *, stripdir=None, prependdir=None, + limit_sl_dest=None, hardlink_dupes=False): + """Byte-compile one file. + + Arguments (only fullname is required): + + fullname: the file to byte-compile + ddir: if given, the directory name compiled in to the + byte-code file. + force: if True, force compilation, even if timestamps are up-to-date + quiet: full output with False or 0, errors only with 1, + no output with 2 + legacy: if True, produce legacy pyc paths instead of PEP 3147 paths + optimize: int or list of optimization levels or -1 for level of + the interpreter. Multiple levels leads to multiple compiled + files each with one optimization level. + invalidation_mode: how the up-to-dateness of the pyc will be checked + stripdir: part of path to left-strip from source file path + prependdir: path to prepend to beginning of original file path, applied + after stripdir + limit_sl_dest: ignore symlinks if they are pointing outside of + the defined path. 
+ hardlink_dupes: hardlink duplicated pyc files + """ + + if ddir is not None and (stripdir is not None or prependdir is not None): + raise ValueError(("Destination dir (ddir) cannot be used " + "in combination with stripdir or prependdir")) + + success = True + fullname = os.fspath(fullname) + stripdir = os.fspath(stripdir) if stripdir is not None else None + name = os.path.basename(fullname) + + dfile = None + + if ddir is not None: + dfile = os.path.join(ddir, name) + + if stripdir is not None: + fullname_parts = fullname.split(os.path.sep) + stripdir_parts = stripdir.split(os.path.sep) + ddir_parts = list(fullname_parts) + + for spart, opart in zip(stripdir_parts, fullname_parts): + if spart == opart: + ddir_parts.remove(spart) + + dfile = os.path.join(*ddir_parts) + + if prependdir is not None: + if dfile is None: + dfile = os.path.join(prependdir, fullname) + else: + dfile = os.path.join(prependdir, dfile) + + if isinstance(optimize, int): + optimize = [optimize] + + # Use set() to remove duplicates. + # Use sorted() to create pyc files in a deterministic order. + optimize = sorted(set(optimize)) + + if hardlink_dupes and len(optimize) < 2: + raise ValueError("Hardlinking of duplicated bytecode makes sense " + "only for more than one optimization level") + + if rx is not None: + mo = rx.search(fullname) + if mo: + return success + + if limit_sl_dest is not None and os.path.islink(fullname): + if Path(limit_sl_dest).resolve() not in Path(fullname).resolve().parents: + return success + + opt_cfiles = {} + + if os.path.isfile(fullname): + for opt_level in optimize: + if legacy: + opt_cfiles[opt_level] = fullname + 'c' + else: + if opt_level >= 0: + opt = opt_level if opt_level >= 1 else '' + cfile = (importlib.util.cache_from_source( + fullname, optimization=opt)) + opt_cfiles[opt_level] = cfile + else: + cfile = importlib.util.cache_from_source(fullname) + opt_cfiles[opt_level] = cfile + + head, tail = name[:-3], name[-3:] + if tail == '.py': + if not force: + try: + mtime = int(os.stat(fullname).st_mtime) + expect = struct.pack('<4sLL', importlib.util.MAGIC_NUMBER, + 0, mtime & 0xFFFF_FFFF) + for cfile in opt_cfiles.values(): + with open(cfile, 'rb') as chandle: + actual = chandle.read(12) + if expect != actual: + break + else: + return success + except OSError: + pass + if not quiet: + print('Compiling {!r}...'.format(fullname)) + try: + for index, opt_level in enumerate(optimize): + cfile = opt_cfiles[opt_level] + ok = py_compile.compile(fullname, cfile, dfile, True, + optimize=opt_level, + invalidation_mode=invalidation_mode) + if index > 0 and hardlink_dupes: + previous_cfile = opt_cfiles[optimize[index - 1]] + if filecmp.cmp(cfile, previous_cfile, shallow=False): + os.unlink(cfile) + os.link(previous_cfile, cfile) + except py_compile.PyCompileError as err: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + # escape non-printable characters in msg + encoding = sys.stdout.encoding or sys.getdefaultencoding() + msg = err.msg.encode(encoding, errors='backslashreplace').decode(encoding) + print(msg) + except (SyntaxError, UnicodeError, OSError) as e: + success = False + if quiet >= 2: + return success + elif quiet: + print('*** Error compiling {!r}...'.format(fullname)) + else: + print('*** ', end='') + print(e.__class__.__name__ + ':', e) + else: + if ok == 0: + success = False + return success + +def compile_path(skip_curdir=1, maxlevels=0, force=False, quiet=0, + legacy=False, 
optimize=-1, + invalidation_mode=None): + """Byte-compile all module on sys.path. + + Arguments (all optional): + + skip_curdir: if true, skip current directory (default True) + maxlevels: max recursion level (default 0) + force: as for compile_dir() (default False) + quiet: as for compile_dir() (default 0) + legacy: as for compile_dir() (default False) + optimize: as for compile_dir() (default -1) + invalidation_mode: as for compiler_dir() + """ + success = True + for dir in sys.path: + if (not dir or dir == os.curdir) and skip_curdir: + if quiet < 2: + print('Skipping current directory') + else: + success = success and compile_dir( + dir, + maxlevels, + None, + force, + quiet=quiet, + legacy=legacy, + optimize=optimize, + invalidation_mode=invalidation_mode, + ) + return success + + +def main(): + """Script main program.""" + import argparse + + parser = argparse.ArgumentParser( + description='Utilities to support installing Python libraries.') + parser.add_argument('-l', action='store_const', const=0, + default=None, dest='maxlevels', + help="don't recurse into subdirectories") + parser.add_argument('-r', type=int, dest='recursion', + help=('control the maximum recursion level. ' + 'if `-l` and `-r` options are specified, ' + 'then `-r` takes precedence.')) + parser.add_argument('-f', action='store_true', dest='force', + help='force rebuild even if timestamps are up to date') + parser.add_argument('-q', action='count', dest='quiet', default=0, + help='output only error messages; -qq will suppress ' + 'the error messages as well.') + parser.add_argument('-b', action='store_true', dest='legacy', + help='use legacy (pre-PEP3147) compiled file locations') + parser.add_argument('-d', metavar='DESTDIR', dest='ddir', default=None, + help=('directory to prepend to file paths for use in ' + 'compile-time tracebacks and in runtime ' + 'tracebacks in cases where the source file is ' + 'unavailable')) + parser.add_argument('-s', metavar='STRIPDIR', dest='stripdir', + default=None, + help=('part of path to left-strip from path ' + 'to source file - for example buildroot. ' + '`-d` and `-s` options cannot be ' + 'specified together.')) + parser.add_argument('-p', metavar='PREPENDDIR', dest='prependdir', + default=None, + help=('path to add as prefix to path ' + 'to source file - for example / to make ' + 'it absolute when some part is removed ' + 'by `-s` option. 
' + '`-d` and `-p` options cannot be ' + 'specified together.')) + parser.add_argument('-x', metavar='REGEXP', dest='rx', default=None, + help=('skip files matching the regular expression; ' + 'the regexp is searched for in the full path ' + 'of each file considered for compilation')) + parser.add_argument('-i', metavar='FILE', dest='flist', + help=('add all the files and directories listed in ' + 'FILE to the list considered for compilation; ' + 'if "-", names are read from stdin')) + parser.add_argument('compile_dest', metavar='FILE|DIR', nargs='*', + help=('zero or more file and directory names ' + 'to compile; if no arguments given, defaults ' + 'to the equivalent of -l sys.path')) + parser.add_argument('-j', '--workers', default=1, + type=int, help='Run compileall concurrently') + invalidation_modes = [mode.name.lower().replace('_', '-') + for mode in py_compile.PycInvalidationMode] + parser.add_argument('--invalidation-mode', + choices=sorted(invalidation_modes), + help=('set .pyc invalidation mode; defaults to ' + '"checked-hash" if the SOURCE_DATE_EPOCH ' + 'environment variable is set, and ' + '"timestamp" otherwise.')) + parser.add_argument('-o', action='append', type=int, dest='opt_levels', + help=('Optimization levels to run compilation with. ' + 'Default is -1 which uses the optimization level ' + 'of the Python interpreter itself (see -O).')) + parser.add_argument('-e', metavar='DIR', dest='limit_sl_dest', + help='Ignore symlinks pointing outsite of the DIR') + parser.add_argument('--hardlink-dupes', action='store_true', + dest='hardlink_dupes', + help='Hardlink duplicated pyc files') + + args = parser.parse_args() + compile_dests = args.compile_dest + + if args.rx: + import re + args.rx = re.compile(args.rx) + + if args.limit_sl_dest == "": + args.limit_sl_dest = None + + if args.recursion is not None: + maxlevels = args.recursion + else: + maxlevels = args.maxlevels + + if args.opt_levels is None: + args.opt_levels = [-1] + + if len(args.opt_levels) == 1 and args.hardlink_dupes: + parser.error(("Hardlinking of duplicated bytecode makes sense " + "only for more than one optimization level.")) + + if args.ddir is not None and ( + args.stripdir is not None or args.prependdir is not None + ): + parser.error("-d cannot be used in combination with -s or -p") + + # if flist is provided then load it + if args.flist: + try: + with (sys.stdin if args.flist=='-' else + open(args.flist, encoding="utf-8")) as f: + for line in f: + compile_dests.append(line.strip()) + except OSError: + if args.quiet < 2: + print("Error reading file list {}".format(args.flist)) + return False + + if args.invalidation_mode: + ivl_mode = args.invalidation_mode.replace('-', '_').upper() + invalidation_mode = py_compile.PycInvalidationMode[ivl_mode] + else: + invalidation_mode = None + + success = True + try: + if compile_dests: + for dest in compile_dests: + if os.path.isfile(dest): + if not compile_file(dest, args.ddir, args.force, args.rx, + args.quiet, args.legacy, + invalidation_mode=invalidation_mode, + stripdir=args.stripdir, + prependdir=args.prependdir, + optimize=args.opt_levels, + limit_sl_dest=args.limit_sl_dest, + hardlink_dupes=args.hardlink_dupes): + success = False + else: + if not compile_dir(dest, maxlevels, args.ddir, + args.force, args.rx, args.quiet, + args.legacy, workers=args.workers, + invalidation_mode=invalidation_mode, + stripdir=args.stripdir, + prependdir=args.prependdir, + optimize=args.opt_levels, + limit_sl_dest=args.limit_sl_dest, + hardlink_dupes=args.hardlink_dupes): + 
success = False + return success + else: + return compile_path(legacy=args.legacy, force=args.force, + quiet=args.quiet, + invalidation_mode=invalidation_mode) + except KeyboardInterrupt: + if args.quiet < 2: + print("\n[interrupted]") + return False + return True + + +if __name__ == '__main__': + exit_status = int(not main()) + sys.exit(exit_status) diff --git a/python310/concurrent/__init__.py b/python310/concurrent/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..196d3788575993eb403588b01ac6b0a3fbba13da --- /dev/null +++ b/python310/concurrent/__init__.py @@ -0,0 +1 @@ +# This directory is a Python package. diff --git a/python310/concurrent/__pycache__/__init__.cpython-310.pyc b/python310/concurrent/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..21ba4a00c1de3addcfa5a7b8f3a7094e61271d55 Binary files /dev/null and b/python310/concurrent/__pycache__/__init__.cpython-310.pyc differ diff --git a/python310/concurrent/futures/__init__.py b/python310/concurrent/futures/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d746aeac50a99763ecce100d17c75fbca123a19d --- /dev/null +++ b/python310/concurrent/futures/__init__.py @@ -0,0 +1,53 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Execute computations asynchronously using threads or processes.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures._base import (FIRST_COMPLETED, + FIRST_EXCEPTION, + ALL_COMPLETED, + CancelledError, + TimeoutError, + InvalidStateError, + BrokenExecutor, + Future, + Executor, + wait, + as_completed) + +__all__ = ( + 'FIRST_COMPLETED', + 'FIRST_EXCEPTION', + 'ALL_COMPLETED', + 'CancelledError', + 'TimeoutError', + 'BrokenExecutor', + 'Future', + 'Executor', + 'wait', + 'as_completed', + 'ProcessPoolExecutor', + 'ThreadPoolExecutor', +) + + +def __dir__(): + return __all__ + ('__author__', '__doc__') + + +def __getattr__(name): + global ProcessPoolExecutor, ThreadPoolExecutor + + if name == 'ProcessPoolExecutor': + from .process import ProcessPoolExecutor as pe + ProcessPoolExecutor = pe + return pe + + if name == 'ThreadPoolExecutor': + from .thread import ThreadPoolExecutor as te + ThreadPoolExecutor = te + return te + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/python310/concurrent/futures/__pycache__/__init__.cpython-310.pyc b/python310/concurrent/futures/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe70f8a313adb60ce6fefb15b26ae2985bf04d1f Binary files /dev/null and b/python310/concurrent/futures/__pycache__/__init__.cpython-310.pyc differ diff --git a/python310/concurrent/futures/__pycache__/_base.cpython-310.pyc b/python310/concurrent/futures/__pycache__/_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d329fc6aa68b5be95bbdd89b44fa67158cfe7830 Binary files /dev/null and b/python310/concurrent/futures/__pycache__/_base.cpython-310.pyc differ diff --git a/python310/concurrent/futures/__pycache__/process.cpython-310.pyc b/python310/concurrent/futures/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d10162d4fd5c599c22e771ff7fa691d6bca9ba82 Binary files /dev/null and b/python310/concurrent/futures/__pycache__/process.cpython-310.pyc differ diff --git 
a/python310/concurrent/futures/__pycache__/thread.cpython-310.pyc b/python310/concurrent/futures/__pycache__/thread.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..61cc434880e3c6738235761506658c6e6efddfc1 Binary files /dev/null and b/python310/concurrent/futures/__pycache__/thread.cpython-310.pyc differ diff --git a/python310/concurrent/futures/_base.py b/python310/concurrent/futures/_base.py new file mode 100644 index 0000000000000000000000000000000000000000..a329e74d11fdf2b1f93e00701e4cc64ef0b52e9b --- /dev/null +++ b/python310/concurrent/futures/_base.py @@ -0,0 +1,656 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import collections +import logging +import threading +import time +import types + +FIRST_COMPLETED = 'FIRST_COMPLETED' +FIRST_EXCEPTION = 'FIRST_EXCEPTION' +ALL_COMPLETED = 'ALL_COMPLETED' +_AS_COMPLETED = '_AS_COMPLETED' + +# Possible future states (for internal use by the futures package). +PENDING = 'PENDING' +RUNNING = 'RUNNING' +# The future was cancelled by the user... +CANCELLED = 'CANCELLED' +# ...and _Waiter.add_cancelled() was called by a worker. +CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' +FINISHED = 'FINISHED' + +_FUTURE_STATES = [ + PENDING, + RUNNING, + CANCELLED, + CANCELLED_AND_NOTIFIED, + FINISHED +] + +_STATE_TO_DESCRIPTION_MAP = { + PENDING: "pending", + RUNNING: "running", + CANCELLED: "cancelled", + CANCELLED_AND_NOTIFIED: "cancelled", + FINISHED: "finished" +} + +# Logger for internal use by the futures package. +LOGGER = logging.getLogger("concurrent.futures") + +class Error(Exception): + """Base class for all future-related exceptions.""" + pass + +class CancelledError(Error): + """The Future was cancelled.""" + pass + +class TimeoutError(Error): + """The operation exceeded the given deadline.""" + pass + +class InvalidStateError(Error): + """The operation is not allowed in this state.""" + pass + +class _Waiter(object): + """Provides the event that wait() and as_completed() block on.""" + def __init__(self): + self.event = threading.Event() + self.finished_futures = [] + + def add_result(self, future): + self.finished_futures.append(future) + + def add_exception(self, future): + self.finished_futures.append(future) + + def add_cancelled(self, future): + self.finished_futures.append(future) + +class _AsCompletedWaiter(_Waiter): + """Used by as_completed().""" + + def __init__(self): + super(_AsCompletedWaiter, self).__init__() + self.lock = threading.Lock() + + def add_result(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_result(future) + self.event.set() + + def add_exception(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_exception(future) + self.event.set() + + def add_cancelled(self, future): + with self.lock: + super(_AsCompletedWaiter, self).add_cancelled(future) + self.event.set() + +class _FirstCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_COMPLETED).""" + + def add_result(self, future): + super().add_result(future) + self.event.set() + + def add_exception(self, future): + super().add_exception(future) + self.event.set() + + def add_cancelled(self, future): + super().add_cancelled(future) + self.event.set() + +class _AllCompletedWaiter(_Waiter): + """Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED).""" + + def __init__(self, num_pending_calls, stop_on_exception): + self.num_pending_calls = num_pending_calls + 
self.stop_on_exception = stop_on_exception + self.lock = threading.Lock() + super().__init__() + + def _decrement_pending_calls(self): + with self.lock: + self.num_pending_calls -= 1 + if not self.num_pending_calls: + self.event.set() + + def add_result(self, future): + super().add_result(future) + self._decrement_pending_calls() + + def add_exception(self, future): + super().add_exception(future) + if self.stop_on_exception: + self.event.set() + else: + self._decrement_pending_calls() + + def add_cancelled(self, future): + super().add_cancelled(future) + self._decrement_pending_calls() + +class _AcquireFutures(object): + """A context manager that does an ordered acquire of Future conditions.""" + + def __init__(self, futures): + self.futures = sorted(futures, key=id) + + def __enter__(self): + for future in self.futures: + future._condition.acquire() + + def __exit__(self, *args): + for future in self.futures: + future._condition.release() + +def _create_and_install_waiters(fs, return_when): + if return_when == _AS_COMPLETED: + waiter = _AsCompletedWaiter() + elif return_when == FIRST_COMPLETED: + waiter = _FirstCompletedWaiter() + else: + pending_count = sum( + f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs) + + if return_when == FIRST_EXCEPTION: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True) + elif return_when == ALL_COMPLETED: + waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False) + else: + raise ValueError("Invalid return condition: %r" % return_when) + + for f in fs: + f._waiters.append(waiter) + + return waiter + + +def _yield_finished_futures(fs, waiter, ref_collect): + """ + Iterate on the list *fs*, yielding finished futures one by one in + reverse order. + Before yielding a future, *waiter* is removed from its waiters + and the future is removed from each set in the collection of sets + *ref_collect*. + + The aim of this function is to avoid keeping stale references after + the future is yielded and before the iterator resumes. + """ + while fs: + f = fs[-1] + for futures_set in ref_collect: + futures_set.remove(f) + with f._condition: + f._waiters.remove(waiter) + del f + # Careful not to keep a reference to the popped value + yield fs.pop() + + +def as_completed(fs, timeout=None): + """An iterator over the given futures that yields each as it completes. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + iterate over. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + + Returns: + An iterator that yields the given Futures as they complete (finished or + cancelled). If any given Futures are duplicated, they will be returned + once. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. 
+ """ + if timeout is not None: + end_time = timeout + time.monotonic() + + fs = set(fs) + total_futures = len(fs) + with _AcquireFutures(fs): + finished = set( + f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) + pending = fs - finished + waiter = _create_and_install_waiters(fs, _AS_COMPLETED) + finished = list(finished) + try: + yield from _yield_finished_futures(finished, waiter, + ref_collect=(fs,)) + + while pending: + if timeout is None: + wait_timeout = None + else: + wait_timeout = end_time - time.monotonic() + if wait_timeout < 0: + raise TimeoutError( + '%d (of %d) futures unfinished' % ( + len(pending), total_futures)) + + waiter.event.wait(wait_timeout) + + with waiter.lock: + finished = waiter.finished_futures + waiter.finished_futures = [] + waiter.event.clear() + + # reverse to keep finishing order + finished.reverse() + yield from _yield_finished_futures(finished, waiter, + ref_collect=(fs, pending)) + + finally: + # Remove waiter from unfinished futures + for f in fs: + with f._condition: + f._waiters.remove(waiter) + +DoneAndNotDoneFutures = collections.namedtuple( + 'DoneAndNotDoneFutures', 'done not_done') +def wait(fs, timeout=None, return_when=ALL_COMPLETED): + """Wait for the futures in the given sequence to complete. + + Args: + fs: The sequence of Futures (possibly created by different Executors) to + wait upon. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + return_when: Indicates when this function should return. The options + are: + + FIRST_COMPLETED - Return when any future finishes or is + cancelled. + FIRST_EXCEPTION - Return when any future finishes by raising an + exception. If no future raises an exception + then it is equivalent to ALL_COMPLETED. + ALL_COMPLETED - Return when all futures finish or are cancelled. + + Returns: + A named 2-tuple of sets. The first set, named 'done', contains the + futures that completed (is finished or cancelled) before the wait + completed. The second set, named 'not_done', contains uncompleted + futures. Duplicate futures given to *fs* are removed and will be + returned only once. + """ + fs = set(fs) + with _AcquireFutures(fs): + done = {f for f in fs + if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]} + not_done = fs - done + if (return_when == FIRST_COMPLETED) and done: + return DoneAndNotDoneFutures(done, not_done) + elif (return_when == FIRST_EXCEPTION) and done: + if any(f for f in done + if not f.cancelled() and f.exception() is not None): + return DoneAndNotDoneFutures(done, not_done) + + if len(done) == len(fs): + return DoneAndNotDoneFutures(done, not_done) + + waiter = _create_and_install_waiters(fs, return_when) + + waiter.event.wait(timeout) + for f in fs: + with f._condition: + f._waiters.remove(waiter) + + done.update(waiter.finished_futures) + return DoneAndNotDoneFutures(done, fs - done) + + +def _result_or_cancel(fut, timeout=None): + try: + try: + return fut.result(timeout) + finally: + fut.cancel() + finally: + # Break a reference cycle with the exception in self._exception + del fut + + +class Future(object): + """Represents the result of an asynchronous computation.""" + + def __init__(self): + """Initializes the future. 
Should not be called by clients.""" + self._condition = threading.Condition() + self._state = PENDING + self._result = None + self._exception = None + self._waiters = [] + self._done_callbacks = [] + + def _invoke_callbacks(self): + for callback in self._done_callbacks: + try: + callback(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def __repr__(self): + with self._condition: + if self._state == FINISHED: + if self._exception: + return '<%s at %#x state=%s raised %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._exception.__class__.__name__) + else: + return '<%s at %#x state=%s returned %s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state], + self._result.__class__.__name__) + return '<%s at %#x state=%s>' % ( + self.__class__.__name__, + id(self), + _STATE_TO_DESCRIPTION_MAP[self._state]) + + def cancel(self): + """Cancel the future if possible. + + Returns True if the future was cancelled, False otherwise. A future + cannot be cancelled if it is running or has already completed. + """ + with self._condition: + if self._state in [RUNNING, FINISHED]: + return False + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + return True + + self._state = CANCELLED + self._condition.notify_all() + + self._invoke_callbacks() + return True + + def cancelled(self): + """Return True if the future was cancelled.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED] + + def running(self): + """Return True if the future is currently executing.""" + with self._condition: + return self._state == RUNNING + + def done(self): + """Return True if the future was cancelled or finished executing.""" + with self._condition: + return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED] + + def __get_result(self): + if self._exception: + try: + raise self._exception + finally: + # Break a reference cycle with the exception in self._exception + self = None + else: + return self._result + + def add_done_callback(self, fn): + """Attaches a callable that will be called when the future finishes. + + Args: + fn: A callable that will be called with this future as its only + argument when the future completes or is cancelled. The callable + will always be called by a thread in the same process in which + it was added. If the future has already completed or been + cancelled then the callable will be called immediately. These + callables are called in the order that they were added. + """ + with self._condition: + if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]: + self._done_callbacks.append(fn) + return + try: + fn(self) + except Exception: + LOGGER.exception('exception calling callback for %r', self) + + def result(self, timeout=None): + """Return the result of the call that the future represents. + + Args: + timeout: The number of seconds to wait for the result if the future + isn't done. If None, then there is no limit on the wait time. + + Returns: + The result of the call that the future represents. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + Exception: If the call raised then that exception will be raised. 
+ """ + try: + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self.__get_result() + else: + raise TimeoutError() + finally: + # Break a reference cycle with the exception in self._exception + self = None + + def exception(self, timeout=None): + """Return the exception raised by the call that the future represents. + + Args: + timeout: The number of seconds to wait for the exception if the + future isn't done. If None, then there is no limit on the wait + time. + + Returns: + The exception raised by the call that the future represents or None + if the call completed without raising. + + Raises: + CancelledError: If the future was cancelled. + TimeoutError: If the future didn't finish executing before the given + timeout. + """ + + with self._condition: + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + + self._condition.wait(timeout) + + if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]: + raise CancelledError() + elif self._state == FINISHED: + return self._exception + else: + raise TimeoutError() + + # The following methods should only be used by Executors and in tests. + def set_running_or_notify_cancel(self): + """Mark the future as running or process any cancel notifications. + + Should only be used by Executor implementations and unit tests. + + If the future has been cancelled (cancel() was called and returned + True) then any threads waiting on the future completing (though calls + to as_completed() or wait()) are notified and False is returned. + + If the future was not cancelled then it is put in the running state + (future calls to running() will return True) and True is returned. + + This method should be called by Executor implementations before + executing the work associated with this future. If this method returns + False then the work should not be executed. + + Returns: + False if the Future was cancelled, True otherwise. + + Raises: + RuntimeError: if this method was already called or if set_result() + or set_exception() was called. + """ + with self._condition: + if self._state == CANCELLED: + self._state = CANCELLED_AND_NOTIFIED + for waiter in self._waiters: + waiter.add_cancelled(self) + # self._condition.notify_all() is not necessary because + # self.cancel() triggers a notification. + return False + elif self._state == PENDING: + self._state = RUNNING + return True + else: + LOGGER.critical('Future %s in unexpected state: %s', + id(self), + self._state) + raise RuntimeError('Future in unexpected state') + + def set_result(self, result): + """Sets the return value of work associated with the future. + + Should only be used by Executor implementations and unit tests. + """ + with self._condition: + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._result = result + self._state = FINISHED + for waiter in self._waiters: + waiter.add_result(self) + self._condition.notify_all() + self._invoke_callbacks() + + def set_exception(self, exception): + """Sets the result of the future as being the given exception. + + Should only be used by Executor implementations and unit tests. 
+ """ + with self._condition: + if self._state in {CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED}: + raise InvalidStateError('{}: {!r}'.format(self._state, self)) + self._exception = exception + self._state = FINISHED + for waiter in self._waiters: + waiter.add_exception(self) + self._condition.notify_all() + self._invoke_callbacks() + + __class_getitem__ = classmethod(types.GenericAlias) + +class Executor(object): + """This is an abstract base class for concrete asynchronous executors.""" + + def submit(self, fn, /, *args, **kwargs): + """Submits a callable to be executed with the given arguments. + + Schedules the callable to be executed as fn(*args, **kwargs) and returns + a Future instance representing the execution of the callable. + + Returns: + A Future representing the given call. + """ + raise NotImplementedError() + + def map(self, fn, *iterables, timeout=None, chunksize=1): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: The size of the chunks the iterable will be broken into + before being passed to a child process. This argument is only + used by ProcessPoolExecutor; it is ignored by + ThreadPoolExecutor. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if timeout is not None: + end_time = timeout + time.monotonic() + + fs = [self.submit(fn, *args) for args in zip(*iterables)] + + # Yield must be hidden in closure so that the futures are submitted + # before the first iterator value is required. + def result_iterator(): + try: + # reverse to keep finishing order + fs.reverse() + while fs: + # Careful not to keep a reference to the popped future + if timeout is None: + yield _result_or_cancel(fs.pop()) + else: + yield _result_or_cancel(fs.pop(), end_time - time.monotonic()) + finally: + for future in fs: + future.cancel() + return result_iterator() + + def shutdown(self, wait=True, *, cancel_futures=False): + """Clean-up the resources associated with the Executor. + + It is safe to call this method several times. Otherwise, no other + methods can be called after this one. + + Args: + wait: If True then shutdown will not return until all running + futures have finished executing and the resources used by the + executor have been reclaimed. + cancel_futures: If True then shutdown will cancel all pending + futures. Futures that are completed or running will not be + cancelled. + """ + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.shutdown(wait=True) + return False + + +class BrokenExecutor(RuntimeError): + """ + Raised when a executor has become non-functional after a severe failure. + """ diff --git a/python310/concurrent/futures/process.py b/python310/concurrent/futures/process.py new file mode 100644 index 0000000000000000000000000000000000000000..81cc1fb63026cbcdbf3c8a73dda8c42f47367a3c --- /dev/null +++ b/python310/concurrent/futures/process.py @@ -0,0 +1,791 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ProcessPoolExecutor. 
+ +The following diagram and text describe the data-flow through the system: + +|======================= In-process =====================|== Out-of-process ==| + ++----------+ +----------+ +--------+ +-----------+ +---------+ +| | => | Work Ids | | | | Call Q | | Process | +| | +----------+ | | +-----------+ | Pool | +| | | ... | | | | ... | +---------+ +| | | 6 | => | | => | 5, call() | => | | +| | | 7 | | | | ... | | | +| Process | | ... | | Local | +-----------+ | Process | +| Pool | +----------+ | Worker | | #1..n | +| Executor | | Thread | | | +| | +----------- + | | +-----------+ | | +| | <=> | Work Items | <=> | | <= | Result Q | <= | | +| | +------------+ | | +-----------+ | | +| | | 6: call() | | | | ... | | | +| | | future | | | | 4, result | | | +| | | ... | | | | 3, except | | | ++----------+ +------------+ +--------+ +-----------+ +---------+ + +Executor.submit() called: +- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict +- adds the id of the _WorkItem to the "Work Ids" queue + +Local worker thread: +- reads work ids from the "Work Ids" queue and looks up the corresponding + WorkItem from the "Work Items" dict: if the work item has been cancelled then + it is simply removed from the dict, otherwise it is repackaged as a + _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q" + until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because + calls placed in the "Call Q" can no longer be cancelled with Future.cancel(). +- reads _ResultItems from "Result Q", updates the future stored in the + "Work Items" dict and deletes the dict entry + +Process #1..n: +- reads _CallItems from "Call Q", executes the calls, and puts the resulting + _ResultItems in "Result Q" +""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +import os +from concurrent.futures import _base +import queue +import multiprocessing as mp +import multiprocessing.connection +from multiprocessing.queues import Queue +import threading +import weakref +from functools import partial +import itertools +import sys +import traceback + + +_threads_wakeups = weakref.WeakKeyDictionary() +_global_shutdown = False + + +class _ThreadWakeup: + def __init__(self): + self._closed = False + self._reader, self._writer = mp.Pipe(duplex=False) + + def close(self): + if not self._closed: + self._closed = True + self._writer.close() + self._reader.close() + + def wakeup(self): + if not self._closed: + self._writer.send_bytes(b"") + + def clear(self): + if not self._closed: + while self._reader.poll(): + self._reader.recv_bytes() + + +def _python_exit(): + global _global_shutdown + _global_shutdown = True + items = list(_threads_wakeups.items()) + for _, thread_wakeup in items: + # call not protected by ProcessPoolExecutor._shutdown_lock + thread_wakeup.wakeup() + for t, _ in items: + t.join() + +# Register for `_python_exit()` to be called just before joining all +# non-daemon threads. This is used instead of `atexit.register()` for +# compatibility with subinterpreters, which no longer support daemon threads. +# See bpo-39812 for context. +threading._register_atexit(_python_exit) + +# Controls how many more calls than processes will be queued in the call queue. +# A smaller number will mean that processes spend more time idle waiting for +# work while a larger number will make Future.cancel() succeed less frequently +# (Futures in the call queue cannot be cancelled). 
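A minimal sketch of the submit/cancel flow described in the docstring above, assuming the executor is driven from a top-level script; the `slow_square` task is a stand-in for this sketch and is not part of the vendored module. The `EXTRA_QUEUED_CALLS` setting assigned immediately below bounds how many calls sit in the call queue beyond the worker count, and those are exactly the calls that `Future.cancel()` can no longer reach.

import time
from concurrent.futures import ProcessPoolExecutor, as_completed

def slow_square(x):
    time.sleep(0.1)      # stand-in for real work done in a worker process
    return x * x

if __name__ == "__main__":   # guard needed when workers start via spawn
    with ProcessPoolExecutor(max_workers=2) as pool:
        futures = [pool.submit(slow_square, i) for i in range(8)]
        # Items still in the "Work Items" dict can be cancelled; calls that
        # have already moved into the call queue or into a worker cannot.
        cancelled = [f for f in futures if f.cancel()]
        for fut in as_completed(f for f in futures if not f.cancelled()):
            print(fut.result())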
+EXTRA_QUEUED_CALLS = 1 + + +# On Windows, WaitForMultipleObjects is used to wait for processes to finish. +# It can wait on, at most, 63 objects. There is an overhead of two objects: +# - the result queue reader +# - the thread wakeup reader +_MAX_WINDOWS_WORKERS = 63 - 2 + +# Hack to embed stringification of remote traceback in local traceback + +class _RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class _ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + # Traceback object needs to be garbage-collected as its frames + # contain references to all the objects in the exception scope + self.exc.__traceback__ = None + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return _rebuild_exc, (self.exc, self.tb) + +def _rebuild_exc(exc, tb): + exc.__cause__ = _RemoteTraceback(tb) + return exc + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + +class _ResultItem(object): + def __init__(self, work_id, exception=None, result=None): + self.work_id = work_id + self.exception = exception + self.result = result + +class _CallItem(object): + def __init__(self, work_id, fn, args, kwargs): + self.work_id = work_id + self.fn = fn + self.args = args + self.kwargs = kwargs + + +class _SafeQueue(Queue): + """Safe Queue set exception to the future object linked to a job""" + def __init__(self, max_size=0, *, ctx, pending_work_items, shutdown_lock, + thread_wakeup): + self.pending_work_items = pending_work_items + self.shutdown_lock = shutdown_lock + self.thread_wakeup = thread_wakeup + super().__init__(max_size, ctx=ctx) + + def _on_queue_feeder_error(self, e, obj): + if isinstance(obj, _CallItem): + tb = traceback.format_exception(type(e), e, e.__traceback__) + e.__cause__ = _RemoteTraceback('\n"""\n{}"""'.format(''.join(tb))) + work_item = self.pending_work_items.pop(obj.work_id, None) + with self.shutdown_lock: + self.thread_wakeup.wakeup() + # work_item can be None if another process terminated. In this + # case, the executor_manager_thread fails all work_items + # with BrokenProcessPool + if work_item is not None: + work_item.future.set_exception(e) + else: + super()._on_queue_feeder_error(e, obj) + + +def _get_chunks(*iterables, chunksize): + """ Iterates over zip()ed iterables in chunks. """ + it = zip(*iterables) + while True: + chunk = tuple(itertools.islice(it, chunksize)) + if not chunk: + return + yield chunk + + +def _process_chunk(fn, chunk): + """ Processes a chunk of an iterable passed to map. + + Runs the function passed to map() on a chunk of the + iterable passed to map. + + This function is run in a separate process. + + """ + return [fn(*args) for args in chunk] + + +def _sendback_result(result_queue, work_id, result=None, exception=None): + """Safely send back the given result or exception""" + try: + result_queue.put(_ResultItem(work_id, result=result, + exception=exception)) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + result_queue.put(_ResultItem(work_id, exception=exc)) + + +def _process_worker(call_queue, result_queue, initializer, initargs): + """Evaluates calls from call_queue and places the results in result_queue. + + This worker is run in a separate process. + + Args: + call_queue: A ctx.Queue of _CallItems that will be read and + evaluated by the worker. 
+ result_queue: A ctx.Queue of _ResultItems that will written + to by the worker. + initializer: A callable initializer, or None + initargs: A tuple of args for the initializer + """ + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + _base.LOGGER.critical('Exception in initializer:', exc_info=True) + # The parent will notice that the process stopped and + # mark the pool broken + return + while True: + call_item = call_queue.get(block=True) + if call_item is None: + # Wake up queue management thread + result_queue.put(os.getpid()) + return + try: + r = call_item.fn(*call_item.args, **call_item.kwargs) + except BaseException as e: + exc = _ExceptionWithTraceback(e, e.__traceback__) + _sendback_result(result_queue, call_item.work_id, exception=exc) + else: + _sendback_result(result_queue, call_item.work_id, result=r) + del r + + # Liberate the resource as soon as possible, to avoid holding onto + # open files or shared memory that is not needed anymore + del call_item + + +class _ExecutorManagerThread(threading.Thread): + """Manages the communication between this process and the worker processes. + + The manager is run in a local thread. + + Args: + executor: A reference to the ProcessPoolExecutor that owns + this thread. A weakref will be own by the manager as well as + references to internal objects used to introspect the state of + the executor. + """ + + def __init__(self, executor): + # Store references to necessary internals of the executor. + + # A _ThreadWakeup to allow waking up the queue_manager_thread from the + # main Thread and avoid deadlocks caused by permanently locked queues. + self.thread_wakeup = executor._executor_manager_thread_wakeup + self.shutdown_lock = executor._shutdown_lock + + # A weakref.ref to the ProcessPoolExecutor that owns this thread. Used + # to determine if the ProcessPoolExecutor has been garbage collected + # and that the manager can exit. + # When the executor gets garbage collected, the weakref callback + # will wake up the queue management thread so that it can terminate + # if there is no pending work item. + def weakref_cb(_, + thread_wakeup=self.thread_wakeup, + shutdown_lock=self.shutdown_lock): + mp.util.debug('Executor collected: triggering callback for' + ' QueueManager wakeup') + with shutdown_lock: + thread_wakeup.wakeup() + + self.executor_reference = weakref.ref(executor, weakref_cb) + + # A list of the ctx.Process instances used as workers. + self.processes = executor._processes + + # A ctx.Queue that will be filled with _CallItems derived from + # _WorkItems for processing by the process workers. + self.call_queue = executor._call_queue + + # A ctx.SimpleQueue of _ResultItems generated by the process workers. + self.result_queue = executor._result_queue + + # A queue.Queue of work ids e.g. Queue([5, 6, ...]). + self.work_ids_queue = executor._work_ids + + # A dict mapping work ids to _WorkItems e.g. + # {5: <_WorkItem...>, 6: <_WorkItem...>, ...} + self.pending_work_items = executor._pending_work_items + + super().__init__() + + def run(self): + # Main loop for the executor manager thread. + + while True: + self.add_call_item_to_queue() + + result_item, is_broken, cause = self.wait_result_broken_or_wakeup() + + if is_broken: + self.terminate_broken(cause) + return + if result_item is not None: + self.process_result_item(result_item) + # Delete reference to result_item to avoid keeping references + # while waiting on new results. 
+ del result_item + + # attempt to increment idle process count + executor = self.executor_reference() + if executor is not None: + executor._idle_worker_semaphore.release() + del executor + + if self.is_shutting_down(): + self.flag_executor_shutting_down() + + # When only canceled futures remain in pending_work_items, our + # next call to wait_result_broken_or_wakeup would hang forever. + # This makes sure we have some running futures or none at all. + self.add_call_item_to_queue() + + # Since no new work items can be added, it is safe to shutdown + # this thread if there are no pending work items. + if not self.pending_work_items: + self.join_executor_internals() + return + + def add_call_item_to_queue(self): + # Fills call_queue with _WorkItems from pending_work_items. + # This function never blocks. + while True: + if self.call_queue.full(): + return + try: + work_id = self.work_ids_queue.get(block=False) + except queue.Empty: + return + else: + work_item = self.pending_work_items[work_id] + + if work_item.future.set_running_or_notify_cancel(): + self.call_queue.put(_CallItem(work_id, + work_item.fn, + work_item.args, + work_item.kwargs), + block=True) + else: + del self.pending_work_items[work_id] + continue + + def wait_result_broken_or_wakeup(self): + # Wait for a result to be ready in the result_queue while checking + # that all worker processes are still running, or for a wake up + # signal send. The wake up signals come either from new tasks being + # submitted, from the executor being shutdown/gc-ed, or from the + # shutdown of the python interpreter. + result_reader = self.result_queue._reader + assert not self.thread_wakeup._closed + wakeup_reader = self.thread_wakeup._reader + readers = [result_reader, wakeup_reader] + worker_sentinels = [p.sentinel for p in list(self.processes.values())] + ready = mp.connection.wait(readers + worker_sentinels) + + cause = None + is_broken = True + result_item = None + if result_reader in ready: + try: + result_item = result_reader.recv() + is_broken = False + except BaseException as e: + cause = traceback.format_exception(type(e), e, e.__traceback__) + + elif wakeup_reader in ready: + is_broken = False + + with self.shutdown_lock: + self.thread_wakeup.clear() + + return result_item, is_broken, cause + + def process_result_item(self, result_item): + # Process the received a result_item. This can be either the PID of a + # worker that exited gracefully or a _ResultItem + + if isinstance(result_item, int): + # Clean shutdown of a worker using its PID + # (avoids marking the executor broken) + assert self.is_shutting_down() + p = self.processes.pop(result_item) + p.join() + if not self.processes: + self.join_executor_internals() + return + else: + # Received a _ResultItem so mark the future as completed. + work_item = self.pending_work_items.pop(result_item.work_id, None) + # work_item can be None if another process terminated (see above) + if work_item is not None: + if result_item.exception: + work_item.future.set_exception(result_item.exception) + else: + work_item.future.set_result(result_item.result) + + def is_shutting_down(self): + # Check whether we should start shutting down the executor. + executor = self.executor_reference() + # No more work items can be added if: + # - The interpreter is shutting down OR + # - The executor that owns this worker has been collected OR + # - The executor that owns this worker has been shutdown. 
+ return (_global_shutdown or executor is None + or executor._shutdown_thread) + + def terminate_broken(self, cause): + # Terminate the executor because it is in a broken state. The cause + # argument can be used to display more information on the error that + # lead the executor into becoming broken. + + # Mark the process pool broken so that submits fail right now. + executor = self.executor_reference() + if executor is not None: + executor._broken = ('A child process terminated ' + 'abruptly, the process pool is not ' + 'usable anymore') + executor._shutdown_thread = True + executor = None + + # All pending tasks are to be marked failed with the following + # BrokenProcessPool error + bpe = BrokenProcessPool("A process in the process pool was " + "terminated abruptly while the future was " + "running or pending.") + if cause is not None: + bpe.__cause__ = _RemoteTraceback( + f"\n'''\n{''.join(cause)}'''") + + # Mark pending tasks as failed. + for work_id, work_item in self.pending_work_items.items(): + work_item.future.set_exception(bpe) + # Delete references to object. See issue16284 + del work_item + self.pending_work_items.clear() + + # Terminate remaining workers forcibly: the queues or their + # locks may be in a dirty state and block forever. + for p in self.processes.values(): + p.terminate() + + # clean up resources + self.join_executor_internals() + + def flag_executor_shutting_down(self): + # Flag the executor as shutting down and cancel remaining tasks if + # requested as early as possible if it is not gc-ed yet. + executor = self.executor_reference() + if executor is not None: + executor._shutdown_thread = True + # Cancel pending work items if requested. + if executor._cancel_pending_futures: + # Cancel all pending futures and update pending_work_items + # to only have futures that are currently running. + new_pending_work_items = {} + for work_id, work_item in self.pending_work_items.items(): + if not work_item.future.cancel(): + new_pending_work_items[work_id] = work_item + self.pending_work_items = new_pending_work_items + # Drain work_ids_queue since we no longer need to + # add items to the call queue. + while True: + try: + self.work_ids_queue.get_nowait() + except queue.Empty: + break + # Make sure we do this only once to not waste time looping + # on running processes over and over. + executor._cancel_pending_futures = False + + def shutdown_workers(self): + n_children_to_stop = self.get_n_children_alive() + n_sentinels_sent = 0 + # Send the right number of sentinels, to make sure all children are + # properly terminated. + while (n_sentinels_sent < n_children_to_stop + and self.get_n_children_alive() > 0): + for i in range(n_children_to_stop - n_sentinels_sent): + try: + self.call_queue.put_nowait(None) + n_sentinels_sent += 1 + except queue.Full: + break + + def join_executor_internals(self): + self.shutdown_workers() + # Release the queue's resources as soon as possible. + self.call_queue.close() + self.call_queue.join_thread() + with self.shutdown_lock: + self.thread_wakeup.close() + # If .join() is not called on the created processes then + # some ctx.Queue methods may deadlock on Mac OS X. + for p in self.processes.values(): + p.join() + + def get_n_children_alive(self): + # This is an upper bound on the number of children alive. 
+ return sum(p.is_alive() for p in self.processes.values()) + + +_system_limits_checked = False +_system_limited = None + + +def _check_system_limits(): + global _system_limits_checked, _system_limited + if _system_limits_checked: + if _system_limited: + raise NotImplementedError(_system_limited) + _system_limits_checked = True + try: + import multiprocessing.synchronize + except ImportError: + _system_limited = ( + "This Python build lacks multiprocessing.synchronize, usually due " + "to named semaphores being unavailable on this platform." + ) + raise NotImplementedError(_system_limited) + try: + nsems_max = os.sysconf("SC_SEM_NSEMS_MAX") + except (AttributeError, ValueError): + # sysconf not available or setting not available + return + if nsems_max == -1: + # indetermined limit, assume that limit is determined + # by available memory only + return + if nsems_max >= 256: + # minimum number of semaphores available + # according to POSIX + return + _system_limited = ("system provides too few semaphores (%d" + " available, 256 necessary)" % nsems_max) + raise NotImplementedError(_system_limited) + + +def _chain_from_iterable_of_lists(iterable): + """ + Specialized implementation of itertools.chain.from_iterable. + Each item in *iterable* should be a list. This function is + careful not to keep references to yielded objects. + """ + for element in iterable: + element.reverse() + while element: + yield element.pop() + + +class BrokenProcessPool(_base.BrokenExecutor): + """ + Raised when a process in a ProcessPoolExecutor terminated abruptly + while a future was in the running state. + """ + + +class ProcessPoolExecutor(_base.Executor): + def __init__(self, max_workers=None, mp_context=None, + initializer=None, initargs=()): + """Initializes a new ProcessPoolExecutor instance. + + Args: + max_workers: The maximum number of processes that can be used to + execute the given calls. If None or not given then as many + worker processes will be created as the machine has processors. + mp_context: A multiprocessing context to launch the workers. This + object should provide SimpleQueue, Queue and Process. + initializer: A callable used to initialize worker processes. + initargs: A tuple of arguments to pass to the initializer. + """ + _check_system_limits() + + if max_workers is None: + self._max_workers = os.cpu_count() or 1 + if sys.platform == 'win32': + self._max_workers = min(_MAX_WINDOWS_WORKERS, + self._max_workers) + else: + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + elif (sys.platform == 'win32' and + max_workers > _MAX_WINDOWS_WORKERS): + raise ValueError( + f"max_workers must be <= {_MAX_WINDOWS_WORKERS}") + + self._max_workers = max_workers + + if mp_context is None: + mp_context = mp.get_context() + self._mp_context = mp_context + + # https://github.com/python/cpython/issues/90622 + self._safe_to_dynamically_spawn_children = ( + self._mp_context.get_start_method(allow_none=False) != "fork") + + if initializer is not None and not callable(initializer): + raise TypeError("initializer must be a callable") + self._initializer = initializer + self._initargs = initargs + + # Management thread + self._executor_manager_thread = None + + # Map of pids to processes + self._processes = {} + + # Shutdown is a two-step process. 
+ self._shutdown_thread = False + self._shutdown_lock = threading.Lock() + self._idle_worker_semaphore = threading.Semaphore(0) + self._broken = False + self._queue_count = 0 + self._pending_work_items = {} + self._cancel_pending_futures = False + + # _ThreadWakeup is a communication channel used to interrupt the wait + # of the main loop of executor_manager_thread from another thread (e.g. + # when calling executor.submit or executor.shutdown). We do not use the + # _result_queue to send wakeup signals to the executor_manager_thread + # as it could result in a deadlock if a worker process dies with the + # _result_queue write lock still acquired. + # + # _shutdown_lock must be locked to access _ThreadWakeup. + self._executor_manager_thread_wakeup = _ThreadWakeup() + + # Create communication channels for the executor + # Make the call queue slightly larger than the number of processes to + # prevent the worker processes from idling. But don't make it too big + # because futures in the call queue cannot be cancelled. + queue_size = self._max_workers + EXTRA_QUEUED_CALLS + self._call_queue = _SafeQueue( + max_size=queue_size, ctx=self._mp_context, + pending_work_items=self._pending_work_items, + shutdown_lock=self._shutdown_lock, + thread_wakeup=self._executor_manager_thread_wakeup) + # Killed worker processes can produce spurious "broken pipe" + # tracebacks in the queue's own worker thread. But we detect killed + # processes anyway, so silence the tracebacks. + self._call_queue._ignore_epipe = True + self._result_queue = mp_context.SimpleQueue() + self._work_ids = queue.Queue() + + def _start_executor_manager_thread(self): + if self._executor_manager_thread is None: + # Start the processes so that their sentinels are known. + if not self._safe_to_dynamically_spawn_children: # ie, using fork. + self._launch_processes() + self._executor_manager_thread = _ExecutorManagerThread(self) + self._executor_manager_thread.start() + _threads_wakeups[self._executor_manager_thread] = \ + self._executor_manager_thread_wakeup + + def _adjust_process_count(self): + # if there's an idle process, we don't need to spawn a new one. + if self._idle_worker_semaphore.acquire(blocking=False): + return + + process_count = len(self._processes) + if process_count < self._max_workers: + # Assertion disabled as this codepath is also used to replace a + # worker that unexpectedly dies, even when using the 'fork' start + # method. That means there is still a potential deadlock bug. If a + # 'fork' mp_context worker dies, we'll be forking a new one when + # we know a thread is running (self._executor_manager_thread). 
+ #assert self._safe_to_dynamically_spawn_children or not self._executor_manager_thread, 'https://github.com/python/cpython/issues/90622' + self._spawn_process() + + def _launch_processes(self): + # https://github.com/python/cpython/issues/90622 + assert not self._executor_manager_thread, ( + 'Processes cannot be fork()ed after the thread has started, ' + 'deadlock in the child processes could result.') + for _ in range(len(self._processes), self._max_workers): + self._spawn_process() + + def _spawn_process(self): + p = self._mp_context.Process( + target=_process_worker, + args=(self._call_queue, + self._result_queue, + self._initializer, + self._initargs)) + p.start() + self._processes[p.pid] = p + + def submit(self, fn, /, *args, **kwargs): + with self._shutdown_lock: + if self._broken: + raise BrokenProcessPool(self._broken) + if self._shutdown_thread: + raise RuntimeError('cannot schedule new futures after shutdown') + if _global_shutdown: + raise RuntimeError('cannot schedule new futures after ' + 'interpreter shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._pending_work_items[self._queue_count] = w + self._work_ids.put(self._queue_count) + self._queue_count += 1 + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + if self._safe_to_dynamically_spawn_children: + self._adjust_process_count() + self._start_executor_manager_thread() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def map(self, fn, *iterables, timeout=None, chunksize=1): + """Returns an iterator equivalent to map(fn, iter). + + Args: + fn: A callable that will take as many arguments as there are + passed iterables. + timeout: The maximum number of seconds to wait. If None, then there + is no limit on the wait time. + chunksize: If greater than one, the iterables will be chopped into + chunks of size chunksize and submitted to the process pool. + If set to one, the items in the list will be sent one at a time. + + Returns: + An iterator equivalent to: map(func, *iterables) but the calls may + be evaluated out-of-order. + + Raises: + TimeoutError: If the entire result iterator could not be generated + before the given timeout. + Exception: If fn(*args) raises for any values. + """ + if chunksize < 1: + raise ValueError("chunksize must be >= 1.") + + results = super().map(partial(_process_chunk, fn), + _get_chunks(*iterables, chunksize=chunksize), + timeout=timeout) + return _chain_from_iterable_of_lists(results) + + def shutdown(self, wait=True, *, cancel_futures=False): + with self._shutdown_lock: + self._cancel_pending_futures = cancel_futures + self._shutdown_thread = True + if self._executor_manager_thread_wakeup is not None: + # Wake up queue management thread + self._executor_manager_thread_wakeup.wakeup() + + if self._executor_manager_thread is not None and wait: + self._executor_manager_thread.join() + # To reduce the risk of opening too many files, remove references to + # objects that use file descriptors. 
+ self._executor_manager_thread = None + self._call_queue = None + if self._result_queue is not None and wait: + self._result_queue.close() + self._result_queue = None + self._processes = None + self._executor_manager_thread_wakeup = None + + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/python310/concurrent/futures/thread.py b/python310/concurrent/futures/thread.py new file mode 100644 index 0000000000000000000000000000000000000000..51c942f51abd371e80ffa07c2b212336afb8eae2 --- /dev/null +++ b/python310/concurrent/futures/thread.py @@ -0,0 +1,236 @@ +# Copyright 2009 Brian Quinlan. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Implements ThreadPoolExecutor.""" + +__author__ = 'Brian Quinlan (brian@sweetapp.com)' + +from concurrent.futures import _base +import itertools +import queue +import threading +import types +import weakref +import os + + +_threads_queues = weakref.WeakKeyDictionary() +_shutdown = False +# Lock that ensures that new workers are not created while the interpreter is +# shutting down. Must be held while mutating _threads_queues and _shutdown. +_global_shutdown_lock = threading.Lock() + +def _python_exit(): + global _shutdown + with _global_shutdown_lock: + _shutdown = True + items = list(_threads_queues.items()) + for t, q in items: + q.put(None) + for t, q in items: + t.join() + +# Register for `_python_exit()` to be called just before joining all +# non-daemon threads. This is used instead of `atexit.register()` for +# compatibility with subinterpreters, which no longer support daemon threads. +# See bpo-39812 for context. +threading._register_atexit(_python_exit) + +# At fork, reinitialize the `_global_shutdown_lock` lock in the child process +if hasattr(os, 'register_at_fork'): + os.register_at_fork(before=_global_shutdown_lock.acquire, + after_in_child=_global_shutdown_lock._at_fork_reinit, + after_in_parent=_global_shutdown_lock.release) + + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException as exc: + self.future.set_exception(exc) + # Break a reference cycle with the exception 'exc' + self = None + else: + self.future.set_result(result) + + __class_getitem__ = classmethod(types.GenericAlias) + + +def _worker(executor_reference, work_queue, initializer, initargs): + if initializer is not None: + try: + initializer(*initargs) + except BaseException: + _base.LOGGER.critical('Exception in initializer:', exc_info=True) + executor = executor_reference() + if executor is not None: + executor._initializer_failed() + return + try: + while True: + work_item = work_queue.get(block=True) + if work_item is not None: + work_item.run() + # Delete references to object. See issue16284 + del work_item + + # attempt to increment idle count + executor = executor_reference() + if executor is not None: + executor._idle_semaphore.release() + del executor + continue + + executor = executor_reference() + # Exit if: + # - The interpreter is shutting down OR + # - The executor that owns the worker has been collected OR + # - The executor that owns the worker has been shutdown. + if _shutdown or executor is None or executor._shutdown: + # Flag the executor as shutting down as early as possible if it + # is not gc-ed yet. 
+ if executor is not None: + executor._shutdown = True + # Notice other workers + work_queue.put(None) + return + del executor + except BaseException: + _base.LOGGER.critical('Exception in worker', exc_info=True) + + +class BrokenThreadPool(_base.BrokenExecutor): + """ + Raised when a worker thread in a ThreadPoolExecutor failed initializing. + """ + + +class ThreadPoolExecutor(_base.Executor): + + # Used to assign unique thread names when thread_name_prefix is not supplied. + _counter = itertools.count().__next__ + + def __init__(self, max_workers=None, thread_name_prefix='', + initializer=None, initargs=()): + """Initializes a new ThreadPoolExecutor instance. + + Args: + max_workers: The maximum number of threads that can be used to + execute the given calls. + thread_name_prefix: An optional name prefix to give our threads. + initializer: A callable used to initialize worker threads. + initargs: A tuple of arguments to pass to the initializer. + """ + if max_workers is None: + # ThreadPoolExecutor is often used to: + # * CPU bound task which releases GIL + # * I/O bound task (which releases GIL, of course) + # + # We use cpu_count + 4 for both types of tasks. + # But we limit it to 32 to avoid consuming surprisingly large resource + # on many core machine. + max_workers = min(32, (os.cpu_count() or 1) + 4) + if max_workers <= 0: + raise ValueError("max_workers must be greater than 0") + + if initializer is not None and not callable(initializer): + raise TypeError("initializer must be a callable") + + self._max_workers = max_workers + self._work_queue = queue.SimpleQueue() + self._idle_semaphore = threading.Semaphore(0) + self._threads = set() + self._broken = False + self._shutdown = False + self._shutdown_lock = threading.Lock() + self._thread_name_prefix = (thread_name_prefix or + ("ThreadPoolExecutor-%d" % self._counter())) + self._initializer = initializer + self._initargs = initargs + + def submit(self, fn, /, *args, **kwargs): + with self._shutdown_lock, _global_shutdown_lock: + if self._broken: + raise BrokenThreadPool(self._broken) + + if self._shutdown: + raise RuntimeError('cannot schedule new futures after shutdown') + if _shutdown: + raise RuntimeError('cannot schedule new futures after ' + 'interpreter shutdown') + + f = _base.Future() + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + return f + submit.__doc__ = _base.Executor.submit.__doc__ + + def _adjust_thread_count(self): + # if idle threads are available, don't spin new threads + if self._idle_semaphore.acquire(timeout=0): + return + + # When the executor gets lost, the weakref callback will wake up + # the worker threads. 
+ def weakref_cb(_, q=self._work_queue): + q.put(None) + + num_threads = len(self._threads) + if num_threads < self._max_workers: + thread_name = '%s_%d' % (self._thread_name_prefix or self, + num_threads) + t = threading.Thread(name=thread_name, target=_worker, + args=(weakref.ref(self, weakref_cb), + self._work_queue, + self._initializer, + self._initargs)) + t.start() + self._threads.add(t) + _threads_queues[t] = self._work_queue + + def _initializer_failed(self): + with self._shutdown_lock: + self._broken = ('A thread initializer failed, the thread pool ' + 'is not usable anymore') + # Drain work queue and mark pending futures failed + while True: + try: + work_item = self._work_queue.get_nowait() + except queue.Empty: + break + if work_item is not None: + work_item.future.set_exception(BrokenThreadPool(self._broken)) + + def shutdown(self, wait=True, *, cancel_futures=False): + with self._shutdown_lock: + self._shutdown = True + if cancel_futures: + # Drain all work items from the queue, and then cancel their + # associated futures. + while True: + try: + work_item = self._work_queue.get_nowait() + except queue.Empty: + break + if work_item is not None: + work_item.future.cancel() + + # Send a wake-up to prevent threads calling + # _work_queue.get(block=True) from permanently blocking. + self._work_queue.put(None) + if wait: + for t in self._threads: + t.join() + shutdown.__doc__ = _base.Executor.shutdown.__doc__ diff --git a/python310/config-3.10-x86_64-linux-gnu/Makefile b/python310/config-3.10-x86_64-linux-gnu/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..af93a8f3987531449da3cc2840a8ad1681a0e424 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/Makefile @@ -0,0 +1,2175 @@ +# Generated automatically from Makefile.pre by makesetup. +# Top-level Makefile for Python +# +# As distributed, this file is called Makefile.pre.in; it is processed +# into the real Makefile by running the script ./configure, which +# replaces things like @spam@ with values appropriate for your system. +# This means that if you edit Makefile, your changes get lost the next +# time you run the configure script. Ideally, you can do: +# +# ./configure +# make +# make test +# make install +# +# If you have a previous version of Python installed that you don't +# want to overwrite, you can use "make altinstall" instead of "make +# install". Refer to the "Installing" section in the README file for +# additional details. +# +# See also the section "Build instructions" in the README file. 
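Before the generated Makefile continues below, a minimal sketch of the shutdown path implemented in thread.py above, assuming a plain single-file script; the `busy` helper is a stand-in used only here and is not part of the vendored code. Passing cancel_futures=True exercises the queue-draining branch of ThreadPoolExecutor.shutdown().

import time
from concurrent.futures import ThreadPoolExecutor

def busy(n):
    time.sleep(0.2)      # stand-in task; only used for this sketch
    return n

pool = ThreadPoolExecutor(max_workers=2)
futures = [pool.submit(busy, i) for i in range(10)]
# Drain the work queue, cancel anything not yet running, then join the
# worker threads because wait=True.
pool.shutdown(wait=True, cancel_futures=True)
completed = [f.result() for f in futures if not f.cancelled()]
print(len(completed), "completed,", sum(f.cancelled() for f in futures), "cancelled")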
+ +# === Variables set by makesetup === + +MODBUILT_NAMES= array cmath math _struct _random _elementtree _pickle _datetime _bisect _heapq _statistics unicodedata fcntl spwd grp select _csv _socket _posixsubprocess _md5 _sha1 _sha256 _sha512 _sha3 _blake2 syslog binascii zlib posix errno pwd _sre _codecs _weakref _functools _operator _collections _abc itertools atexit _signal _stat time _thread _locale _io faulthandler _tracemalloc _symtable pyexpat xxsubtype +MODDISABLED_NAMES= +# filter out duplicate object files (_math.o) +MODOBJS= $(sort Modules/arraymodule.o Modules/cmathmodule.o Modules/_math.o Modules/mathmodule.o Modules/_math.o Modules/_struct.o Modules/_randommodule.o Modules/_elementtree.o Modules/_pickle.o Modules/_datetimemodule.o Modules/_bisectmodule.o Modules/_heapqmodule.o Modules/_statisticsmodule.o Modules/unicodedata.o Modules/fcntlmodule.o Modules/spwdmodule.o Modules/grpmodule.o Modules/selectmodule.o Modules/_csv.o Modules/socketmodule.o Modules/_posixsubprocess.o Modules/md5module.o Modules/sha1module.o Modules/sha256module.o Modules/sha512module.o Modules/sha3module.o Modules/blake2module.o Modules/blake2b_impl.o Modules/blake2s_impl.o Modules/syslogmodule.o Modules/binascii.o Modules/zlibmodule.o Modules/posixmodule.o Modules/errnomodule.o Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o Modules/_weakref.o Modules/_functoolsmodule.o Modules/_operator.o Modules/_collectionsmodule.o Modules/_abc.o Modules/itertoolsmodule.o Modules/atexitmodule.o Modules/signalmodule.o Modules/_stat.o Modules/timemodule.o Modules/_threadmodule.o Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o Modules/faulthandler.o Modules/_tracemalloc.o Modules/symtablemodule.o Modules/pyexpat.o Modules/xxsubtype.o) +MODLIBS= $(LOCALMODLIBS) $(BASEMODLIBS) + +# === Variables set by configure +VERSION= 3.10 +srcdir= .. +VPATH= .. +abs_srcdir= /build/python3.10-UfU9Sd/python3.10-3.10.12/build-shared/.. +abs_builddir= /build/python3.10-UfU9Sd/python3.10-3.10.12/build-shared + + +CC= x86_64-linux-gnu-gcc +CXX= x86_64-linux-gnu-g++ +MAINCC= $(CC) +LINKCC= $(PURIFY) $(MAINCC) +AR= x86_64-linux-gnu-gcc-ar +READELF= readelf +SOABI= cpython-310-x86_64-linux-gnu +LDVERSION= $(VERSION)$(ABIFLAGS) +LIBPYTHON= +GITVERSION= +GITTAG= +GITBRANCH= +PGO_PROF_GEN_FLAG=-fprofile-generate +PGO_PROF_USE_FLAG= +LLVM_PROF_MERGER=true +LLVM_PROF_FILE= +LLVM_PROF_ERR=no +DTRACE= /usr/bin/dtrace +DFLAGS= +DTRACE_HEADERS= Include/pydtrace_probes.h +DTRACE_OBJS= Python/pydtrace.o + +GNULD= yes + +# Shell used by make (some versions default to the login shell, which is bad) +SHELL= /bin/sh + +# Use this to make a link between python$(VERSION) and python in $(BINDIR) +LN= ln + +# Portable install script (configure doesn't always guess right) +INSTALL= /usr/bin/install -c +INSTALL_PROGRAM=${INSTALL} +INSTALL_SCRIPT= ${INSTALL} +INSTALL_DATA= ${INSTALL} -m 644 +# Shared libraries must be installed with executable mode on some systems; +# rather than figuring out exactly which, we always give them executable mode. +INSTALL_SHARED= ${INSTALL} -m 755 + +MKDIR_P= /bin/mkdir -p + +MAKESETUP= $(srcdir)/Modules/makesetup + +# Compiler options +OPT= -DNDEBUG -g -fwrapv -O2 -Wall +BASECFLAGS= -Wno-unused-result -Wsign-compare +BASECPPFLAGS= -IObjects -IInclude -IPython +CONFIGURE_CFLAGS= -g -fstack-protector-strong -Wformat -Werror=format-security +# CFLAGS_NODIST is used for building the interpreter and stdlib C extensions. 
+# Use it when a compiler flag should _not_ be part of the distutils CFLAGS +# once Python is installed (Issue #21121). +CONFIGURE_CFLAGS_NODIST= -std=c99 -Wextra -Wno-unused-result -Wno-unused-parameter -Wno-missing-field-initializers -Wstrict-prototypes -Werror=implicit-function-declaration -fvisibility=hidden +# LDFLAGS_NODIST is used in the same manner as CFLAGS_NODIST. +# Use it when a linker flag should _not_ be part of the distutils LDFLAGS +# once Python is installed (bpo-35257) +CONFIGURE_LDFLAGS_NODIST= +CONFIGURE_CPPFLAGS= -Wdate-time -D_FORTIFY_SOURCE=2 +CONFIGURE_LDFLAGS= -Wl,-Bsymbolic-functions -g -fwrapv -O2 +# Avoid assigning CFLAGS, LDFLAGS, etc. so users can use them on the +# command line to append to these values without stomping the pre-set +# values. +PY_CFLAGS= $(BASECFLAGS) $(OPT) $(CONFIGURE_CFLAGS) $(CFLAGS) $(EXTRA_CFLAGS) +PY_CFLAGS_NODIST=$(CONFIGURE_CFLAGS_NODIST) $(CFLAGS_NODIST) -I$(srcdir)/Include/internal +# Both CPPFLAGS and LDFLAGS need to contain the shell's value for setup.py to +# be able to build extension modules using the directories specified in the +# environment variables +PY_CPPFLAGS= $(BASECPPFLAGS) -I. -I$(srcdir)/Include $(CONFIGURE_CPPFLAGS) $(CPPFLAGS) +PY_LDFLAGS= $(CONFIGURE_LDFLAGS) $(LDFLAGS) +PY_LDFLAGS_NODIST=$(CONFIGURE_LDFLAGS_NODIST) $(LDFLAGS_NODIST) +NO_AS_NEEDED= -Wl,--no-as-needed +CCSHARED= -fPIC +# LINKFORSHARED are the flags passed to the $(CC) command that links +# the python executable -- this is only needed for a few systems +LINKFORSHARED= -Xlinker -export-dynamic -Wl,-O1 -Wl,-Bsymbolic-functions +ARFLAGS= rcs +# Extra C flags added for building the interpreter object files. +CFLAGSFORSHARED=$(CCSHARED) +# C flags used for building the interpreter object files +PY_STDMODULE_CFLAGS= $(PY_CFLAGS) $(PY_CFLAGS_NODIST) $(PY_CPPFLAGS) $(CFLAGSFORSHARED) +PY_BUILTIN_MODULE_CFLAGS= $(PY_STDMODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN +PY_CORE_CFLAGS= $(PY_STDMODULE_CFLAGS) -DPy_BUILD_CORE +# Linker flags used for building the interpreter object files +PY_CORE_LDFLAGS=$(PY_LDFLAGS) $(PY_LDFLAGS_NODIST) +# Strict or non-strict aliasing flags used to compile dtoa.c, see above +CFLAGS_ALIASING= + + +# Machine-dependent subdirectories +MACHDEP= linux + +# Multiarch directory (may be empty) +MULTIARCH= x86_64-linux-gnu +MULTIARCH_CPPFLAGS = -DMULTIARCH=\"x86_64-linux-gnu\" + +# Install prefix for architecture-independent files +prefix= /usr + +# Install prefix for architecture-dependent files +exec_prefix= ${prefix} + +# Install prefix for data files +datarootdir= ${prefix}/share + +# Expanded directories +BINDIR= ${exec_prefix}/bin +LIBDIR= /usr/lib/x86_64-linux-gnu +MANDIR= ${datarootdir}/man +INCLUDEDIR= ${prefix}/include +CONFINCLUDEDIR= $(exec_prefix)/include +PLATLIBDIR= lib +SCRIPTDIR= $(prefix)/$(PLATLIBDIR) +ABIFLAGS= +# Variable used by ensurepip +WHEEL_PKG_DIR= /usr/share/python-wheels/ + +# Detailed destination directories +BINLIBDEST= $(LIBDIR)/python$(VERSION) +LIBDEST= $(SCRIPTDIR)/python$(VERSION) +INCLUDEPY= $(INCLUDEDIR)/python$(LDVERSION) +CONFINCLUDEPY= $(CONFINCLUDEDIR)/python$(LDVERSION) + +# Symbols used for using shared libraries +SHLIB_SUFFIX= .so +EXT_SUFFIX= .cpython-310-x86_64-linux-gnu.so +LDSHARED= $(CC) -shared -Wl,-O1 -Wl,-Bsymbolic-functions $(PY_LDFLAGS) +BLDSHARED= $(CC) -shared -Wl,-O1 -Wl,-Bsymbolic-functions $(PY_CORE_LDFLAGS) +LDCXXSHARED= $(CXX) -shared -Wl,-O1 -Wl,-Bsymbolic-functions +DESTSHARED= $(LIBDEST)/lib-dynload + +# List of exported symbols for AIX +EXPORTSYMS= +EXPORTSFROM= + +# 
Executable suffix (.exe on Windows and Mac OS X) +EXE= +BUILDEXE= + +# Short name and location for Mac OS X Python framework +UNIVERSALSDK= +PYTHONFRAMEWORK= +PYTHONFRAMEWORKDIR= no-framework +PYTHONFRAMEWORKPREFIX= +PYTHONFRAMEWORKINSTALLDIR= +# Deployment target selected during configure, to be checked +# by distutils. The export statement is needed to ensure that the +# deployment target is active during build. +MACOSX_DEPLOYMENT_TARGET= +#export MACOSX_DEPLOYMENT_TARGET + +# Option to install to strip binaries +STRIPFLAG=-s + +# Flags to lipo to produce a 32-bit-only universal executable +LIPO_32BIT_FLAGS= + +# Flags to lipo to produce an intel-64-only universal executable +LIPO_INTEL64_FLAGS= + +# Options to enable prebinding (for fast startup prior to Mac OS X 10.3) +OTHER_LIBTOOL_OPT= + +# Environment to run shared python without installed libraries +RUNSHARED= + +# ensurepip options +ENSUREPIP= no + +# OpenSSL options for setup.py so sysconfig can pick up AC_SUBST() vars. +OPENSSL_INCLUDES= +OPENSSL_LIBS=-lssl -lcrypto +OPENSSL_LDFLAGS= +OPENSSL_RPATH= + +# Default zoneinfo.TZPATH. Added here to expose it in sysconfig.get_config_var +TZPATH=/usr/share/zoneinfo:/usr/lib/zoneinfo:/usr/share/lib/zoneinfo:/etc/zoneinfo + +# Modes for directories, executables and data files created by the +# install process. Default to user-only-writable for all file types. +DIRMODE= 755 +EXEMODE= 755 +FILEMODE= 644 + +# configure script arguments +CONFIG_ARGS= '--enable-shared' '--prefix=/usr' '--libdir=/usr/lib/x86_64-linux-gnu' '--enable-ipv6' '--enable-loadable-sqlite-extensions' '--with-dbmliborder=bdb:gdbm' '--with-computed-gotos' '--without-ensurepip' '--with-system-expat' '--with-dtrace' '--with-system-libmpdec' '--with-wheel-pkg-dir=/usr/share/python-wheels/' 'MKDIR_P=/bin/mkdir -p' '--with-system-ffi' 'CC=x86_64-linux-gnu-gcc' 'CFLAGS=-g -fstack-protector-strong -Wformat -Werror=format-security ' 'LDFLAGS=-Wl,-Bsymbolic-functions -g -fwrapv -O2 ' 'CPPFLAGS=-Wdate-time -D_FORTIFY_SOURCE=2' + + +# Subdirectories with code +SRCDIRS= Parser Objects Python Modules Modules/_io Programs + +# Other subdirectories +SUBDIRSTOO= Include Lib Misc + +# Files and directories to be distributed +CONFIGFILES= configure configure.ac acconfig.h pyconfig.h.in Makefile.pre.in +DISTFILES= README.rst ChangeLog $(CONFIGFILES) +DISTDIRS= $(SUBDIRS) $(SUBDIRSTOO) Ext-dummy +DIST= $(DISTFILES) $(DISTDIRS) + + +LIBRARY= libpython$(VERSION)$(ABIFLAGS).a +LDLIBRARY= libpython$(LDVERSION).so +BLDLIBRARY= -lpython$(LDVERSION) +PY3LIBRARY= libpython3.so +DLLLIBRARY= +LDLIBRARYDIR= +INSTSONAME= libpython$(LDVERSION).so.1.0 +LIBRARY_DEPS= $(LIBRARY) $(LDLIBRARY) $(PY3LIBRARY) $(EXPORTSYMS) +PY_ENABLE_SHARED= 1 +STATIC_LIBPYTHON= 1 + + +LIBS= -lcrypt -ldl -lm +LIBM= -lm +LIBC= +SYSLIBS= $(LIBM) $(LIBC) +SHLIBS= $(LIBS) + +DLINCLDIR= . +DYNLOADFILE= dynload_shlib.o +MACHDEP_OBJS= +LIBOBJDIR= Python/ +LIBOBJS= + +PYTHON= python$(EXE) +BUILDPYTHON= python$(BUILDEXE) + +PYTHON_FOR_REGEN?=python3.10 +UPDATE_FILE=$(PYTHON_FOR_REGEN) $(srcdir)/Tools/scripts/update_file.py +PYTHON_FOR_BUILD=./$(BUILDPYTHON) -E +_PYTHON_HOST_PLATFORM= +BUILD_GNU_TYPE= x86_64-pc-linux-gnu +HOST_GNU_TYPE= x86_64-pc-linux-gnu + +# Tcl and Tk config info from --with-tcltk-includes and -libs options +TCLTK_INCLUDES= -I/usr/include/tcl8.6 +TCLTK_LIBS= -ltk8.6 -ltkstub8.6 -ltcl8.6 -ltclstub8.6 + +# The task to run while instrumented when building the profile-opt target. +# To speed up profile generation, we don't run the full unit test suite +# by default. 
The default is "-m test --pgo". To run more tests, use +# PROFILE_TASK="-m test --pgo-extended" +PROFILE_TASK= -m test --pgo --timeout=$(TESTTIMEOUT) + +# report files for gcov / lcov coverage report +COVERAGE_INFO= $(abs_builddir)/coverage.info +COVERAGE_REPORT=$(abs_builddir)/lcov-report +COVERAGE_REPORT_OPTIONS=--no-branch-coverage --title "CPython lcov report" + + +# === Definitions added by makesetup === + +LOCALMODLIBS= -lexpat -L$(exec_prefix)/lib -lz -lexpat +BASEMODLIBS= +PYTHONPATH=$(COREPYTHONPATH) +COREPYTHONPATH=$(DESTPATH)$(SITEPATH)$(TESTPATH) +TESTPATH= +SITEPATH= +DESTPATH= +MACHDESTLIB=$(BINLIBDEST) +DESTLIB=$(LIBDEST) + + + +########################################################################## +# Modules +MODULE_OBJS= \ + Modules/config.o \ + Modules/getpath.o \ + Modules/main.o \ + Modules/gcmodule.o + +IO_H= Modules/_io/_iomodule.h + +IO_OBJS= \ + Modules/_io/_iomodule.o \ + Modules/_io/iobase.o \ + Modules/_io/fileio.o \ + Modules/_io/bufferedio.o \ + Modules/_io/textio.o \ + Modules/_io/bytesio.o \ + Modules/_io/stringio.o + +########################################################################## + +LIBFFI_INCLUDEDIR= + +########################################################################## +# Parser + +PEGEN_OBJS= \ + Parser/pegen.o \ + Parser/parser.o \ + Parser/string_parser.o \ + Parser/peg_api.o + + +PEGEN_HEADERS= \ + $(srcdir)/Include/internal/pycore_parser.h \ + $(srcdir)/Parser/pegen.h \ + $(srcdir)/Parser/string_parser.h + +POBJS= \ + Parser/token.o \ + +PARSER_OBJS= $(POBJS) $(PEGEN_OBJS) Parser/myreadline.o Parser/tokenizer.o + +PARSER_HEADERS= \ + $(PEGEN_HEADERS) \ + $(srcdir)/Parser/tokenizer.h + +########################################################################## +# Python + +PYTHON_OBJS= \ + Python/_warnings.o \ + Python/Python-ast.o \ + Python/asdl.o \ + Python/ast.o \ + Python/ast_opt.o \ + Python/ast_unparse.o \ + Python/bltinmodule.o \ + Python/ceval.o \ + Python/codecs.o \ + Python/compile.o \ + Python/context.o \ + Python/dynamic_annotations.o \ + Python/errors.o \ + Python/frozenmain.o \ + Python/future.o \ + Python/getargs.o \ + Python/getcompiler.o \ + Python/getcopyright.o \ + Python/getplatform.o \ + Python/getversion.o \ + Python/hamt.o \ + Python/hashtable.o \ + Python/import.o \ + Python/importdl.o \ + Python/initconfig.o \ + Python/marshal.o \ + Python/modsupport.o \ + Python/mysnprintf.o \ + Python/mystrtoul.o \ + Python/pathconfig.o \ + Python/preconfig.o \ + Python/pyarena.o \ + Python/pyctype.o \ + Python/pyfpe.o \ + Python/pyhash.o \ + Python/pylifecycle.o \ + Python/pymath.o \ + Python/pystate.o \ + Python/pythonrun.o \ + Python/pytime.o \ + Python/bootstrap_hash.o \ + Python/structmember.o \ + Python/symtable.o \ + Python/sysmodule.o \ + Python/thread.o \ + Python/traceback.o \ + Python/getopt.o \ + Python/pystrcmp.o \ + Python/pystrtod.o \ + Python/pystrhex.o \ + Python/dtoa.o \ + Python/formatter_unicode.o \ + Python/fileutils.o \ + Python/suggestions.o \ + Python/$(DYNLOADFILE) \ + $(LIBOBJS) \ + $(MACHDEP_OBJS) \ + $(DTRACE_OBJS) + + +########################################################################## +# Objects +OBJECT_OBJS= \ + Objects/abstract.o \ + Objects/accu.o \ + Objects/boolobject.o \ + Objects/bytes_methods.o \ + Objects/bytearrayobject.o \ + Objects/bytesobject.o \ + Objects/call.o \ + Objects/capsule.o \ + Objects/cellobject.o \ + Objects/classobject.o \ + Objects/codeobject.o \ + Objects/complexobject.o \ + Objects/descrobject.o \ + Objects/enumobject.o \ + Objects/exceptions.o \ + 
Objects/genericaliasobject.o \ + Objects/genobject.o \ + Objects/fileobject.o \ + Objects/floatobject.o \ + Objects/frameobject.o \ + Objects/funcobject.o \ + Objects/interpreteridobject.o \ + Objects/iterobject.o \ + Objects/listobject.o \ + Objects/longobject.o \ + Objects/dictobject.o \ + Objects/odictobject.o \ + Objects/memoryobject.o \ + Objects/methodobject.o \ + Objects/moduleobject.o \ + Objects/namespaceobject.o \ + Objects/object.o \ + Objects/obmalloc.o \ + Objects/picklebufobject.o \ + Objects/rangeobject.o \ + Objects/setobject.o \ + Objects/sliceobject.o \ + Objects/structseq.o \ + Objects/tupleobject.o \ + Objects/typeobject.o \ + Objects/unicodeobject.o \ + Objects/unicodectype.o \ + Objects/unionobject.o \ + Objects/weakrefobject.o + +########################################################################## +# objects that get linked into the Python library +LIBRARY_OBJS_OMIT_FROZEN= \ + Modules/getbuildinfo.o \ + $(PARSER_OBJS) \ + $(OBJECT_OBJS) \ + $(PYTHON_OBJS) \ + $(MODULE_OBJS) \ + $(MODOBJS) + +LIBRARY_OBJS= \ + $(LIBRARY_OBJS_OMIT_FROZEN) \ + Python/frozen.o + +########################################################################## +# DTrace + +# On some systems, object files that reference DTrace probes need to be modified +# in-place by dtrace(1). +DTRACE_DEPS = \ + Python/ceval.o Python/import.o Python/sysmodule.o Modules/gcmodule.o + +######################################################################### +# Rules + +# Default target +all: build_all +build_all: check-clean-src $(BUILDPYTHON) oldsharedmods sharedmods gdbhooks \ + Programs/_testembed python-config + +# Check that the source is clean when building out of source. +check-clean-src: + @if test -n "$(VPATH)" -a -f "$(srcdir)/Programs/python.o"; then \ + echo "Error: The source directory ($(srcdir)) is not clean" ; \ + echo "Building Python out of the source tree (in $(abs_builddir)) requires a clean source tree ($(abs_srcdir))" ; \ + echo "Try to run: make -C \"$(srcdir)\" clean" ; \ + exit 1; \ + fi + +# Profile generation build must start from a clean tree. +profile-clean-stamp: + $(MAKE) clean + touch $@ + +# Compile with profile generation enabled. +profile-gen-stamp: profile-clean-stamp + @if [ $(LLVM_PROF_ERR) = yes ]; then \ + echo "Error: Cannot perform PGO build because llvm-profdata was not found in PATH" ;\ + echo "Please add it to PATH and run ./configure again" ;\ + exit 1;\ + fi + @echo "Building with support for profile generation:" + $(MAKE) build_all_generate_profile + touch $@ + +# Run task with profile generation build to create profile information. +profile-run-stamp: + @echo "Running code to generate profile data (this can take a while):" + # First, we need to create a clean build with profile generation + # enabled. + $(MAKE) profile-gen-stamp + # Next, run the profile task to generate the profile information. + $(MAKE) run_profile_task + $(MAKE) build_all_merge_profile + # Remove profile generation binary since we are done with it. + $(MAKE) clean-retain-profile + # This is an expensive target to build and it does not have proper + # makefile dependency information. So, we create a "stamp" file + # to record its completion and avoid re-running it. 
+ touch $@ + +build_all_generate_profile: + $(MAKE) all CFLAGS_NODIST="$(CFLAGS_NODIST) $(PGO_PROF_GEN_FLAG)" LDFLAGS_NODIST="$(LDFLAGS_NODIST) $(PGO_PROF_GEN_FLAG)" LIBS="$(LIBS)" + +run_profile_task: + @ # FIXME: can't run for a cross build + $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $(PROFILE_TASK) || true + task="$(PROFILE_TASK)"; \ + case "$$task" in \ + *-s\ *) \ + $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $$task; \ + while [ -f $(abs_builddir)/build/pynexttest ]; do \ + $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $$task; \ + done;; \ + *) \ + $(LLVM_PROF_FILE) $(RUNSHARED) ./$(BUILDPYTHON) $$task; \ + esac + +build_all_merge_profile: + $(LLVM_PROF_MERGER) + +# Compile Python binary with profile guided optimization. +# To force re-running of the profile task, remove the profile-run-stamp file. +profile-opt: profile-run-stamp + @echo "Rebuilding with profile guided optimizations:" + -rm -f profile-clean-stamp + $(MAKE) all CFLAGS_NODIST="$(CFLAGS_NODIST) $(PGO_PROF_USE_FLAG)" LDFLAGS_NODIST="$(LDFLAGS_NODIST)" + +# Compile and run with gcov +.PHONY=coverage coverage-lcov coverage-report +coverage: + @echo "Building with support for coverage checking:" + $(MAKE) clean + $(MAKE) all CFLAGS="$(CFLAGS) -O0 -pg --coverage" LIBS="$(LIBS) --coverage" + +coverage-lcov: + @echo "Creating Coverage HTML report with LCOV:" + @rm -f $(COVERAGE_INFO) + @rm -rf $(COVERAGE_REPORT) + @lcov --capture --directory $(abs_builddir) \ + --base-directory $(realpath $(abs_builddir)) \ + --path $(realpath $(abs_srcdir)) \ + --output-file $(COVERAGE_INFO) + @ # remove 3rd party modules, system headers and internal files with + @ # debug, test or dummy functions. + @lcov --remove $(COVERAGE_INFO) \ + '*/Modules/_blake2/impl/*' \ + '*/Modules/_ctypes/libffi*/*' \ + '*/Modules/_decimal/libmpdec/*' \ + '*/Modules/_sha3/kcp/*' \ + '*/Modules/expat/*' \ + '*/Modules/zlib/*' \ + '*/Include/*' \ + '*/Modules/xx*.c' \ + '*/Python/pyfpe.c' \ + '*/Python/pystrcmp.c' \ + '/usr/include/*' \ + '/usr/local/include/*' \ + '/usr/lib/gcc/*' \ + --output-file $(COVERAGE_INFO) + @genhtml $(COVERAGE_INFO) --output-directory $(COVERAGE_REPORT) \ + $(COVERAGE_REPORT_OPTIONS) + @echo + @echo "lcov report at $(COVERAGE_REPORT)/index.html" + @echo + +# Force regeneration of parser and importlib +coverage-report: regen-token regen-importlib + @ # build with coverage info + $(MAKE) coverage + @ # run tests, ignore failures + $(TESTRUNNER) $(TESTOPTS) || true + @ # build lcov report + $(MAKE) coverage-lcov + +# Run "Argument Clinic" over all source files +.PHONY=clinic +clinic: check-clean-src $(srcdir)/Modules/_blake2/blake2s_impl.c + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/clinic/clinic.py --make --srcdir $(srcdir) + +# Build the interpreter +$(BUILDPYTHON): Programs/python.o $(LIBRARY_DEPS) + $(LINKCC) $(PY_CORE_LDFLAGS) $(LINKFORSHARED) -o $@ Programs/python.o $(BLDLIBRARY) $(LIBS) $(MODLIBS) $(SYSLIBS) + +platform: $(BUILDPYTHON) pybuilddir.txt + $(RUNSHARED) $(PYTHON_FOR_BUILD) -c 'import sys ; from sysconfig import get_platform ; print("%s-%d.%d" % (get_platform(), *sys.version_info[:2]))' >platform + +# Create build directory and generate the sysconfig build-time data there. +# pybuilddir.txt contains the name of the build dir and is used for +# sys.path fixup -- see Modules/getpath.c. +# Since this step runs before shared modules are built, try to avoid bootstrap +# problems by creating a dummy pybuilddir.txt just to allow interpreter +# initialization to succeed. 
It will be overwritten by generate-posix-vars +# or removed in case of failure. +pybuilddir.txt: $(BUILDPYTHON) + @echo "none" > ./pybuilddir.txt + $(RUNSHARED) $(PYTHON_FOR_BUILD) -S -m sysconfig --generate-posix-vars ;\ + if test $$? -ne 0 ; then \ + echo "generate-posix-vars failed" ; \ + rm -f ./pybuilddir.txt ; \ + exit 1 ; \ + fi + +# This is shared by the math and cmath modules +# don't build with -fPIC when building as math and cmath as builtins +Modules/_math.o: Modules/_math.c Modules/_math.h + $(CC) -c $(if $(findstring _math.o, $(MODOBJS)),,$(CCSHARED)) $(PY_CORE_CFLAGS) -o $@ $< + +# blake2s is auto-generated from blake2b +$(srcdir)/Modules/_blake2/blake2s_impl.c: $(srcdir)/Modules/_blake2/blake2b_impl.c $(srcdir)/Modules/_blake2/blake2b2s.py + $(PYTHON_FOR_REGEN) $(srcdir)/Modules/_blake2/blake2b2s.py + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/clinic/clinic.py -f $@ + +# Build the shared modules +# Under GNU make, MAKEFLAGS are sorted and normalized; the 's' for +# -s, --silent or --quiet is always the first char. +# Under BSD make, MAKEFLAGS might be " -s -v x=y". +# Ignore macros passed by GNU make, passed after -- +sharedmods: $(BUILDPYTHON) pybuilddir.txt Modules/_math.o + @case "`echo X $$MAKEFLAGS | sed 's/^X //;s/ -- .*//'`" in \ + *\ -s*|s*) quiet="-q";; \ + *) quiet="";; \ + esac; \ + echo "$(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \ + _TCLTK_INCLUDES='$(TCLTK_INCLUDES)' _TCLTK_LIBS='$(TCLTK_LIBS)' \ + $(PYTHON_FOR_BUILD) $(srcdir)/setup.py $$quiet build"; \ + $(RUNSHARED) CC='$(CC)' LDSHARED='$(BLDSHARED)' OPT='$(OPT)' \ + _TCLTK_INCLUDES='$(TCLTK_INCLUDES)' _TCLTK_LIBS='$(TCLTK_LIBS)' \ + $(PYTHON_FOR_BUILD) $(srcdir)/setup.py $$quiet build + + +# Build static library +$(LIBRARY): $(LIBRARY_OBJS) + -rm -f $@ + $(AR) $(ARFLAGS) $@ $(LIBRARY_OBJS) + +libpython$(LDVERSION).so: $(LIBRARY_OBJS) $(DTRACE_OBJS) + if test $(INSTSONAME) != $(LDLIBRARY); then \ + $(BLDSHARED) -Wl,-h$(INSTSONAME) -o $(INSTSONAME) $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM); \ + $(LN) -f $(INSTSONAME) $@; \ + else \ + $(BLDSHARED) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM); \ + fi + +libpython3.so: libpython$(LDVERSION).so + $(BLDSHARED) $(NO_AS_NEEDED) -o $@ -Wl,-h$@ $^ + +libpython$(LDVERSION).dylib: $(LIBRARY_OBJS) + $(CC) -dynamiclib -Wl,-single_module $(PY_CORE_LDFLAGS) -undefined dynamic_lookup -Wl,-install_name,$(prefix)/lib/libpython$(LDVERSION).dylib -Wl,-compatibility_version,$(VERSION) -Wl,-current_version,$(VERSION) -o $@ $(LIBRARY_OBJS) $(DTRACE_OBJS) $(SHLIBS) $(LIBC) $(LIBM); \ + + +libpython$(VERSION).sl: $(LIBRARY_OBJS) + $(LDSHARED) -o $@ $(LIBRARY_OBJS) $(MODLIBS) $(SHLIBS) $(LIBC) $(LIBM) + +# List of exported symbols for AIX +Modules/python.exp: $(LIBRARY) + $(srcdir)/Modules/makexp_aix $@ "$(EXPORTSFROM)" $? + +# Copy up the gdb python hooks into a position where they can be automatically +# loaded by gdb during Lib/test/test_gdb.py +# +# Distributors are likely to want to install this somewhere else e.g. relative +# to the stripped DWARF data for the shared library. +gdbhooks: $(BUILDPYTHON)-gdb.py + +SRC_GDB_HOOKS=$(srcdir)/Tools/gdb/libpython.py +$(BUILDPYTHON)-gdb.py: $(SRC_GDB_HOOKS) + $(INSTALL_DATA) $(SRC_GDB_HOOKS) $(BUILDPYTHON)-gdb.py + +# This rule is here for OPENSTEP/Rhapsody/MacOSX. It builds a temporary +# minimal framework (not including the Lib directory and such) in the current +# directory. 
+RESSRCDIR=Mac/Resources/framework +$(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK): \ + $(LIBRARY) \ + $(RESSRCDIR)/Info.plist + $(INSTALL) -d -m $(DIRMODE) $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION) + $(CC) -o $(LDLIBRARY) $(PY_CORE_LDFLAGS) -dynamiclib \ + -all_load $(LIBRARY) -Wl,-single_module \ + -install_name $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Versions/$(VERSION)/$(PYTHONFRAMEWORK) \ + -compatibility_version $(VERSION) \ + -current_version $(VERSION) \ + -framework CoreFoundation $(LIBS); + $(INSTALL) -d -m $(DIRMODE) \ + $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/Resources/English.lproj + $(INSTALL_DATA) $(RESSRCDIR)/Info.plist \ + $(PYTHONFRAMEWORKDIR)/Versions/$(VERSION)/Resources/Info.plist + $(LN) -fsn $(VERSION) $(PYTHONFRAMEWORKDIR)/Versions/Current + $(LN) -fsn Versions/Current/$(PYTHONFRAMEWORK) $(PYTHONFRAMEWORKDIR)/$(PYTHONFRAMEWORK) + $(LN) -fsn Versions/Current/Resources $(PYTHONFRAMEWORKDIR)/Resources + +# This rule builds the Cygwin Python DLL and import library if configured +# for a shared core library; otherwise, this rule is a noop. +$(DLLLIBRARY) libpython$(LDVERSION).dll.a: $(LIBRARY_OBJS) + if test -n "$(DLLLIBRARY)"; then \ + $(LDSHARED) -Wl,--out-implib=$@ -o $(DLLLIBRARY) $^ \ + $(LIBS) $(MODLIBS) $(SYSLIBS); \ + else true; \ + fi + + +oldsharedmods: $(SHAREDMODS) + + +Makefile Modules/config.c: Makefile.pre \ + $(srcdir)/Modules/config.c.in \ + $(MAKESETUP) \ + $(srcdir)/Modules/Setup \ + Modules/Setup.local + $(SHELL) $(MAKESETUP) -c $(srcdir)/Modules/config.c.in \ + -s Modules \ + Modules/Setup.local \ + $(srcdir)/Modules/Setup + @mv config.c Modules + @echo "The Makefile was updated, you may need to re-run make." + + +Programs/_testembed: Programs/_testembed.o $(LIBRARY_DEPS) + $(LINKCC) $(PY_CORE_LDFLAGS) $(LINKFORSHARED) -o $@ Programs/_testembed.o $(BLDLIBRARY) $(LIBS) $(MODLIBS) $(SYSLIBS) + +############################################################################ +# Importlib + +Programs/_freeze_importlib.o: Programs/_freeze_importlib.c Makefile + +Programs/_freeze_importlib: Programs/_freeze_importlib.o $(LIBRARY_OBJS_OMIT_FROZEN) + $(LINKCC) $(PY_CORE_LDFLAGS) -o $@ Programs/_freeze_importlib.o $(LIBRARY_OBJS_OMIT_FROZEN) $(LIBS) $(MODLIBS) $(SYSLIBS) + +.PHONY: regen-importlib +regen-importlib: Programs/_freeze_importlib + # Regenerate Python/importlib_external.h + # from Lib/importlib/_bootstrap_external.py using _freeze_importlib + ./Programs/_freeze_importlib importlib._bootstrap_external \ + $(srcdir)/Lib/importlib/_bootstrap_external.py \ + $(srcdir)/Python/importlib_external.h.new + $(UPDATE_FILE) $(srcdir)/Python/importlib_external.h $(srcdir)/Python/importlib_external.h.new + # Regenerate Python/importlib.h from Lib/importlib/_bootstrap.py + # using _freeze_importlib + ./Programs/_freeze_importlib importlib._bootstrap \ + $(srcdir)/Lib/importlib/_bootstrap.py \ + $(srcdir)/Python/importlib.h.new + $(UPDATE_FILE) $(srcdir)/Python/importlib.h $(srcdir)/Python/importlib.h.new + # Regenerate Python/importlib_zipimport.h from Lib/zipimport.py + # using _freeze_importlib + ./Programs/_freeze_importlib zipimport \ + $(srcdir)/Lib/zipimport.py \ + $(srcdir)/Python/importlib_zipimport.h.new + $(UPDATE_FILE) $(srcdir)/Python/importlib_zipimport.h $(srcdir)/Python/importlib_zipimport.h.new + +regen-abidump: all + @$(MKDIR_P) $(srcdir)/Doc/data/ + abidw "libpython$(LDVERSION).so" --no-architecture --out-file $(srcdir)/Doc/data/python$(LDVERSION).abi.new + @$(UPDATE_FILE) $(srcdir)/Doc/data/python$(LDVERSION).abi 
$(srcdir)/Doc/data/python$(LDVERSION).abi.new + +check-abidump: all + abidiff $(srcdir)/Doc/data/python$(LDVERSION).abi "libpython$(LDVERSION).so" --drop-private-types --no-architecture --no-added-syms + +regen-limited-abi: all + $(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/stable_abi.py --generate-all $(srcdir)/Misc/stable_abi.txt + + +############################################################################ +# Regenerate all generated files + +regen-all: regen-opcode regen-opcode-targets regen-typeslots \ + regen-token regen-ast regen-keyword regen-importlib clinic \ + regen-pegen-metaparser regen-pegen regen-frozen + @echo + @echo "Note: make regen-stdlib-module-names and autoconf should be run manually" + +############################################################################ +# Special rules for object files + +Modules/getbuildinfo.o: $(PARSER_OBJS) \ + $(OBJECT_OBJS) \ + $(PYTHON_OBJS) \ + $(MODULE_OBJS) \ + $(MODOBJS) \ + $(DTRACE_OBJS) \ + $(srcdir)/Modules/getbuildinfo.c + $(CC) -c $(PY_CORE_CFLAGS) \ + -DGITVERSION="\"`LC_ALL=C $(GITVERSION)`\"" \ + -DGITTAG="\"`LC_ALL=C $(GITTAG)`\"" \ + -DGITBRANCH="\"`LC_ALL=C $(GITBRANCH)`\"" \ + $(if $(BUILD_DATE),-DDATE='"$(BUILD_DATE)"') \ + $(if $(BUILD_TIME),-DTIME='"$(BUILD_TIME)"') \ + -o $@ $(srcdir)/Modules/getbuildinfo.c + +Modules/getpath.o: $(srcdir)/Modules/getpath.c Makefile + $(CC) -c $(PY_CORE_CFLAGS) -DPYTHONPATH='"$(PYTHONPATH)"' \ + -DPREFIX='"$(prefix)"' \ + -DEXEC_PREFIX='"$(exec_prefix)"' \ + -DVERSION='"$(VERSION)"' \ + -DVPATH='"$(VPATH)"' \ + -o $@ $(srcdir)/Modules/getpath.c + +Programs/python.o: $(srcdir)/Programs/python.c + $(MAINCC) -c $(PY_CORE_CFLAGS) -o $@ $(srcdir)/Programs/python.c + +Programs/_testembed.o: $(srcdir)/Programs/_testembed.c + $(MAINCC) -c $(PY_CORE_CFLAGS) -o $@ $(srcdir)/Programs/_testembed.c + +Modules/_sre.o: $(srcdir)/Modules/_sre.c $(srcdir)/Modules/sre.h $(srcdir)/Modules/sre_constants.h $(srcdir)/Modules/sre_lib.h + +Modules/posixmodule.o: $(srcdir)/Modules/posixmodule.c $(srcdir)/Modules/posixmodule.h + +Modules/grpmodule.o: $(srcdir)/Modules/grpmodule.c $(srcdir)/Modules/posixmodule.h + +Modules/pwdmodule.o: $(srcdir)/Modules/pwdmodule.c $(srcdir)/Modules/posixmodule.h + +Modules/signalmodule.o: $(srcdir)/Modules/signalmodule.c $(srcdir)/Modules/posixmodule.h + +Python/dynload_shlib.o: $(srcdir)/Python/dynload_shlib.c Makefile + $(CC) -c $(PY_CORE_CFLAGS) \ + $(if $(MULTIARCH),-DMULTIARCH='"$(MULTIARCH)"') \ + -DSOABI='"$(SOABI)"' \ + -o $@ $(srcdir)/Python/dynload_shlib.c + +Python/dynload_hpux.o: $(srcdir)/Python/dynload_hpux.c Makefile + $(CC) -c $(PY_CORE_CFLAGS) \ + -DSHLIB_EXT='"$(EXT_SUFFIX)"' \ + -o $@ $(srcdir)/Python/dynload_hpux.c + +Python/sysmodule.o: $(srcdir)/Python/sysmodule.c Makefile $(srcdir)/Include/pydtrace.h + $(CC) -c $(PY_CORE_CFLAGS) \ + -DABIFLAGS='"$(ABIFLAGS)"' \ + $(MULTIARCH_CPPFLAGS) \ + -o $@ $(srcdir)/Python/sysmodule.c + +Python/initconfig.o: $(srcdir)/Python/initconfig.c + $(CC) -c $(PY_CORE_CFLAGS) \ + -DPLATLIBDIR='"$(PLATLIBDIR)"' \ + -o $@ $(srcdir)/Python/initconfig.c + +$(IO_OBJS): $(IO_H) + +.PHONY: regen-pegen-metaparser +regen-pegen-metaparser: + @$(MKDIR_P) $(srcdir)/Tools/peg_generator/pegen + PYTHONPATH=$(srcdir)/Tools/peg_generator $(PYTHON_FOR_REGEN) -m pegen -q python \ + $(srcdir)/Tools/peg_generator/pegen/metagrammar.gram \ + -o $(srcdir)/Tools/peg_generator/pegen/grammar_parser.py.new + $(UPDATE_FILE) $(srcdir)/Tools/peg_generator/pegen/grammar_parser.py \ + $(srcdir)/Tools/peg_generator/pegen/grammar_parser.py.new 
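+
+# Example (illustrative only): after editing the metagrammar or Grammar/python.gram,
+# both generated parsers are usually refreshed in one go with something like
+#
+#   make regen-pegen-metaparser regen-pegen
+#
+# which drives the rule above and the rule below through $(PYTHON_FOR_REGEN).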
+ +.PHONY: regen-pegen +regen-pegen: + @$(MKDIR_P) $(srcdir)/Parser + PYTHONPATH=$(srcdir)/Tools/peg_generator $(PYTHON_FOR_REGEN) -m pegen -q c \ + $(srcdir)/Grammar/python.gram \ + $(srcdir)/Grammar/Tokens \ + -o $(srcdir)/Parser/parser.new.c + $(UPDATE_FILE) $(srcdir)/Parser/parser.c $(srcdir)/Parser/parser.new.c + +.PHONY=regen-ast +regen-ast: + # Regenerate 3 files using using Parser/asdl_c.py: + # - Include/internal/pycore_ast.h + # - Include/internal/pycore_ast_state.h + # - Python/Python-ast.c + $(MKDIR_P) $(srcdir)/Include + $(MKDIR_P) $(srcdir)/Python + $(PYTHON_FOR_REGEN) $(srcdir)/Parser/asdl_c.py \ + $(srcdir)/Parser/Python.asdl \ + -H $(srcdir)/Include/internal/pycore_ast.h.new \ + -I $(srcdir)/Include/internal/pycore_ast_state.h.new \ + -C $(srcdir)/Python/Python-ast.c.new + + $(UPDATE_FILE) $(srcdir)/Include/internal/pycore_ast.h $(srcdir)/Include/internal/pycore_ast.h.new + $(UPDATE_FILE) $(srcdir)/Include/internal/pycore_ast_state.h $(srcdir)/Include/internal/pycore_ast_state.h.new + $(UPDATE_FILE) $(srcdir)/Python/Python-ast.c $(srcdir)/Python/Python-ast.c.new + +.PHONY: regen-opcode +regen-opcode: + # Regenerate Include/opcode.h from Lib/opcode.py + # using Tools/scripts/generate_opcode_h.py + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/scripts/generate_opcode_h.py \ + $(srcdir)/Lib/opcode.py \ + $(srcdir)/Include/opcode.h.new + $(UPDATE_FILE) $(srcdir)/Include/opcode.h $(srcdir)/Include/opcode.h.new + +.PHONY: regen-frozen +regen-frozen: Programs/_freeze_importlib + # Regenerate code for frozen module "__hello__". + ./Programs/_freeze_importlib hello \ + $(srcdir)/Tools/freeze/flag.py \ + $(srcdir)/Python/frozen_hello.h.new + $(UPDATE_FILE) $(srcdir)/Python/frozen_hello.h \ + $(srcdir)/Python/frozen_hello.h.new + +.PHONY: regen-token +regen-token: + # Regenerate Doc/library/token-list.inc from Grammar/Tokens + # using Tools/scripts/generate_token.py + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/scripts/generate_token.py rst \ + $(srcdir)/Grammar/Tokens \ + $(srcdir)/Doc/library/token-list.inc + # Regenerate Include/token.h from Grammar/Tokens + # using Tools/scripts/generate_token.py + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/scripts/generate_token.py h \ + $(srcdir)/Grammar/Tokens \ + $(srcdir)/Include/token.h + # Regenerate Parser/token.c from Grammar/Tokens + # using Tools/scripts/generate_token.py + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/scripts/generate_token.py c \ + $(srcdir)/Grammar/Tokens \ + $(srcdir)/Parser/token.c + # Regenerate Lib/token.py from Grammar/Tokens + # using Tools/scripts/generate_token.py + $(PYTHON_FOR_REGEN) $(srcdir)/Tools/scripts/generate_token.py py \ + $(srcdir)/Grammar/Tokens \ + $(srcdir)/Lib/token.py + +.PHONY: regen-keyword +regen-keyword: + # Regenerate Lib/keyword.py from Grammar/python.gram and Grammar/Tokens + # using Tools/peg_generator/pegen + PYTHONPATH=$(srcdir)/Tools/peg_generator $(PYTHON_FOR_REGEN) -m pegen.keywordgen \ + $(srcdir)/Grammar/python.gram \ + $(srcdir)/Grammar/Tokens \ + $(srcdir)/Lib/keyword.py.new + $(UPDATE_FILE) $(srcdir)/Lib/keyword.py $(srcdir)/Lib/keyword.py.new + +.PHONY: regen-stdlib-module-names +regen-stdlib-module-names: build_all Programs/_testembed + # Regenerate Python/stdlib_module_names.h + # using Tools/scripts/generate_stdlib_module_names.py + $(RUNSHARED) ./$(BUILDPYTHON) \ + $(srcdir)/Tools/scripts/generate_stdlib_module_names.py \ + > $(srcdir)/Python/stdlib_module_names.h.new + $(UPDATE_FILE) $(srcdir)/Python/stdlib_module_names.h $(srcdir)/Python/stdlib_module_names.h.new + +Python/compile.o Python/symtable.o 
Python/ast_unparse.o Python/ast.o Python/future.o: $(srcdir)/Include/internal/pycore_ast.h + +Python/getplatform.o: $(srcdir)/Python/getplatform.c + $(CC) -c $(PY_CORE_CFLAGS) -DPLATFORM='"$(MACHDEP)"' -o $@ $(srcdir)/Python/getplatform.c + +Python/importdl.o: $(srcdir)/Python/importdl.c + $(CC) -c $(PY_CORE_CFLAGS) -I$(DLINCLDIR) -o $@ $(srcdir)/Python/importdl.c + +Objects/unicodectype.o: $(srcdir)/Objects/unicodectype.c \ + $(srcdir)/Objects/unicodetype_db.h + +BYTESTR_DEPS = \ + $(srcdir)/Objects/stringlib/count.h \ + $(srcdir)/Objects/stringlib/ctype.h \ + $(srcdir)/Objects/stringlib/fastsearch.h \ + $(srcdir)/Objects/stringlib/find.h \ + $(srcdir)/Objects/stringlib/join.h \ + $(srcdir)/Objects/stringlib/partition.h \ + $(srcdir)/Objects/stringlib/split.h \ + $(srcdir)/Objects/stringlib/stringdefs.h \ + $(srcdir)/Objects/stringlib/transmogrify.h + +UNICODE_DEPS = \ + $(srcdir)/Objects/stringlib/asciilib.h \ + $(srcdir)/Objects/stringlib/codecs.h \ + $(srcdir)/Objects/stringlib/count.h \ + $(srcdir)/Objects/stringlib/fastsearch.h \ + $(srcdir)/Objects/stringlib/find.h \ + $(srcdir)/Objects/stringlib/find_max_char.h \ + $(srcdir)/Objects/stringlib/localeutil.h \ + $(srcdir)/Objects/stringlib/partition.h \ + $(srcdir)/Objects/stringlib/replace.h \ + $(srcdir)/Objects/stringlib/split.h \ + $(srcdir)/Objects/stringlib/ucs1lib.h \ + $(srcdir)/Objects/stringlib/ucs2lib.h \ + $(srcdir)/Objects/stringlib/ucs4lib.h \ + $(srcdir)/Objects/stringlib/undef.h \ + $(srcdir)/Objects/stringlib/unicode_format.h \ + $(srcdir)/Objects/stringlib/unicodedefs.h + +Objects/bytes_methods.o: $(srcdir)/Objects/bytes_methods.c $(BYTESTR_DEPS) +Objects/bytesobject.o: $(srcdir)/Objects/bytesobject.c $(BYTESTR_DEPS) +Objects/bytearrayobject.o: $(srcdir)/Objects/bytearrayobject.c $(BYTESTR_DEPS) + +Objects/unicodeobject.o: $(srcdir)/Objects/unicodeobject.c $(UNICODE_DEPS) + +Objects/odictobject.o: $(srcdir)/Objects/dict-common.h +Objects/dictobject.o: $(srcdir)/Objects/stringlib/eq.h $(srcdir)/Objects/dict-common.h +Objects/setobject.o: $(srcdir)/Objects/stringlib/eq.h + +.PHONY: regen-opcode-targets +regen-opcode-targets: + # Regenerate Python/opcode_targets.h from Lib/opcode.py + # using Python/makeopcodetargets.py + $(PYTHON_FOR_REGEN) $(srcdir)/Python/makeopcodetargets.py \ + $(srcdir)/Python/opcode_targets.h.new + $(UPDATE_FILE) $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/opcode_targets.h.new + +Python/ceval.o: $(srcdir)/Python/opcode_targets.h $(srcdir)/Python/ceval_gil.h \ + $(srcdir)/Python/condvar.h + +Python/frozen.o: $(srcdir)/Python/importlib.h $(srcdir)/Python/importlib_external.h \ + $(srcdir)/Python/importlib_zipimport.h $(srcdir)/Python/frozen_hello.h + +# Generate DTrace probe macros, then rename them (PYTHON_ -> PyDTrace_) to +# follow our naming conventions. dtrace(1) uses the output filename to generate +# an include guard, so we can't use a pipeline to transform its output. 
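+
+# Illustrative sketch, assuming a probe such as gc__start is declared in
+# pydtrace.d: dtrace -h emits a macro like PYTHON_GC_START(), and the sed step
+# in the rule below renames it to PyDTrace_GC_START() so that core files which
+# depend on pydtrace.h (e.g. Python/ceval.o) use the prefixed name.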
+Include/pydtrace_probes.h: $(srcdir)/Include/pydtrace.d + $(MKDIR_P) Include + $(DTRACE) $(DFLAGS) -o $@ -h -s $< + : sed in-place edit with POSIX-only tools + sed 's/PYTHON_/PyDTrace_/' $@ > $@.tmp + mv $@.tmp $@ + +Python/ceval.o: $(srcdir)/Include/pydtrace.h +Python/import.o: $(srcdir)/Include/pydtrace.h +Modules/gcmodule.o: $(srcdir)/Include/pydtrace.h + +Python/pydtrace.o: $(srcdir)/Include/pydtrace.d $(DTRACE_DEPS) + $(DTRACE) $(DFLAGS) -o $@ -G -s $< $(DTRACE_DEPS) + +Objects/typeobject.o: Objects/typeslots.inc + +.PHONY: regen-typeslots +regen-typeslots: + # Regenerate Objects/typeslots.inc from Include/typeslotsh + # using Objects/typeslots.py + $(PYTHON_FOR_REGEN) $(srcdir)/Objects/typeslots.py \ + < $(srcdir)/Include/typeslots.h \ + $(srcdir)/Objects/typeslots.inc.new + $(UPDATE_FILE) $(srcdir)/Objects/typeslots.inc $(srcdir)/Objects/typeslots.inc.new + +############################################################################ +# Header files + +PYTHON_HEADERS= \ + $(srcdir)/Include/Python.h \ + $(srcdir)/Include/abstract.h \ + $(srcdir)/Include/bltinmodule.h \ + $(srcdir)/Include/boolobject.h \ + $(srcdir)/Include/bytearrayobject.h \ + $(srcdir)/Include/bytesobject.h \ + $(srcdir)/Include/cellobject.h \ + $(srcdir)/Include/ceval.h \ + $(srcdir)/Include/classobject.h \ + $(srcdir)/Include/code.h \ + $(srcdir)/Include/codecs.h \ + $(srcdir)/Include/compile.h \ + $(srcdir)/Include/complexobject.h \ + $(srcdir)/Include/context.h \ + $(srcdir)/Include/descrobject.h \ + $(srcdir)/Include/dictobject.h \ + $(srcdir)/Include/dynamic_annotations.h \ + $(srcdir)/Include/enumobject.h \ + $(srcdir)/Include/errcode.h \ + $(srcdir)/Include/eval.h \ + $(srcdir)/Include/fileobject.h \ + $(srcdir)/Include/fileutils.h \ + $(srcdir)/Include/floatobject.h \ + $(srcdir)/Include/frameobject.h \ + $(srcdir)/Include/funcobject.h \ + $(srcdir)/Include/genobject.h \ + $(srcdir)/Include/import.h \ + $(srcdir)/Include/interpreteridobject.h \ + $(srcdir)/Include/intrcheck.h \ + $(srcdir)/Include/iterobject.h \ + $(srcdir)/Include/listobject.h \ + $(srcdir)/Include/longintrepr.h \ + $(srcdir)/Include/longobject.h \ + $(srcdir)/Include/marshal.h \ + $(srcdir)/Include/memoryobject.h \ + $(srcdir)/Include/methodobject.h \ + $(srcdir)/Include/modsupport.h \ + $(srcdir)/Include/moduleobject.h \ + $(srcdir)/Include/namespaceobject.h \ + $(srcdir)/Include/object.h \ + $(srcdir)/Include/objimpl.h \ + $(srcdir)/Include/opcode.h \ + $(srcdir)/Include/osdefs.h \ + $(srcdir)/Include/osmodule.h \ + $(srcdir)/Include/patchlevel.h \ + $(srcdir)/Include/pycapsule.h \ + $(srcdir)/Include/pydtrace.h \ + $(srcdir)/Include/pyerrors.h \ + $(srcdir)/Include/pyframe.h \ + $(srcdir)/Include/pyhash.h \ + $(srcdir)/Include/pylifecycle.h \ + $(srcdir)/Include/pymacconfig.h \ + $(srcdir)/Include/pymacro.h \ + $(srcdir)/Include/pymath.h \ + $(srcdir)/Include/pymem.h \ + $(srcdir)/Include/pyport.h \ + $(srcdir)/Include/pystate.h \ + $(srcdir)/Include/pystrcmp.h \ + $(srcdir)/Include/pystrhex.h \ + $(srcdir)/Include/pystrtod.h \ + $(srcdir)/Include/pythonrun.h \ + $(srcdir)/Include/pythread.h \ + $(srcdir)/Include/rangeobject.h \ + $(srcdir)/Include/setobject.h \ + $(srcdir)/Include/sliceobject.h \ + $(srcdir)/Include/structmember.h \ + $(srcdir)/Include/structseq.h \ + $(srcdir)/Include/sysmodule.h \ + $(srcdir)/Include/token.h \ + $(srcdir)/Include/traceback.h \ + $(srcdir)/Include/tracemalloc.h \ + $(srcdir)/Include/tupleobject.h \ + $(srcdir)/Include/unicodeobject.h \ + $(srcdir)/Include/warnings.h \ + 
$(srcdir)/Include/weakrefobject.h \ + \ + pyconfig.h \ + $(PARSER_HEADERS) \ + \ + $(srcdir)/Include/cpython/abstract.h \ + $(srcdir)/Include/cpython/bytearrayobject.h \ + $(srcdir)/Include/cpython/bytesobject.h \ + $(srcdir)/Include/cpython/ceval.h \ + $(srcdir)/Include/cpython/code.h \ + $(srcdir)/Include/cpython/compile.h \ + $(srcdir)/Include/cpython/dictobject.h \ + $(srcdir)/Include/cpython/fileobject.h \ + $(srcdir)/Include/cpython/fileutils.h \ + $(srcdir)/Include/cpython/frameobject.h \ + $(srcdir)/Include/cpython/import.h \ + $(srcdir)/Include/cpython/initconfig.h \ + $(srcdir)/Include/cpython/interpreteridobject.h \ + $(srcdir)/Include/cpython/listobject.h \ + $(srcdir)/Include/cpython/methodobject.h \ + $(srcdir)/Include/cpython/object.h \ + $(srcdir)/Include/cpython/objimpl.h \ + $(srcdir)/Include/cpython/odictobject.h \ + $(srcdir)/Include/cpython/picklebufobject.h \ + $(srcdir)/Include/cpython/pyctype.h \ + $(srcdir)/Include/cpython/pydebug.h \ + $(srcdir)/Include/cpython/pyerrors.h \ + $(srcdir)/Include/cpython/pyfpe.h \ + $(srcdir)/Include/cpython/pylifecycle.h \ + $(srcdir)/Include/cpython/pymem.h \ + $(srcdir)/Include/cpython/pystate.h \ + $(srcdir)/Include/cpython/pythonrun.h \ + $(srcdir)/Include/cpython/pytime.h \ + $(srcdir)/Include/cpython/sysmodule.h \ + $(srcdir)/Include/cpython/traceback.h \ + $(srcdir)/Include/cpython/tupleobject.h \ + $(srcdir)/Include/cpython/unicodeobject.h \ + \ + $(srcdir)/Include/internal/pycore_abstract.h \ + $(srcdir)/Include/internal/pycore_accu.h \ + $(srcdir)/Include/internal/pycore_asdl.h \ + $(srcdir)/Include/internal/pycore_ast.h \ + $(srcdir)/Include/internal/pycore_ast_state.h \ + $(srcdir)/Include/internal/pycore_atomic.h \ + $(srcdir)/Include/internal/pycore_atomic_funcs.h \ + $(srcdir)/Include/internal/pycore_bitutils.h \ + $(srcdir)/Include/internal/pycore_bytes_methods.h \ + $(srcdir)/Include/internal/pycore_call.h \ + $(srcdir)/Include/internal/pycore_ceval.h \ + $(srcdir)/Include/internal/pycore_code.h \ + $(srcdir)/Include/internal/pycore_compile.h \ + $(srcdir)/Include/internal/pycore_condvar.h \ + $(srcdir)/Include/internal/pycore_context.h \ + $(srcdir)/Include/internal/pycore_dtoa.h \ + $(srcdir)/Include/internal/pycore_fileutils.h \ + $(srcdir)/Include/internal/pycore_format.h \ + $(srcdir)/Include/internal/pycore_getopt.h \ + $(srcdir)/Include/internal/pycore_gil.h \ + $(srcdir)/Include/internal/pycore_hamt.h \ + $(srcdir)/Include/internal/pycore_hashtable.h \ + $(srcdir)/Include/internal/pycore_import.h \ + $(srcdir)/Include/internal/pycore_initconfig.h \ + $(srcdir)/Include/internal/pycore_interp.h \ + $(srcdir)/Include/internal/pycore_list.h \ + $(srcdir)/Include/internal/pycore_long.h \ + $(srcdir)/Include/internal/pycore_moduleobject.h \ + $(srcdir)/Include/internal/pycore_object.h \ + $(srcdir)/Include/internal/pycore_pathconfig.h \ + $(srcdir)/Include/internal/pycore_pyarena.h \ + $(srcdir)/Include/internal/pycore_pyerrors.h \ + $(srcdir)/Include/internal/pycore_pyhash.h \ + $(srcdir)/Include/internal/pycore_pylifecycle.h \ + $(srcdir)/Include/internal/pycore_pymem.h \ + $(srcdir)/Include/internal/pycore_pystate.h \ + $(srcdir)/Include/internal/pycore_runtime.h \ + $(srcdir)/Include/internal/pycore_structseq.h \ + $(srcdir)/Include/internal/pycore_symtable.h \ + $(srcdir)/Include/internal/pycore_sysmodule.h \ + $(srcdir)/Include/internal/pycore_traceback.h \ + $(srcdir)/Include/internal/pycore_tuple.h \ + $(srcdir)/Include/internal/pycore_ucnhash.h \ + $(srcdir)/Include/internal/pycore_unionobject.h \ + 
$(srcdir)/Include/internal/pycore_warnings.h \ + $(DTRACE_HEADERS) \ + \ + $(srcdir)/Python/stdlib_module_names.h + +$(LIBRARY_OBJS) $(MODOBJS) Programs/python.o: $(PYTHON_HEADERS) + + +###################################################################### + +TESTOPTS= $(EXTRATESTOPTS) +TESTPYTHON= $(RUNSHARED) ./$(BUILDPYTHON) $(TESTPYTHONOPTS) +TESTRUNNER= $(TESTPYTHON) $(srcdir)/Tools/scripts/run_tests.py +TESTTIMEOUT= 1200 + +.PHONY: test testall testuniversal buildbottest pythoninfo + +# Remove "test_python_*" directories of previous failed test jobs. +# Pass TESTOPTS options because it can contain --tempdir option. +cleantest: build_all + $(TESTRUNNER) $(TESTOPTS) --cleanup + +# Run a basic set of regression tests. +# This excludes some tests that are particularly resource-intensive. +test: all platform + $(TESTRUNNER) $(TESTOPTS) + +# Run the full test suite twice - once without .pyc files, and once with. +# In the past, we've had problems where bugs in the marshalling or +# elsewhere caused bytecode read from .pyc files to behave differently +# than bytecode generated directly from a .py source file. Sometimes +# the bytecode read from a .pyc file had the bug, sometimes the directly +# generated bytecode. This is sometimes a very shy bug needing a lot of +# sample data. +testall: all platform + -find $(srcdir)/Lib -name '*.py[co]' -print | xargs rm -f + $(TESTPYTHON) -E $(srcdir)/Lib/compileall.py + -find $(srcdir)/Lib -name '*.py[co]' -print | xargs rm -f + -$(TESTRUNNER) -u all $(TESTOPTS) + $(TESTRUNNER) -u all $(TESTOPTS) + +# Run the test suite for both architectures in a Universal build on OSX. +# Must be run on an Intel box. +testuniversal: all platform + @if [ `arch` != 'i386' ]; then \ + echo "This can only be used on OSX/i386" ;\ + exit 1 ;\ + fi + $(TESTRUNNER) -u all $(TESTOPTS) + $(RUNSHARED) /usr/libexec/oah/translate \ + ./$(BUILDPYTHON) -E -m test -j 0 -u all $(TESTOPTS) + +# Like testall, but with only one pass and without multiple processes. +# Run an optional script to include information about the build environment. 
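+
+# A typical buildbot-style run (sketch; test_gdb is only an example exclusion)
+# overrides the timeout and forwards extra regrtest options via TESTOPTS:
+#
+#   make buildbottest TESTTIMEOUT=900 TESTOPTS="-x test_gdb"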
+buildbottest: build_all platform + -@if which pybuildbot.identify >/dev/null 2>&1; then \ + pybuildbot.identify "CC='$(CC)'" "CXX='$(CXX)'"; \ + fi + $(TESTRUNNER) -j 1 -u all -W --slowest --fail-env-changed --timeout=$(TESTTIMEOUT) $(TESTOPTS) + +pythoninfo: build_all + $(RUNSHARED) ./$(BUILDPYTHON) -m test.pythoninfo + +QUICKTESTOPTS= $(TESTOPTS) -x test_subprocess test_io test_lib2to3 \ + test_multibytecodec test_urllib2_localnet test_itertools \ + test_multiprocessing_fork test_multiprocessing_spawn \ + test_multiprocessing_forkserver \ + test_mailbox test_socket test_poll \ + test_select test_zipfile test_concurrent_futures +quicktest: all platform + $(TESTRUNNER) $(QUICKTESTOPTS) + +# SSL tests +.PHONY: multisslcompile multissltest +multisslcompile: build_all + $(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/ssl/multissltests.py --steps=modules + +multissltest: build_all + $(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/ssl/multissltests.py + +install: commoninstall bininstall maninstall + if test "x$(ENSUREPIP)" != "xno" ; then \ + case $(ENSUREPIP) in \ + upgrade) ensurepip="--upgrade" ;; \ + install|*) ensurepip="" ;; \ + esac; \ + $(RUNSHARED) $(PYTHON_FOR_BUILD) -m ensurepip \ + $$ensurepip --root=$(DESTDIR)/ ; \ + fi + +altinstall: commoninstall + if test "x$(ENSUREPIP)" != "xno" ; then \ + case $(ENSUREPIP) in \ + upgrade) ensurepip="--altinstall --upgrade" ;; \ + install|*) ensurepip="--altinstall" ;; \ + esac; \ + $(RUNSHARED) $(PYTHON_FOR_BUILD) -m ensurepip \ + $$ensurepip --root=$(DESTDIR)/ ; \ + fi + +commoninstall: check-clean-src \ + altbininstall libinstall inclinstall libainstall \ + sharedinstall oldsharedinstall altmaninstall \ + + +# Install shared libraries enabled by Setup +DESTDIRS= $(exec_prefix) $(LIBDIR) $(BINLIBDEST) $(DESTSHARED) + +oldsharedinstall: $(DESTSHARED) $(SHAREDMODS) + @for i in X $(SHAREDMODS); do \ + if test $$i != X; then \ + echo $(INSTALL_SHARED) $$i $(DESTSHARED)/`basename $$i`; \ + $(INSTALL_SHARED) $$i $(DESTDIR)$(DESTSHARED)/`basename $$i`; \ + fi; \ + done + +$(DESTSHARED): + @for i in $(DESTDIRS); \ + do \ + if test ! -d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + +# Install the interpreter with $(VERSION) affixed +# This goes into $(exec_prefix) +altbininstall: $(BUILDPYTHON) + @for i in $(BINDIR) $(LIBDIR); \ + do \ + if test ! 
-d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + if test "$(PYTHONFRAMEWORKDIR)" = "no-framework" ; then \ + $(INSTALL_PROGRAM) $(BUILDPYTHON) $(DESTDIR)$(BINDIR)/python$(LDVERSION)$(EXE); \ + else \ + $(INSTALL_PROGRAM) $(STRIPFLAG) Mac/pythonw $(DESTDIR)$(BINDIR)/python$(LDVERSION)$(EXE); \ + fi + -if test "$(VERSION)" != "$(LDVERSION)"; then \ + if test -f $(DESTDIR)$(BINDIR)/python$(VERSION)$(EXE) -o -h $(DESTDIR)$(BINDIR)/python$(VERSION)$(EXE); \ + then rm -f $(DESTDIR)$(BINDIR)/python$(VERSION)$(EXE); \ + fi; \ + (cd $(DESTDIR)$(BINDIR); $(LN) python$(LDVERSION)$(EXE) python$(VERSION)$(EXE)); \ + fi + @if test "$(PY_ENABLE_SHARED)" = 1 -o "$(STATIC_LIBPYTHON)" = 1; then \ + if test -f $(LDLIBRARY) && test "$(PYTHONFRAMEWORKDIR)" = "no-framework" ; then \ + if test -n "$(DLLLIBRARY)" ; then \ + $(INSTALL_SHARED) $(DLLLIBRARY) $(DESTDIR)$(BINDIR); \ + else \ + $(INSTALL_SHARED) $(LDLIBRARY) $(DESTDIR)$(LIBDIR)/$(INSTSONAME); \ + if test $(LDLIBRARY) != $(INSTSONAME); then \ + (cd $(DESTDIR)$(LIBDIR); $(LN) -sf $(INSTSONAME) $(LDLIBRARY)) \ + fi \ + fi; \ + if test -n "$(PY3LIBRARY)"; then \ + $(INSTALL_SHARED) $(PY3LIBRARY) $(DESTDIR)$(LIBDIR)/$(PY3LIBRARY); \ + fi; \ + else true; \ + fi; \ + fi + if test "x$(LIPO_32BIT_FLAGS)" != "x" ; then \ + rm -f $(DESTDIR)$(BINDIR)python$(VERSION)-32$(EXE); \ + lipo $(LIPO_32BIT_FLAGS) \ + -output $(DESTDIR)$(BINDIR)/python$(VERSION)-32$(EXE) \ + $(DESTDIR)$(BINDIR)/python$(VERSION)$(EXE); \ + fi + if test "x$(LIPO_INTEL64_FLAGS)" != "x" ; then \ + rm -f $(DESTDIR)$(BINDIR)python$(VERSION)-intel64$(EXE); \ + lipo $(LIPO_INTEL64_FLAGS) \ + -output $(DESTDIR)$(BINDIR)/python$(VERSION)-intel64$(EXE) \ + $(DESTDIR)$(BINDIR)/python$(VERSION)$(EXE); \ + fi + +bininstall: altbininstall + if test ! 
-d $(DESTDIR)$(LIBPC); then \ + echo "Creating directory $(LIBPC)"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$(LIBPC); \ + fi + -if test -f $(DESTDIR)$(BINDIR)/python3$(EXE) -o -h $(DESTDIR)$(BINDIR)/python3$(EXE); \ + then rm -f $(DESTDIR)$(BINDIR)/python3$(EXE); \ + else true; \ + fi + (cd $(DESTDIR)$(BINDIR); $(LN) -s python$(VERSION)$(EXE) python3$(EXE)) + -if test "$(VERSION)" != "$(LDVERSION)"; then \ + rm -f $(DESTDIR)$(BINDIR)/python$(VERSION)-config; \ + (cd $(DESTDIR)$(BINDIR); $(LN) -s python$(LDVERSION)-config python$(VERSION)-config); \ + rm -f $(DESTDIR)$(LIBPC)/python-$(LDVERSION).pc; \ + (cd $(DESTDIR)$(LIBPC); $(LN) -s python-$(VERSION).pc python-$(LDVERSION).pc); \ + rm -f $(DESTDIR)$(LIBPC)/python-$(LDVERSION)-embed.pc; \ + (cd $(DESTDIR)$(LIBPC); $(LN) -s python-$(VERSION)-embed.pc python-$(LDVERSION)-embed.pc); \ + fi + -rm -f $(DESTDIR)$(BINDIR)/python3-config + (cd $(DESTDIR)$(BINDIR); $(LN) -s python$(VERSION)-config python3-config) + -rm -f $(DESTDIR)$(LIBPC)/python3.pc + (cd $(DESTDIR)$(LIBPC); $(LN) -s python-$(VERSION).pc python3.pc) + -rm -f $(DESTDIR)$(LIBPC)/python3-embed.pc + (cd $(DESTDIR)$(LIBPC); $(LN) -s python-$(VERSION)-embed.pc python3-embed.pc) + -rm -f $(DESTDIR)$(BINDIR)/idle3 + (cd $(DESTDIR)$(BINDIR); $(LN) -s idle$(VERSION) idle3) + -rm -f $(DESTDIR)$(BINDIR)/pydoc3 + (cd $(DESTDIR)$(BINDIR); $(LN) -s pydoc$(VERSION) pydoc3) + -rm -f $(DESTDIR)$(BINDIR)/2to3 + (cd $(DESTDIR)$(BINDIR); $(LN) -s 2to3-$(VERSION) 2to3) + if test "x$(LIPO_32BIT_FLAGS)" != "x" ; then \ + rm -f $(DESTDIR)$(BINDIR)/python3-32$(EXE); \ + (cd $(DESTDIR)$(BINDIR); $(LN) -s python$(VERSION)-32$(EXE) python3-32$(EXE)) \ + fi + if test "x$(LIPO_INTEL64_FLAGS)" != "x" ; then \ + rm -f $(DESTDIR)$(BINDIR)/python3-intel64$(EXE); \ + (cd $(DESTDIR)$(BINDIR); $(LN) -s python$(VERSION)-intel64$(EXE) python3-intel64$(EXE)) \ + fi + +# Install the versioned manual page +altmaninstall: + @for i in $(MANDIR) $(MANDIR)/man1; \ + do \ + if test ! 
-d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + $(INSTALL_DATA) $(srcdir)/Misc/python.man \ + $(DESTDIR)$(MANDIR)/man1/python$(VERSION).1 + +# Install the unversioned manual page +maninstall: altmaninstall + -rm -f $(DESTDIR)$(MANDIR)/man1/python3.1 + (cd $(DESTDIR)$(MANDIR)/man1; $(LN) -s python$(VERSION).1 python3.1) + +# Install the library +XMLLIBSUBDIRS= xml xml/dom xml/etree xml/parsers xml/sax +LIBSUBDIRS= asyncio \ + collections \ + concurrent concurrent/futures \ + csv \ + ctypes ctypes/macholib \ + curses \ + dbm \ + distutils distutils/command \ + email email/mime \ + encodings \ + ensurepip ensurepip/_bundled \ + html \ + http \ + idlelib idlelib/Icons \ + importlib importlib/metadata \ + json \ + lib2to3 lib2to3/fixes lib2to3/pgen2 \ + logging \ + multiprocessing multiprocessing/dummy \ + pydoc_data \ + site-packages \ + sqlite3 \ + tkinter \ + turtledemo \ + unittest \ + urllib \ + venv venv/scripts venv/scripts/common venv/scripts/posix \ + wsgiref \ + $(XMLLIBSUBDIRS) \ + xmlrpc \ + zoneinfo +TESTSUBDIRS= ctypes/test \ + distutils/tests \ + idlelib/idle_test \ + lib2to3/tests \ + lib2to3/tests/data \ + lib2to3/tests/data/fixers \ + lib2to3/tests/data/fixers/myfixes \ + sqlite3/test \ + test test/audiodata \ + test/capath test/cjkencodings \ + test/data test/decimaltestdata \ + test/dtracedata test/eintrdata \ + test/encoded_modules test/imghdrdata \ + test/libregrtest test/sndhdrdata \ + test/subprocessdata test/support \ + test/test_asyncio \ + test/test_email test/test_email/data \ + test/test_import \ + test/test_import/data \ + test/test_import/data/circular_imports \ + test/test_import/data/circular_imports/subpkg \ + test/test_import/data/package \ + test/test_import/data/package2 \ + test/test_import/data/unwritable \ + test/test_importlib \ + test/test_importlib/builtin \ + test/test_importlib/data \ + test/test_importlib/data01 \ + test/test_importlib/data01/subdirectory \ + test/test_importlib/data02 \ + test/test_importlib/data02/one \ + test/test_importlib/data02/two \ + test/test_importlib/data03 \ + test/test_importlib/data03/namespace \ + test/test_importlib/data03/namespace/portion1 \ + test/test_importlib/data03/namespace/portion2 \ + test/test_importlib/extension \ + test/test_importlib/frozen \ + test/test_importlib/import_ \ + test/test_importlib/namespace_pkgs \ + test/test_importlib/namespace_pkgs/both_portions \ + test/test_importlib/namespace_pkgs/both_portions/foo \ + test/test_importlib/namespace_pkgs/module_and_namespace_package \ + test/test_importlib/namespace_pkgs/module_and_namespace_package/a_test \ + test/test_importlib/namespace_pkgs/not_a_namespace_pkg \ + test/test_importlib/namespace_pkgs/not_a_namespace_pkg/foo \ + test/test_importlib/namespace_pkgs/portion1 \ + test/test_importlib/namespace_pkgs/portion1/foo \ + test/test_importlib/namespace_pkgs/portion2 \ + test/test_importlib/namespace_pkgs/portion2/foo \ + test/test_importlib/namespace_pkgs/project1 \ + test/test_importlib/namespace_pkgs/project1/parent \ + test/test_importlib/namespace_pkgs/project1/parent/child \ + test/test_importlib/namespace_pkgs/project2 \ + test/test_importlib/namespace_pkgs/project2/parent \ + test/test_importlib/namespace_pkgs/project2/parent/child \ + test/test_importlib/namespace_pkgs/project3 \ + test/test_importlib/namespace_pkgs/project3/parent \ + test/test_importlib/namespace_pkgs/project3/parent/child \ + test/test_importlib/namespacedata01 \ + 
test/test_importlib/partial \ + test/test_importlib/source \ + test/test_importlib/zipdata01 \ + test/test_importlib/zipdata02 \ + test/test_json \ + test/test_peg_generator \ + test/test_tools \ + test/test_warnings test/test_warnings/data \ + test/test_zoneinfo test/test_zoneinfo/data \ + test/tracedmodules \ + test/typinganndata \ + test/xmltestdata test/xmltestdata/c14n-20 \ + test/ziptestdata \ + tkinter/test tkinter/test/test_tkinter \ + tkinter/test/test_ttk \ + unittest/test unittest/test/testmock + +TEST_MODULES=yes +libinstall: build_all $(srcdir)/Modules/xxmodule.c + @for i in $(SCRIPTDIR) $(LIBDEST); \ + do \ + if test ! -d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + @if test "$(TEST_MODULES)" = yes; then \ + subdirs="$(LIBSUBDIRS) $(TESTSUBDIRS)"; \ + else \ + subdirs="$(LIBSUBDIRS)"; \ + fi; \ + for d in $$subdirs; \ + do \ + a=$(srcdir)/Lib/$$d; \ + if test ! -d $$a; then continue; else true; fi; \ + b=$(LIBDEST)/$$d; \ + if test ! -d $(DESTDIR)$$b; then \ + echo "Creating directory $$b"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$b; \ + else true; \ + fi; \ + done + @for i in $(srcdir)/Lib/*.py; \ + do \ + if test -x $$i; then \ + $(INSTALL_SCRIPT) $$i $(DESTDIR)$(LIBDEST); \ + echo $(INSTALL_SCRIPT) $$i $(LIBDEST); \ + else \ + $(INSTALL_DATA) $$i $(DESTDIR)$(LIBDEST); \ + echo $(INSTALL_DATA) $$i $(LIBDEST); \ + fi; \ + done + @if test "$(TEST_MODULES)" = yes; then \ + subdirs="$(LIBSUBDIRS) $(TESTSUBDIRS)"; \ + else \ + subdirs="$(LIBSUBDIRS)"; \ + fi; \ + for d in $$subdirs; \ + do \ + a=$(srcdir)/Lib/$$d; \ + if test ! -d $$a; then continue; else true; fi; \ + if test `ls $$a | wc -l` -lt 1; then continue; fi; \ + b=$(LIBDEST)/$$d; \ + for i in $$a/*; \ + do \ + case $$i in \ + *CVS) ;; \ + *.py[co]) ;; \ + *.orig) ;; \ + *~) ;; \ + *) \ + if test -d $$i; then continue; fi; \ + if test -x $$i; then \ + echo $(INSTALL_SCRIPT) $$i $$b; \ + $(INSTALL_SCRIPT) $$i $(DESTDIR)$$b; \ + else \ + echo $(INSTALL_DATA) $$i $$b; \ + $(INSTALL_DATA) $$i $(DESTDIR)$$b; \ + fi;; \ + esac; \ + done; \ + done + $(INSTALL_DATA) `cat pybuilddir.txt`/_sysconfigdata_$(ABIFLAGS)_$(MULTIARCH).py \ + $(DESTDIR)$(LIBDEST) + $(LN) -s _sysconfigdata_$(ABIFLAGS)_$(MULTIARCH).py \ + $(DESTDIR)$(LIBDEST)/_sysconfigdata_$(ABIFLAGS)_$(MACHDEP)_$(MULTIARCH).py + $(INSTALL_DATA) $(srcdir)/LICENSE $(DESTDIR)$(LIBDEST)/LICENSE.txt + if test -d $(DESTDIR)$(LIBDEST)/distutils/tests; then \ + $(INSTALL_DATA) $(srcdir)/Modules/xxmodule.c \ + $(DESTDIR)$(LIBDEST)/distutils/tests ; \ + fi + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -Wi $(DESTDIR)$(LIBDEST)/compileall.py \ + -j0 -d $(LIBDEST) -f \ + -x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \ + $(DESTDIR)$(LIBDEST) + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -Wi -O $(DESTDIR)$(LIBDEST)/compileall.py \ + -j0 -d $(LIBDEST) -f \ + -x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \ + $(DESTDIR)$(LIBDEST) + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -Wi -OO $(DESTDIR)$(LIBDEST)/compileall.py \ + -j0 -d $(LIBDEST) -f \ + -x 'bad_coding|badsyntax|site-packages|lib2to3/tests/data' \ + $(DESTDIR)$(LIBDEST) + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -Wi $(DESTDIR)$(LIBDEST)/compileall.py \ + -j0 -d $(LIBDEST)/site-packages -f \ + -x badsyntax $(DESTDIR)$(LIBDEST)/site-packages + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + 
$(PYTHON_FOR_BUILD) -Wi -O $(DESTDIR)$(LIBDEST)/compileall.py \ + -j0 -d $(LIBDEST)/site-packages -f \ + -x badsyntax $(DESTDIR)$(LIBDEST)/site-packages + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -Wi -OO $(DESTDIR)$(LIBDEST)/compileall.py \ + -j0 -d $(LIBDEST)/site-packages -f \ + -x badsyntax $(DESTDIR)$(LIBDEST)/site-packages + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/Grammar.txt + -PYTHONPATH=$(DESTDIR)$(LIBDEST) $(RUNSHARED) \ + $(PYTHON_FOR_BUILD) -m lib2to3.pgen2.driver $(DESTDIR)$(LIBDEST)/lib2to3/PatternGrammar.txt + +# bpo-21536: Misc/python-config.sh is generated in the build directory +# from $(srcdir)Misc/python-config.sh.in. +python-config: $(srcdir)/Misc/python-config.in Misc/python-config.sh + @ # Substitution happens here, as the completely-expanded BINDIR + @ # is not available in configure + sed -e "s,@EXENAME@,$(BINDIR)/python$(LDVERSION)$(EXE)," < $(srcdir)/Misc/python-config.in >python-config.py + @ # Replace makefile compat. variable references with shell script compat. ones; $(VAR) -> ${VAR} + LC_ALL=C sed -e 's,\$$(\([A-Za-z0-9_]*\)),\$$\{\1\},g' < Misc/python-config.sh >python-config + @ # On Darwin, always use the python version of the script, the shell + @ # version doesn't use the compiler customizations that are provided + @ # in python (_osx_support.py). + @if test `uname -s` = Darwin; then \ + cp python-config.py python-config; \ + fi + + +# Install the include files +INCLDIRSTOMAKE=$(INCLUDEDIR) $(CONFINCLUDEDIR) $(INCLUDEPY) $(CONFINCLUDEPY) +inclinstall: + @for i in $(INCLDIRSTOMAKE); \ + do \ + if test ! -d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + @if test ! -d $(DESTDIR)$(INCLUDEPY)/cpython; then \ + echo "Creating directory $(DESTDIR)$(INCLUDEPY)/cpython"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$(INCLUDEPY)/cpython; \ + else true; \ + fi + @if test ! -d $(DESTDIR)$(INCLUDEPY)/internal; then \ + echo "Creating directory $(DESTDIR)$(INCLUDEPY)/internal"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$(INCLUDEPY)/internal; \ + else true; \ + fi + @for i in $(srcdir)/Include/*.h; \ + do \ + echo $(INSTALL_DATA) $$i $(INCLUDEPY); \ + $(INSTALL_DATA) $$i $(DESTDIR)$(INCLUDEPY); \ + done + @for i in $(srcdir)/Include/cpython/*.h; \ + do \ + echo $(INSTALL_DATA) $$i $(INCLUDEPY)/cpython; \ + $(INSTALL_DATA) $$i $(DESTDIR)$(INCLUDEPY)/cpython; \ + done + @for i in $(srcdir)/Include/internal/*.h; \ + do \ + echo $(INSTALL_DATA) $$i $(INCLUDEPY)/internal; \ + $(INSTALL_DATA) $$i $(DESTDIR)$(INCLUDEPY)/internal; \ + done + $(INSTALL_DATA) pyconfig.h $(DESTDIR)$(CONFINCLUDEPY)/pyconfig.h + +# Install the library and miscellaneous stuff needed for extending/embedding +# This goes into $(exec_prefix) +LIBPL= $(prefix)/lib/python3.10/config-$(VERSION)$(ABIFLAGS)-x86_64-linux-gnu + +# pkgconfig directory +LIBPC= $(LIBDIR)/pkgconfig + +libainstall: all python-config + @for i in $(LIBDIR) $(LIBPL) $(LIBPC) $(BINDIR); \ + do \ + if test ! 
-d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + @if test "$(STATIC_LIBPYTHON)" = 1; then \ + if test -d $(LIBRARY); then :; else \ + if test "$(PYTHONFRAMEWORKDIR)" = no-framework; then \ + if test "$(SHLIB_SUFFIX)" = .dll; then \ + $(INSTALL_DATA) $(LDLIBRARY) $(DESTDIR)$(LIBPL) ; \ + else \ + $(INSTALL_DATA) $(LIBRARY) $(DESTDIR)$(LIBPL)/$(LIBRARY) ; \ + fi; \ + else \ + echo Skip install of $(LIBRARY) - use make frameworkinstall; \ + fi; \ + fi; \ + $(INSTALL_DATA) Programs/python.o $(DESTDIR)$(LIBPL)/python.o; \ + fi + $(INSTALL_DATA) Modules/config.c $(DESTDIR)$(LIBPL)/config.c + $(INSTALL_DATA) $(srcdir)/Modules/config.c.in $(DESTDIR)$(LIBPL)/config.c.in + $(INSTALL_DATA) Makefile $(DESTDIR)$(LIBPL)/Makefile + $(INSTALL_DATA) $(srcdir)/Modules/Setup $(DESTDIR)$(LIBPL)/Setup + $(INSTALL_DATA) Modules/Setup.local $(DESTDIR)$(LIBPL)/Setup.local + $(INSTALL_DATA) Misc/python.pc $(DESTDIR)$(LIBPC)/python-$(VERSION).pc + $(INSTALL_DATA) Misc/python-embed.pc $(DESTDIR)$(LIBPC)/python-$(VERSION)-embed.pc + $(INSTALL_SCRIPT) $(srcdir)/Modules/makesetup $(DESTDIR)$(LIBPL)/makesetup + $(INSTALL_SCRIPT) $(srcdir)/install-sh $(DESTDIR)$(LIBPL)/install-sh + $(INSTALL_SCRIPT) python-config.py $(DESTDIR)$(LIBPL)/python-config.py + $(INSTALL_SCRIPT) python-config $(DESTDIR)$(BINDIR)/python$(LDVERSION)-config + @if [ -s Modules/python.exp -a \ + "`echo $(MACHDEP) | sed 's/^\(...\).*/\1/'`" = "aix" ]; then \ + echo; echo "Installing support files for building shared extension modules on AIX:"; \ + $(INSTALL_DATA) Modules/python.exp \ + $(DESTDIR)$(LIBPL)/python.exp; \ + echo; echo "$(LIBPL)/python.exp"; \ + $(INSTALL_SCRIPT) $(srcdir)/Modules/makexp_aix \ + $(DESTDIR)$(LIBPL)/makexp_aix; \ + echo "$(LIBPL)/makexp_aix"; \ + $(INSTALL_SCRIPT) Modules/ld_so_aix \ + $(DESTDIR)$(LIBPL)/ld_so_aix; \ + echo "$(LIBPL)/ld_so_aix"; \ + echo; echo "See Misc/README.AIX for details."; \ + else true; \ + fi + +# Install the dynamically loadable modules +# This goes into $(exec_prefix) +sharedinstall: sharedmods + $(RUNSHARED) $(PYTHON_FOR_BUILD) $(srcdir)/setup.py install \ + --prefix=$(prefix) \ + --install-scripts=$(BINDIR) \ + --install-platlib=$(DESTSHARED) \ + --root=$(DESTDIR)/ + -rm $(DESTDIR)$(DESTSHARED)/_sysconfigdata_$(ABIFLAGS)_$(MULTIARCH).py + -rm $(DESTDIR)$(DESTSHARED)/_sysconfigdata_$(ABIFLAGS)_$(MACHDEP)_$(MULTIARCH).py + -rm -r $(DESTDIR)$(DESTSHARED)/__pycache__ + +# Here are a couple of targets for MacOSX again, to install a full +# framework-based Python. frameworkinstall installs everything, the +# subtargets install specific parts. Much of the actual work is offloaded to +# the Makefile in Mac +# +# +# This target is here for backward compatibility, previous versions of Python +# hadn't integrated framework installation in the normal install process. +frameworkinstall: install + +# On install, we re-make the framework +# structure in the install location, /Library/Frameworks/ or the argument to +# --enable-framework. If --enable-framework has been specified then we have +# automatically set prefix to the location deep down in the framework, so we +# only have to cater for the structural bits of the framework. 
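+#
+# Minimal sketch of a framework build and install, assuming configure was run
+# with --enable-framework so prefix points inside the framework:
+#
+#   ./configure --enable-framework
+#   make
+#   make frameworkinstall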
+ +frameworkinstallframework: frameworkinstallstructure install frameworkinstallmaclib + +frameworkinstallstructure: $(LDLIBRARY) + @if test "$(PYTHONFRAMEWORKDIR)" = no-framework; then \ + echo Not configured with --enable-framework; \ + exit 1; \ + else true; \ + fi + @for i in $(prefix)/Resources/English.lproj $(prefix)/lib; do\ + if test ! -d $(DESTDIR)$$i; then \ + echo "Creating directory $(DESTDIR)$$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done + $(LN) -fsn include/python$(LDVERSION) $(DESTDIR)$(prefix)/Headers + sed 's/%VERSION%/'"`$(RUNSHARED) ./$(BUILDPYTHON) -c 'import platform; print(platform.python_version())'`"'/g' < $(RESSRCDIR)/Info.plist > $(DESTDIR)$(prefix)/Resources/Info.plist + $(LN) -fsn $(VERSION) $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Versions/Current + $(LN) -fsn Versions/Current/$(PYTHONFRAMEWORK) $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/$(PYTHONFRAMEWORK) + $(LN) -fsn Versions/Current/Headers $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Headers + $(LN) -fsn Versions/Current/Resources $(DESTDIR)$(PYTHONFRAMEWORKINSTALLDIR)/Resources + $(INSTALL_SHARED) $(LDLIBRARY) $(DESTDIR)$(PYTHONFRAMEWORKPREFIX)/$(LDLIBRARY) + +# This installs Mac/Lib into the framework +# Install a number of symlinks to keep software that expects a normal unix +# install (which includes python-config) happy. +frameworkinstallmaclib: + $(LN) -fs "../../../$(PYTHONFRAMEWORK)" "$(DESTDIR)$(LIBPL)/libpython$(LDVERSION).a" + $(LN) -fs "../../../$(PYTHONFRAMEWORK)" "$(DESTDIR)$(LIBPL)/libpython$(LDVERSION).dylib" + $(LN) -fs "../../../$(PYTHONFRAMEWORK)" "$(DESTDIR)$(LIBPL)/libpython$(VERSION).a" + $(LN) -fs "../../../$(PYTHONFRAMEWORK)" "$(DESTDIR)$(LIBPL)/libpython$(VERSION).dylib" + $(LN) -fs "../$(PYTHONFRAMEWORK)" "$(DESTDIR)$(prefix)/lib/libpython$(LDVERSION).dylib" + $(LN) -fs "../$(PYTHONFRAMEWORK)" "$(DESTDIR)$(prefix)/lib/libpython$(VERSION).dylib" + +# This installs the IDE, the Launcher and other apps into /Applications +frameworkinstallapps: + cd Mac && $(MAKE) installapps DESTDIR="$(DESTDIR)" + +# Build the bootstrap executable that will spawn the interpreter inside +# an app bundle within the framework. This allows the interpreter to +# run OS X GUI APIs. +frameworkpythonw: + cd Mac && $(MAKE) pythonw + +# This installs the python* and other bin symlinks in $prefix/bin or in +# a bin directory relative to the framework root +frameworkinstallunixtools: + cd Mac && $(MAKE) installunixtools DESTDIR="$(DESTDIR)" + +frameworkaltinstallunixtools: + cd Mac && $(MAKE) altinstallunixtools DESTDIR="$(DESTDIR)" + +# This installs the Tools into the applications directory. +# It is not part of a normal frameworkinstall +frameworkinstallextras: + cd Mac && $(MAKE) installextras DESTDIR="$(DESTDIR)" + +# Build the toplevel Makefile +Makefile.pre: $(srcdir)/Makefile.pre.in config.status + CONFIG_FILES=Makefile.pre CONFIG_HEADERS= $(SHELL) config.status + $(MAKE) -f Makefile.pre Makefile + +# Run the configure script. +config.status: $(srcdir)/configure + $(SHELL) $(srcdir)/configure $(CONFIG_ARGS) + +.PRECIOUS: config.status $(BUILDPYTHON) Makefile Makefile.pre + +# Some make's put the object file in the current directory +.c.o: + $(CC) -c $(PY_CORE_CFLAGS) -o $@ $< + +# bpo-30104: dtoa.c uses union to cast double to unsigned long[2]. clang 4.0 +# with -O2 or higher and strict aliasing miscompiles the ratio() function +# causing rounding issues. Compile dtoa.c using -fno-strict-aliasing on clang. 
+# https://bugs.llvm.org//show_bug.cgi?id=31928 +Python/dtoa.o: Python/dtoa.c + $(CC) -c $(PY_CORE_CFLAGS) $(CFLAGS_ALIASING) -o $@ $< + +# Run reindent on the library +reindent: + ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/reindent.py -r $(srcdir)/Lib + +# Rerun configure with the same options as it was run last time, +# provided the config.status script exists +recheck: + $(SHELL) config.status --recheck + $(SHELL) config.status + +# Regenerate configure and pyconfig.h.in +.PHONY: autoconf +autoconf: + # Regenerate the configure script from configure.ac using autoconf + (cd $(srcdir); autoconf -Wall) + # Regenerate pyconfig.h.in from configure.ac using autoheader + (cd $(srcdir); autoheader -Wall) + +# Create a tags file for vi +tags:: + ctags -w $(srcdir)/Include/*.h $(srcdir)/Include/cpython/*.h $(srcdir)/Include/internal/*.h + for i in $(SRCDIRS); do ctags -f tags -w -a $(srcdir)/$$i/*.[ch]; done + ctags -f tags -w -a $(srcdir)/Modules/_ctypes/*.[ch] + find $(srcdir)/Lib -type f -name "*.py" -not -name "test_*.py" -not -path "*/test/*" -not -path "*/tests/*" -not -path "*/*_test/*" | ctags -f tags -w -a -L - + LC_ALL=C sort -o tags tags + +# Create a tags file for GNU Emacs +TAGS:: + cd $(srcdir); \ + etags Include/*.h Include/cpython/*.h Include/internal/*.h; \ + for i in $(SRCDIRS); do etags -a $$i/*.[ch]; done + etags -a $(srcdir)/Modules/_ctypes/*.[ch] + find $(srcdir)/Lib -type f -name "*.py" -not -name "test_*.py" -not -path "*/test/*" -not -path "*/tests/*" -not -path "*/*_test/*" | etags - -a + +# Sanitation targets -- clean leaves libraries, executables and tags +# files, which clobber removes as well +pycremoval: + -find $(srcdir) -depth -name '__pycache__' -exec rm -rf {} ';' + -find $(srcdir) -name '*.py[co]' -exec rm -f {} ';' + +rmtestturds: + -rm -f *BAD *GOOD *SKIPPED + -rm -rf OUT + -rm -f *.TXT + -rm -f *.txt + -rm -f gb-18030-2000.xml + +docclean: + $(MAKE) -C $(srcdir)/Doc clean + +# like the 'clean' target but retain the profile guided optimization (PGO) +# data. The PGO data is only valid if source code remains unchanged. +clean-retain-profile: pycremoval + find . -name '*.[oa]' -exec rm -f {} ';' + find . -name '*.s[ol]' -exec rm -f {} ';' + find . -name '*.so.[0-9]*.[0-9]*' -exec rm -f {} ';' + find . -name '*.lst' -exec rm -f {} ';' + find build -name 'fficonfig.h' -exec rm -f {} ';' || true + find build -name '*.py' -exec rm -f {} ';' || true + find build -name '*.py[co]' -exec rm -f {} ';' || true + -rm -f pybuilddir.txt + -rm -f Lib/lib2to3/*Grammar*.pickle + -rm -f Programs/_testembed Programs/_freeze_importlib + -find build -type f -a ! -name '*.gc??' -exec rm -f {} ';' + -rm -f Include/pydtrace_probes.h + -rm -f profile-gen-stamp + +profile-removal: + find . -name '*.gc??' -exec rm -f {} ';' + find . -name '*.profclang?' -exec rm -f {} ';' + find . 
-name '*.dyn' -exec rm -f {} ';' + rm -f $(COVERAGE_INFO) + rm -rf $(COVERAGE_REPORT) + rm -f profile-run-stamp + +clean: clean-retain-profile + @if test build_all = profile-opt; then \ + rm -f profile-gen-stamp profile-clean-stamp; \ + $(MAKE) profile-removal; \ + fi + +clobber: clean + -rm -f $(BUILDPYTHON) $(LIBRARY) $(LDLIBRARY) $(DLLLIBRARY) \ + tags TAGS \ + config.cache config.log pyconfig.h Modules/config.c + -rm -rf build platform + -rm -rf $(PYTHONFRAMEWORKDIR) + -rm -f python-config.py python-config + +# Make things extra clean, before making a distribution: +# remove all generated files, even Makefile[.pre] +# Keep configure and Python-ast.[ch], it's possible they can't be generated +distclean: clobber docclean + for file in $(srcdir)/Lib/test/data/* ; do \ + if test "$$file" != "$(srcdir)/Lib/test/data/README"; then rm "$$file"; fi; \ + done + -rm -f core Makefile Makefile.pre config.status Modules/Setup.local \ + Modules/ld_so_aix Modules/python.exp Misc/python.pc \ + Misc/python-embed.pc Misc/python-config.sh + -rm -f python*-gdb.py + # Issue #28258: set LC_ALL to avoid issues with Estonian locale. + # Expansion is performed here by shell (spawned by make) itself before + # arguments are passed to find. So LC_ALL=C must be set as a separate + # command. + LC_ALL=C; find $(srcdir)/[a-zA-Z]* '(' -name '*.fdc' -o -name '*~' \ + -o -name '[@,#]*' -o -name '*.old' \ + -o -name '*.orig' -o -name '*.rej' \ + -o -name '*.bak' ')' \ + -exec rm -f {} ';' + +# Check that all symbols exported by libpython start with "Py" or "_Py" +smelly: all + $(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/smelly.py + +# Find files with funny names +funny: + find $(SUBDIRS) $(SUBDIRSTOO) \ + -type d \ + -o -name '*.[chs]' \ + -o -name '*.py' \ + -o -name '*.pyw' \ + -o -name '*.dat' \ + -o -name '*.el' \ + -o -name '*.fd' \ + -o -name '*.in' \ + -o -name '*.gif' \ + -o -name '*.txt' \ + -o -name '*.xml' \ + -o -name '*.xbm' \ + -o -name '*.xpm' \ + -o -name '*.uue' \ + -o -name '*.decTest' \ + -o -name '*.tmCommand' \ + -o -name '*.tmSnippet' \ + -o -name 'Setup' \ + -o -name 'Setup.*' \ + -o -name README \ + -o -name NEWS \ + -o -name HISTORY \ + -o -name Makefile \ + -o -name ChangeLog \ + -o -name .hgignore \ + -o -name MANIFEST \ + -o -print + +# Perform some verification checks on any modified files. 
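+# (Typically invoked as "make patchcheck"; the script reports issues such as
+# trailing whitespace or a missing news entry, though the exact checks are
+# defined in Tools/scripts/patchcheck.py rather than here.)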
+patchcheck: all + $(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/patchcheck.py + +check-limited-abi: all + $(RUNSHARED) ./$(BUILDPYTHON) $(srcdir)/Tools/scripts/stable_abi.py --all $(srcdir)/Misc/stable_abi.txt + +.PHONY: update-config +update-config: + curl -sL -o config.guess 'https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD' + curl -sL -o config.sub 'https://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD' + chmod +x config.guess config.sub + +# Dependencies + +Python/thread.o: $(srcdir)/Python/thread_nt.h $(srcdir)/Python/thread_pthread.h $(srcdir)/Python/condvar.h + +# Declare targets that aren't real files +.PHONY: all build_all sharedmods check-clean-src oldsharedmods test quicktest +.PHONY: install altinstall oldsharedinstall bininstall altbininstall +.PHONY: maninstall libinstall inclinstall libainstall sharedinstall +.PHONY: frameworkinstall frameworkinstallframework frameworkinstallstructure +.PHONY: frameworkinstallmaclib frameworkinstallapps frameworkinstallunixtools +.PHONY: frameworkaltinstallunixtools recheck clean clobber distclean +.PHONY: smelly funny patchcheck touch altmaninstall commoninstall +.PHONY: clean-retain-profile profile-removal run_profile_task +.PHONY: build_all_generate_profile build_all_merge_profile +.PHONY: gdbhooks + +# IF YOU PUT ANYTHING HERE IT WILL GO AWAY +# Local Variables: +# mode: makefile +# End: + +# Rules appended by makesetup + +Modules/arraymodule.o: $(srcdir)/Modules/arraymodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/arraymodule.c -o Modules/arraymodule.o +Modules/array$(EXT_SUFFIX): Modules/arraymodule.o; $(BLDSHARED) Modules/arraymodule.o -o Modules/array$(EXT_SUFFIX) +Modules/cmathmodule.o: $(srcdir)/Modules/cmathmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/cmathmodule.c -o Modules/cmathmodule.o +Modules/_math.o: $(srcdir)/Modules/_math.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/_math.c -o Modules/_math.o +Modules/cmath$(EXT_SUFFIX): Modules/cmathmodule.o Modules/_math.o; $(BLDSHARED) Modules/cmathmodule.o Modules/_math.o -o Modules/cmath$(EXT_SUFFIX) +Modules/mathmodule.o: $(srcdir)/Modules/mathmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/mathmodule.c -o Modules/mathmodule.o +Modules/_math.o: $(srcdir)/Modules/_math.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/_math.c -o Modules/_math.o +Modules/math$(EXT_SUFFIX): Modules/mathmodule.o Modules/_math.o; $(BLDSHARED) Modules/mathmodule.o Modules/_math.o -o Modules/math$(EXT_SUFFIX) +Modules/_struct.o: $(srcdir)/Modules/_struct.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/_struct.c -o Modules/_struct.o +Modules/_struct$(EXT_SUFFIX): Modules/_struct.o; $(BLDSHARED) Modules/_struct.o -o Modules/_struct$(EXT_SUFFIX) +Modules/_randommodule.o: $(srcdir)/Modules/_randommodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/_randommodule.c -o Modules/_randommodule.o +Modules/_random$(EXT_SUFFIX): Modules/_randommodule.o; $(BLDSHARED) Modules/_randommodule.o -o Modules/_random$(EXT_SUFFIX) +Modules/_elementtree.o: $(srcdir)/Modules/_elementtree.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_elementtree.c -o Modules/_elementtree.o +Modules/_elementtree$(EXT_SUFFIX): Modules/_elementtree.o; $(BLDSHARED) Modules/_elementtree.o -lexpat -o 
Modules/_elementtree$(EXT_SUFFIX) +Modules/_pickle.o: $(srcdir)/Modules/_pickle.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/_pickle.c -o Modules/_pickle.o +Modules/_pickle$(EXT_SUFFIX): Modules/_pickle.o; $(BLDSHARED) Modules/_pickle.o -o Modules/_pickle$(EXT_SUFFIX) +Modules/_datetimemodule.o: $(srcdir)/Modules/_datetimemodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_datetimemodule.c -o Modules/_datetimemodule.o +Modules/_datetime$(EXT_SUFFIX): Modules/_datetimemodule.o; $(BLDSHARED) Modules/_datetimemodule.o -o Modules/_datetime$(EXT_SUFFIX) +Modules/_bisectmodule.o: $(srcdir)/Modules/_bisectmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_bisectmodule.c -o Modules/_bisectmodule.o +Modules/_bisect$(EXT_SUFFIX): Modules/_bisectmodule.o; $(BLDSHARED) Modules/_bisectmodule.o -o Modules/_bisect$(EXT_SUFFIX) +Modules/_heapqmodule.o: $(srcdir)/Modules/_heapqmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_MODULE -c $(srcdir)/Modules/_heapqmodule.c -o Modules/_heapqmodule.o +Modules/_heapq$(EXT_SUFFIX): Modules/_heapqmodule.o; $(BLDSHARED) Modules/_heapqmodule.o -o Modules/_heapq$(EXT_SUFFIX) +Modules/_statisticsmodule.o: $(srcdir)/Modules/_statisticsmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_statisticsmodule.c -o Modules/_statisticsmodule.o +Modules/_statistics$(EXT_SUFFIX): Modules/_statisticsmodule.o; $(BLDSHARED) Modules/_statisticsmodule.o -o Modules/_statistics$(EXT_SUFFIX) +Modules/unicodedata.o: $(srcdir)/Modules/unicodedata.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/unicodedata.c -o Modules/unicodedata.o +Modules/unicodedata$(EXT_SUFFIX): Modules/unicodedata.o; $(BLDSHARED) Modules/unicodedata.o -o Modules/unicodedata$(EXT_SUFFIX) +Modules/fcntlmodule.o: $(srcdir)/Modules/fcntlmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/fcntlmodule.c -o Modules/fcntlmodule.o +Modules/fcntl$(EXT_SUFFIX): Modules/fcntlmodule.o; $(BLDSHARED) Modules/fcntlmodule.o -o Modules/fcntl$(EXT_SUFFIX) +Modules/spwdmodule.o: $(srcdir)/Modules/spwdmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/spwdmodule.c -o Modules/spwdmodule.o +Modules/spwd$(EXT_SUFFIX): Modules/spwdmodule.o; $(BLDSHARED) Modules/spwdmodule.o -o Modules/spwd$(EXT_SUFFIX) +Modules/grpmodule.o: $(srcdir)/Modules/grpmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/grpmodule.c -o Modules/grpmodule.o +Modules/grp$(EXT_SUFFIX): Modules/grpmodule.o; $(BLDSHARED) Modules/grpmodule.o -o Modules/grp$(EXT_SUFFIX) +Modules/selectmodule.o: $(srcdir)/Modules/selectmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/selectmodule.c -o Modules/selectmodule.o +Modules/select$(EXT_SUFFIX): Modules/selectmodule.o; $(BLDSHARED) Modules/selectmodule.o -o Modules/select$(EXT_SUFFIX) +Modules/_csv.o: $(srcdir)/Modules/_csv.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_csv.c -o Modules/_csv.o +Modules/_csv$(EXT_SUFFIX): Modules/_csv.o; $(BLDSHARED) Modules/_csv.o -o Modules/_csv$(EXT_SUFFIX) +Modules/socketmodule.o: $(srcdir)/Modules/socketmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/socketmodule.c -o Modules/socketmodule.o +Modules/_socket$(EXT_SUFFIX): Modules/socketmodule.o; $(BLDSHARED) Modules/socketmodule.o -o Modules/_socket$(EXT_SUFFIX) +Modules/_posixsubprocess.o: $(srcdir)/Modules/_posixsubprocess.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/_posixsubprocess.c -o 
Modules/_posixsubprocess.o +Modules/_posixsubprocess$(EXT_SUFFIX): Modules/_posixsubprocess.o; $(BLDSHARED) Modules/_posixsubprocess.o -o Modules/_posixsubprocess$(EXT_SUFFIX) +Modules/md5module.o: $(srcdir)/Modules/md5module.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/md5module.c -o Modules/md5module.o +Modules/_md5$(EXT_SUFFIX): Modules/md5module.o; $(BLDSHARED) Modules/md5module.o -o Modules/_md5$(EXT_SUFFIX) +Modules/sha1module.o: $(srcdir)/Modules/sha1module.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/sha1module.c -o Modules/sha1module.o +Modules/_sha1$(EXT_SUFFIX): Modules/sha1module.o; $(BLDSHARED) Modules/sha1module.o -o Modules/_sha1$(EXT_SUFFIX) +Modules/sha256module.o: $(srcdir)/Modules/sha256module.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/sha256module.c -o Modules/sha256module.o +Modules/_sha256$(EXT_SUFFIX): Modules/sha256module.o; $(BLDSHARED) Modules/sha256module.o -o Modules/_sha256$(EXT_SUFFIX) +Modules/sha512module.o: $(srcdir)/Modules/sha512module.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/sha512module.c -o Modules/sha512module.o +Modules/_sha512$(EXT_SUFFIX): Modules/sha512module.o; $(BLDSHARED) Modules/sha512module.o -o Modules/_sha512$(EXT_SUFFIX) +Modules/sha3module.o: $(srcdir)/Modules/_sha3/sha3module.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_sha3/sha3module.c -o Modules/sha3module.o +Modules/_sha3$(EXT_SUFFIX): Modules/sha3module.o; $(BLDSHARED) Modules/sha3module.o -o Modules/_sha3$(EXT_SUFFIX) +Modules/blake2module.o: $(srcdir)/Modules/_blake2/blake2module.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_blake2/blake2module.c -o Modules/blake2module.o +Modules/blake2b_impl.o: $(srcdir)/Modules/_blake2/blake2b_impl.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_blake2/blake2b_impl.c -o Modules/blake2b_impl.o +Modules/blake2s_impl.o: $(srcdir)/Modules/_blake2/blake2s_impl.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_blake2/blake2s_impl.c -o Modules/blake2s_impl.o +Modules/_blake2$(EXT_SUFFIX): Modules/blake2module.o Modules/blake2b_impl.o Modules/blake2s_impl.o; $(BLDSHARED) Modules/blake2module.o Modules/blake2b_impl.o Modules/blake2s_impl.o -o Modules/_blake2$(EXT_SUFFIX) +Modules/syslogmodule.o: $(srcdir)/Modules/syslogmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/syslogmodule.c -o Modules/syslogmodule.o +Modules/syslog$(EXT_SUFFIX): Modules/syslogmodule.o; $(BLDSHARED) Modules/syslogmodule.o -o Modules/syslog$(EXT_SUFFIX) +Modules/binascii.o: $(srcdir)/Modules/binascii.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/binascii.c -o Modules/binascii.o +Modules/binascii$(EXT_SUFFIX): Modules/binascii.o; $(BLDSHARED) Modules/binascii.o -o Modules/binascii$(EXT_SUFFIX) +Modules/zlibmodule.o: $(srcdir)/Modules/zlibmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -I$(prefix)/include -c $(srcdir)/Modules/zlibmodule.c -o Modules/zlibmodule.o +Modules/zlib$(EXT_SUFFIX): Modules/zlibmodule.o; $(BLDSHARED) Modules/zlibmodule.o -L$(exec_prefix)/lib -lz -o Modules/zlib$(EXT_SUFFIX) +Modules/posixmodule.o: $(srcdir)/Modules/posixmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -c $(srcdir)/Modules/posixmodule.c -o Modules/posixmodule.o +Modules/posix$(EXT_SUFFIX): Modules/posixmodule.o; $(BLDSHARED) Modules/posixmodule.o -o Modules/posix$(EXT_SUFFIX) +Modules/errnomodule.o: $(srcdir)/Modules/errnomodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c 
$(srcdir)/Modules/errnomodule.c -o Modules/errnomodule.o +Modules/errno$(EXT_SUFFIX): Modules/errnomodule.o; $(BLDSHARED) Modules/errnomodule.o -o Modules/errno$(EXT_SUFFIX) +Modules/pwdmodule.o: $(srcdir)/Modules/pwdmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/pwdmodule.c -o Modules/pwdmodule.o +Modules/pwd$(EXT_SUFFIX): Modules/pwdmodule.o; $(BLDSHARED) Modules/pwdmodule.o -o Modules/pwd$(EXT_SUFFIX) +Modules/_sre.o: $(srcdir)/Modules/_sre.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/_sre.c -o Modules/_sre.o +Modules/_sre$(EXT_SUFFIX): Modules/_sre.o; $(BLDSHARED) Modules/_sre.o -o Modules/_sre$(EXT_SUFFIX) +Modules/_codecsmodule.o: $(srcdir)/Modules/_codecsmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_codecsmodule.c -o Modules/_codecsmodule.o +Modules/_codecs$(EXT_SUFFIX): Modules/_codecsmodule.o; $(BLDSHARED) Modules/_codecsmodule.o -o Modules/_codecs$(EXT_SUFFIX) +Modules/_weakref.o: $(srcdir)/Modules/_weakref.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_weakref.c -o Modules/_weakref.o +Modules/_weakref$(EXT_SUFFIX): Modules/_weakref.o; $(BLDSHARED) Modules/_weakref.o -o Modules/_weakref$(EXT_SUFFIX) +Modules/_functoolsmodule.o: $(srcdir)/Modules/_functoolsmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -c $(srcdir)/Modules/_functoolsmodule.c -o Modules/_functoolsmodule.o +Modules/_functools$(EXT_SUFFIX): Modules/_functoolsmodule.o; $(BLDSHARED) Modules/_functoolsmodule.o -o Modules/_functools$(EXT_SUFFIX) +Modules/_operator.o: $(srcdir)/Modules/_operator.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/_operator.c -o Modules/_operator.o +Modules/_operator$(EXT_SUFFIX): Modules/_operator.o; $(BLDSHARED) Modules/_operator.o -o Modules/_operator$(EXT_SUFFIX) +Modules/_collectionsmodule.o: $(srcdir)/Modules/_collectionsmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_collectionsmodule.c -o Modules/_collectionsmodule.o +Modules/_collections$(EXT_SUFFIX): Modules/_collectionsmodule.o; $(BLDSHARED) Modules/_collectionsmodule.o -o Modules/_collections$(EXT_SUFFIX) +Modules/_abc.o: $(srcdir)/Modules/_abc.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/_abc.c -o Modules/_abc.o +Modules/_abc$(EXT_SUFFIX): Modules/_abc.o; $(BLDSHARED) Modules/_abc.o -o Modules/_abc$(EXT_SUFFIX) +Modules/itertoolsmodule.o: $(srcdir)/Modules/itertoolsmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/itertoolsmodule.c -o Modules/itertoolsmodule.o +Modules/itertools$(EXT_SUFFIX): Modules/itertoolsmodule.o; $(BLDSHARED) Modules/itertoolsmodule.o -o Modules/itertools$(EXT_SUFFIX) +Modules/atexitmodule.o: $(srcdir)/Modules/atexitmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/atexitmodule.c -o Modules/atexitmodule.o +Modules/atexit$(EXT_SUFFIX): Modules/atexitmodule.o; $(BLDSHARED) Modules/atexitmodule.o -o Modules/atexit$(EXT_SUFFIX) +Modules/signalmodule.o: $(srcdir)/Modules/signalmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -c $(srcdir)/Modules/signalmodule.c -o Modules/signalmodule.o +Modules/_signal$(EXT_SUFFIX): Modules/signalmodule.o; $(BLDSHARED) Modules/signalmodule.o -o Modules/_signal$(EXT_SUFFIX) +Modules/_stat.o: $(srcdir)/Modules/_stat.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_stat.c -o Modules/_stat.o +Modules/_stat$(EXT_SUFFIX): Modules/_stat.o; $(BLDSHARED) Modules/_stat.o -o 
Modules/_stat$(EXT_SUFFIX) +Modules/timemodule.o: $(srcdir)/Modules/timemodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -c $(srcdir)/Modules/timemodule.c -o Modules/timemodule.o +Modules/time$(EXT_SUFFIX): Modules/timemodule.o; $(BLDSHARED) Modules/timemodule.o -o Modules/time$(EXT_SUFFIX) +Modules/_threadmodule.o: $(srcdir)/Modules/_threadmodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -c $(srcdir)/Modules/_threadmodule.c -o Modules/_threadmodule.o +Modules/_thread$(EXT_SUFFIX): Modules/_threadmodule.o; $(BLDSHARED) Modules/_threadmodule.o -o Modules/_thread$(EXT_SUFFIX) +Modules/_localemodule.o: $(srcdir)/Modules/_localemodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -c $(srcdir)/Modules/_localemodule.c -o Modules/_localemodule.o +Modules/_locale$(EXT_SUFFIX): Modules/_localemodule.o; $(BLDSHARED) Modules/_localemodule.o -o Modules/_locale$(EXT_SUFFIX) +Modules/_iomodule.o: $(srcdir)/Modules/_io/_iomodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/_iomodule.c -o Modules/_iomodule.o +Modules/iobase.o: $(srcdir)/Modules/_io/iobase.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/iobase.c -o Modules/iobase.o +Modules/fileio.o: $(srcdir)/Modules/_io/fileio.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/fileio.c -o Modules/fileio.o +Modules/bytesio.o: $(srcdir)/Modules/_io/bytesio.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/bytesio.c -o Modules/bytesio.o +Modules/bufferedio.o: $(srcdir)/Modules/_io/bufferedio.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/bufferedio.c -o Modules/bufferedio.o +Modules/textio.o: $(srcdir)/Modules/_io/textio.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/textio.c -o Modules/textio.o +Modules/stringio.o: $(srcdir)/Modules/_io/stringio.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io -c $(srcdir)/Modules/_io/stringio.c -o Modules/stringio.o +Modules/_io$(EXT_SUFFIX): Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o; $(BLDSHARED) Modules/_iomodule.o Modules/iobase.o Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o Modules/textio.o Modules/stringio.o -o Modules/_io$(EXT_SUFFIX) +Modules/faulthandler.o: $(srcdir)/Modules/faulthandler.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/faulthandler.c -o Modules/faulthandler.o +Modules/faulthandler$(EXT_SUFFIX): Modules/faulthandler.o; $(BLDSHARED) Modules/faulthandler.o -o Modules/faulthandler$(EXT_SUFFIX) +Modules/_tracemalloc.o: $(srcdir)/Modules/_tracemalloc.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/_tracemalloc.c -o Modules/_tracemalloc.o +Modules/_tracemalloc$(EXT_SUFFIX): Modules/_tracemalloc.o; $(BLDSHARED) Modules/_tracemalloc.o -o Modules/_tracemalloc$(EXT_SUFFIX) +Modules/symtablemodule.o: $(srcdir)/Modules/symtablemodule.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c 
$(srcdir)/Modules/symtablemodule.c -o Modules/symtablemodule.o +Modules/_symtable$(EXT_SUFFIX): Modules/symtablemodule.o; $(BLDSHARED) Modules/symtablemodule.o -o Modules/_symtable$(EXT_SUFFIX) +Modules/pyexpat.o: $(srcdir)/Modules/pyexpat.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/pyexpat.c -o Modules/pyexpat.o +Modules/pyexpat$(EXT_SUFFIX): Modules/pyexpat.o; $(BLDSHARED) Modules/pyexpat.o -lexpat -o Modules/pyexpat$(EXT_SUFFIX) +Modules/xxsubtype.o: $(srcdir)/Modules/xxsubtype.c; $(CC) $(PY_BUILTIN_MODULE_CFLAGS) -c $(srcdir)/Modules/xxsubtype.c -o Modules/xxsubtype.o +Modules/xxsubtype$(EXT_SUFFIX): Modules/xxsubtype.o; $(BLDSHARED) Modules/xxsubtype.o -o Modules/xxsubtype$(EXT_SUFFIX) diff --git a/python310/config-3.10-x86_64-linux-gnu/Setup b/python310/config-3.10-x86_64-linux-gnu/Setup new file mode 100644 index 0000000000000000000000000000000000000000..cd098256a58a0dcbb3fe07a0e89f3e6c8a111393 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/Setup @@ -0,0 +1,366 @@ +# -*- makefile -*- +# The file Setup is used by the makesetup script to construct the files +# Makefile and config.c, from Makefile.pre and config.c.in, +# respectively. Note that Makefile.pre is created from Makefile.pre.in +# by the toplevel configure script. + +# (VPATH notes: Setup and Makefile.pre are in the build directory, as +# are Makefile and config.c; the *.in files are in the source directory.) + +# Each line in this file describes one or more optional modules. +# Modules configured here will not be compiled by the setup.py script, +# so the file can be used to override setup.py's behavior. +# Tag lines containing just the word "*static*", "*shared*" or "*disabled*" +# (without the quotes but with the stars) are used to tag the following module +# descriptions. Tag lines may alternate throughout this file. Modules are +# built statically when they are preceded by a "*static*" tag line or when +# there is no tag line between the start of the file and the module +# description. Modules are built as a shared library when they are preceded by +# a "*shared*" tag line. Modules are not built at all, not by the Makefile, +# nor by the setup.py script, when they are preceded by a "*disabled*" tag +# line. + +# Lines have the following structure: +# +# <module> ... [<sourcefile> ...] [<cpparg> ...] [<library> ...] +# +# <sourcefile> is anything ending in .c (.C, .cc, .c++ are C++ files) +# <cpparg> is anything starting with -I, -D, -U or -C +# <library> is anything ending in .a or beginning with -l or -L +# <module> is anything else but should be a valid Python +# identifier (letters, digits, underscores, beginning with non-digit) +# +# (As the makesetup script changes, it may recognize some other +# arguments as well, e.g. *.so and *.sl as libraries. See the big +# case statement in the makesetup script.) +# +# Lines can also have the form +# +# <name> = <value> +# +# which defines a Make variable definition inserted into Makefile.in +# +# The build process works like this: +# +# 1. Build all modules that are declared as static in Modules/Setup, +# combine them into libpythonxy.a, combine that into python. +# 2. Build all modules that are listed as shared in Modules/Setup. +# 3. Invoke setup.py. That builds all modules that +# a) are not builtin, and +# b) are not listed in Modules/Setup, and +# c) can be build on the target +# +# Therefore, modules declared to be shared will not be +# included in the config.c file, nor in the list of objects to be +# added to the library archive, and their linker options won't be +# added to the linker options.
Rules to create their .o files and +# their shared libraries will still be added to the Makefile, and +# their names will be collected in the Make variable SHAREDMODS. This +# is used to build modules as shared libraries. (They can be +# installed using "make sharedinstall", which is implied by the +# toplevel "make install" target.) (For compatibility, +# *noconfig* has the same effect as *shared*.) +# +# NOTE: As a standard policy, as many modules as can be supported by a +# platform should be present. The distribution comes with all modules +# enabled that are supported by most platforms and don't require you +# to ftp sources from elsewhere. + + +# Some special rules to define PYTHONPATH. +# Edit the definitions below to indicate which options you are using. +# Don't add any whitespace or comments! + +# Directories where library files get installed. +# DESTLIB is for Python modules; MACHDESTLIB for shared libraries. +DESTLIB=$(LIBDEST) +MACHDESTLIB=$(BINLIBDEST) + +# NOTE: all the paths are now relative to the prefix that is computed +# at run time! + +# Standard path -- don't edit. +# No leading colon since this is the first entry. +# Empty since this is now just the runtime prefix. +DESTPATH= + +# Site specific path components -- should begin with : if non-empty +SITEPATH= + +# Standard path components for test modules +TESTPATH= + +COREPYTHONPATH=$(DESTPATH)$(SITEPATH)$(TESTPATH) +PYTHONPATH=$(COREPYTHONPATH) + + +# The modules listed here can't be built as shared libraries for +# various reasons; therefore they are listed here instead of in the +# normal order. + +# This only contains the minimal set of modules required to run the +# setup.py script in the root of the Python source tree. + +posix -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal posixmodule.c # posix (UNIX) system calls +errno errnomodule.c # posix (UNIX) errno values +pwd pwdmodule.c # this is needed to find out the user's home dir + # if $HOME is not set +_sre -DPy_BUILD_CORE_BUILTIN _sre.c # Fredrik Lundh's new regular expressions +_codecs _codecsmodule.c # access to the builtin codecs and codec registry +_weakref _weakref.c # weak references +_functools -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal _functoolsmodule.c # Tools for working with functions and callable objects +_operator -DPy_BUILD_CORE_BUILTIN _operator.c # operator.add() and similar goodies +_collections _collectionsmodule.c # Container types +_abc -DPy_BUILD_CORE_BUILTIN _abc.c # Abstract base classes +itertools itertoolsmodule.c # Functions creating iterators for efficient looping +atexit atexitmodule.c # Register functions to be run at interpreter-shutdown +_signal -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal signalmodule.c +_stat _stat.c # stat.h interface +time -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal timemodule.c # -lm # time operations and variables +_thread -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal _threadmodule.c # low-level threading interface + +# access to ISO C locale support +_locale -DPy_BUILD_CORE_BUILTIN _localemodule.c # -lintl + +# Standard I/O baseline +_io -DPy_BUILD_CORE_BUILTIN -I$(srcdir)/Include/internal -I$(srcdir)/Modules/_io _io/_iomodule.c _io/iobase.c _io/fileio.c _io/bytesio.c _io/bufferedio.c _io/textio.c _io/stringio.c + +# faulthandler module +faulthandler faulthandler.c + +# debug tool to trace memory blocks allocated by Python +# +# bpo-35053: The module must be builtin since _Py_NewReference() +# can call _PyTraceMalloc_NewReference(). 
+_tracemalloc _tracemalloc.c + +# The rest of the modules listed in this file are all commented out by +# default. Usually they can be detected and built as dynamically +# loaded modules by the new setup.py script added in Python 2.1. If +# you're on a platform that doesn't support dynamic loading, want to +# compile modules statically into the Python binary, or need to +# specify some odd set of compiler switches, you can uncomment the +# appropriate lines below. + +# ====================================================================== + +# The Python symtable module depends on .h files that setup.py doesn't track +_symtable symtablemodule.c + +# Uncommenting the following line tells makesetup that all following +# modules are to be built as shared libraries (see above for more +# detail; also note that *static* or *disabled* cancels this effect): + +#*shared* + +# GNU readline. Unlike previous Python incarnations, GNU readline is +# now incorporated in an optional module, configured in the Setup file +# instead of by a configure script switch. You may have to insert a +# -L option pointing to the directory where libreadline.* lives, +# and you may have to change -ltermcap to -ltermlib or perhaps remove +# it, depending on your system -- see the GNU readline instructions. +# It's okay for this to be a shared library, too. + +#readline readline.c -lreadline -ltermcap + + +# Modules that should always be present (non UNIX dependent): + +#array -DPy_BUILD_CORE_MODULE arraymodule.c # array objects +#cmath cmathmodule.c _math.c -DPy_BUILD_CORE_MODULE # -lm # complex math library functions +#math mathmodule.c _math.c -DPy_BUILD_CORE_MODULE # -lm # math library functions, e.g. sin() +#_contextvars _contextvarsmodule.c # Context Variables +#_struct -DPy_BUILD_CORE_MODULE _struct.c # binary structure packing/unpacking +#_weakref _weakref.c # basic weak reference support +#_testcapi _testcapimodule.c # Python C API test module +#_testinternalcapi _testinternalcapi.c -I$(srcdir)/Include/internal -DPy_BUILD_CORE_MODULE # Python internal C API test module +#_random _randommodule.c -DPy_BUILD_CORE_MODULE # Random number generator +#_elementtree _elementtree.c -lexpat # elementtree accelerator +#_pickle -DPy_BUILD_CORE_MODULE _pickle.c # pickle accelerator +#_datetime _datetimemodule.c # datetime accelerator +#_zoneinfo _zoneinfo.c -DPy_BUILD_CORE_MODULE # zoneinfo accelerator +#_bisect _bisectmodule.c # Bisection algorithms +#_heapq _heapqmodule.c -DPy_BUILD_CORE_MODULE # Heap queue algorithm +#_asyncio _asynciomodule.c # Fast asyncio Future +#_json -I$(srcdir)/Include/internal -DPy_BUILD_CORE_BUILTIN _json.c # _json speedups +#_statistics _statisticsmodule.c # statistics accelerator + +#unicodedata unicodedata.c -DPy_BUILD_CORE_BUILTIN # static Unicode character database + + +# Modules with some UNIX dependencies -- on by default: +# (If you have a really backward UNIX, select and socket may not be +# supported...) + +#fcntl fcntlmodule.c # fcntl(2) and ioctl(2) +#spwd spwdmodule.c # spwd(3) +#grp grpmodule.c # grp(3) +#select selectmodule.c # select(2); not on ancient System V + +# Memory-mapped files (also works on Win32). 
+#mmap mmapmodule.c + +# CSV file helper +#_csv _csv.c + +# Socket module helper for socket(2) +#_socket socketmodule.c + +# Socket module helper for SSL support; you must comment out the other +# socket line above, and edit the OPENSSL variable: +# OPENSSL=/path/to/openssl/directory +#_ssl _ssl.c -lssl -lcrypto +#_hashlib _hashopenssl.c -lcrypto + +# The crypt module is now disabled by default because it breaks builds +# on many systems (where -lcrypt is needed), e.g. Linux (I believe). + +#_crypt _cryptmodule.c # -lcrypt # crypt(3); needs -lcrypt on some systems + + +# Some more UNIX dependent modules -- off by default, since these +# are not supported by all UNIX systems: + +#nis nismodule.c -lnsl # Sun yellow pages -- not everywhere +#termios termios.c # Steen Lumholt's termios module +#resource resource.c # Jeremy Hylton's rlimit interface + +#_posixsubprocess -DPy_BUILD_CORE_BUILTIN _posixsubprocess.c # POSIX subprocess module helper + +# Multimedia modules -- off by default. +# These don't work for 64-bit platforms!!! +# #993173 says audioop works on 64-bit platforms, though. +# These represent audio samples or images as strings: + +#audioop audioop.c # Operations on audio samples + + +# Note that the _md5 and _sha modules are normally only built if the +# system does not have the OpenSSL libs containing an optimized version. + +# The _md5 module implements the RSA Data Security, Inc. MD5 +# Message-Digest Algorithm, described in RFC 1321. + +#_md5 md5module.c + +#_hashlib _hashopenssl.c -lssl -lcrypto + +# The _sha module implements the SHA checksum algorithms. +# (NIST's Secure Hash Algorithms.) +#_sha1 sha1module.c +#_sha256 sha256module.c -DPy_BUILD_CORE_BUILTIN +#_sha512 sha512module.c -DPy_BUILD_CORE_BUILTIN +#_sha3 _sha3/sha3module.c + +# _blake module +#_blake2 _blake2/blake2module.c _blake2/blake2b_impl.c _blake2/blake2s_impl.c + +# The _tkinter module. +# +# The command for _tkinter is long and site specific. Please +# uncomment and/or edit those parts as indicated. If you don't have a +# specific extension (e.g. Tix or BLT), leave the corresponding line +# commented out. (Leave the trailing backslashes in! If you +# experience strange errors, you may want to join all uncommented +# lines and remove the backslashes -- the backslash interpretation is +# done by the shell's "read" command and it may not be implemented on +# every system. 
+ +# *** Always uncomment this (leave the leading underscore in!): +# _tkinter _tkinter.c tkappinit.c -DWITH_APPINIT \ +# *** Uncomment and edit to reflect where your Tcl/Tk libraries are: +# -L/usr/local/lib \ +# *** Uncomment and edit to reflect where your Tcl/Tk headers are: +# -I/usr/local/include \ +# *** Uncomment and edit to reflect where your X11 header files are: +# -I/usr/X11R6/include \ +# *** Or uncomment this for Solaris: +# -I/usr/openwin/include \ +# *** Uncomment and edit for Tix extension only: +# -DWITH_TIX -ltix8.1.8.2 \ +# *** Uncomment and edit for BLT extension only: +# -DWITH_BLT -I/usr/local/blt/blt8.0-unoff/include -lBLT8.0 \ +# *** Uncomment and edit for PIL (TkImaging) extension only: +# (See http://www.pythonware.com/products/pil/ for more info) +# -DWITH_PIL -I../Extensions/Imaging/libImaging tkImaging.c \ +# *** Uncomment and edit for TOGL extension only: +# -DWITH_TOGL togl.c \ +# *** Uncomment and edit to reflect your Tcl/Tk versions: +# -ltk8.2 -ltcl8.2 \ +# *** Uncomment and edit to reflect where your X11 libraries are: +# -L/usr/X11R6/lib \ +# *** Or uncomment this for Solaris: +# -L/usr/openwin/lib \ +# *** Uncomment these for TOGL extension only: +# -lGL -lGLU -lXext -lXmu \ +# *** Uncomment for AIX: +# -lld \ +# *** Always uncomment this; X11 libraries to link with: +# -lX11 + +# Lance Ellinghaus's syslog module +#syslog syslogmodule.c # syslog daemon interface + + +# Curses support, requiring the System V version of curses, often +# provided by the ncurses library. e.g. on Linux, link with -lncurses +# instead of -lcurses). + +#_curses _cursesmodule.c -lcurses -ltermcap -DPy_BUILD_CORE_MODULE +# Wrapper for the panel library that's part of ncurses and SYSV curses. +#_curses_panel _curses_panel.c -lpanel -lncurses + + +# Modules that provide persistent dictionary-like semantics. You will +# probably want to arrange for at least one of them to be available on +# your machine, though none are defined by default because of library +# dependencies. The Python module dbm/__init__.py provides an +# implementation independent wrapper for these; dbm/dumb.py provides +# similar functionality (but slower of course) implemented in Python. + +#_dbm _dbmmodule.c # dbm(3) may require -lndbm or similar + +# Anthony Baxter's gdbm module. GNU dbm(3) will require -lgdbm: + +#_gdbm _gdbmmodule.c -I/usr/local/include -L/usr/local/lib -lgdbm + +#_ctypes _ctypes/_ctypes.c _ctypes/callbacks.c _ctypes/callproc.c _ctypes/stgdict.c _ctypes/cfield.c _ctypes/malloc_closure.c -lffi + +# Helper module for various ascii-encoders +#binascii binascii.c + +# Andrew Kuchling's zlib module. +# This require zlib 1.1.3 (or later). +# See http://www.gzip.org/zlib/ +#zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz + +# Interface to the Expat XML parser +# More information on Expat can be found at www.libexpat.org. 
+# +pyexpat pyexpat.c -lexpat + +# Hye-Shik Chang's CJKCodecs + +# multibytecodec is required for all the other CJK codec modules +#_multibytecodec cjkcodecs/multibytecodec.c + +#_codecs_cn cjkcodecs/_codecs_cn.c +#_codecs_hk cjkcodecs/_codecs_hk.c +#_codecs_iso2022 cjkcodecs/_codecs_iso2022.c +#_codecs_jp cjkcodecs/_codecs_jp.c +#_codecs_kr cjkcodecs/_codecs_kr.c +#_codecs_tw cjkcodecs/_codecs_tw.c + +# Example -- included for reference only: +# xx xxmodule.c + +# Another example -- the 'xxsubtype' module shows C-level subtyping in action +xxsubtype xxsubtype.c + +# Uncommenting the following line tells makesetup that all following modules +# are not built (see above for more detail). +# +#*disabled* +# +#_sqlite3 _tkinter _curses pyexpat +#_codecs_jp _codecs_kr _codecs_tw unicodedata diff --git a/python310/config-3.10-x86_64-linux-gnu/Setup.local b/python310/config-3.10-x86_64-linux-gnu/Setup.local new file mode 100644 index 0000000000000000000000000000000000000000..980a48bbf5efc75e99b92b0b14a3689dbbaeef65 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/Setup.local @@ -0,0 +1,29 @@ +# Edit this file for local setup changes +array -DPy_BUILD_CORE_MODULE arraymodule.c # array objects +cmath cmathmodule.c _math.c -DPy_BUILD_CORE_MODULE # -lm # complex math library functions +math mathmodule.c _math.c -DPy_BUILD_CORE_MODULE # -lm # math library functions, e.g. sin() +_struct -DPy_BUILD_CORE_MODULE _struct.c # binary structure packing/unpacking +_random _randommodule.c -DPy_BUILD_CORE_MODULE # Random number generator +_elementtree _elementtree.c -lexpat # elementtree accelerator +_pickle -DPy_BUILD_CORE_MODULE _pickle.c # pickle accelerator +_datetime _datetimemodule.c # datetime accelerator +_bisect _bisectmodule.c # Bisection algorithms +_heapq _heapqmodule.c -DPy_BUILD_CORE_MODULE # Heap queue algorithm +_statistics _statisticsmodule.c # statistics accelerator +unicodedata unicodedata.c -DPy_BUILD_CORE_BUILTIN # static Unicode character database +fcntl fcntlmodule.c # fcntl(2) and ioctl(2) +spwd spwdmodule.c # spwd(3) +grp grpmodule.c # grp(3) +select selectmodule.c # select(2); not on ancient System V +_csv _csv.c +_socket socketmodule.c +_posixsubprocess -DPy_BUILD_CORE_BUILTIN _posixsubprocess.c # POSIX subprocess module helper +_md5 md5module.c +_sha1 sha1module.c +_sha256 sha256module.c -DPy_BUILD_CORE_BUILTIN +_sha512 sha512module.c -DPy_BUILD_CORE_BUILTIN +_sha3 _sha3/sha3module.c +_blake2 _blake2/blake2module.c _blake2/blake2b_impl.c _blake2/blake2s_impl.c +syslog syslogmodule.c # syslog daemon interface +binascii binascii.c +zlib zlibmodule.c -I$(prefix)/include -L$(exec_prefix)/lib -lz diff --git a/python310/config-3.10-x86_64-linux-gnu/config.c b/python310/config-3.10-x86_64-linux-gnu/config.c new file mode 100644 index 0000000000000000000000000000000000000000..1d64233d775c7c8750d11311529804da791a4443 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/config.c @@ -0,0 +1,172 @@ +/* Generated automatically from ../Modules/config.c.in by makesetup. */ +/* -*- C -*- *********************************************** +Copyright (c) 2000, BeOpen.com. +Copyright (c) 1995-2000, Corporation for National Research Initiatives. +Copyright (c) 1990-1995, Stichting Mathematisch Centrum. +All rights reserved. + +See the file "Misc/COPYRIGHT" for information on usage and +redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES. +******************************************************************/ + +/* Module configuration */ + +/* !!! !!! !!! 
This file is edited by the makesetup script !!! !!! !!! */ + +/* This file contains the table of built-in modules. + See create_builtin() in import.c. */ + +#include "Python.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +extern PyObject* PyInit_array(void); +extern PyObject* PyInit_cmath(void); +extern PyObject* PyInit_math(void); +extern PyObject* PyInit__struct(void); +extern PyObject* PyInit__random(void); +extern PyObject* PyInit__elementtree(void); +extern PyObject* PyInit__pickle(void); +extern PyObject* PyInit__datetime(void); +extern PyObject* PyInit__bisect(void); +extern PyObject* PyInit__heapq(void); +extern PyObject* PyInit__statistics(void); +extern PyObject* PyInit_unicodedata(void); +extern PyObject* PyInit_fcntl(void); +extern PyObject* PyInit_spwd(void); +extern PyObject* PyInit_grp(void); +extern PyObject* PyInit_select(void); +extern PyObject* PyInit__csv(void); +extern PyObject* PyInit__socket(void); +extern PyObject* PyInit__posixsubprocess(void); +extern PyObject* PyInit__md5(void); +extern PyObject* PyInit__sha1(void); +extern PyObject* PyInit__sha256(void); +extern PyObject* PyInit__sha512(void); +extern PyObject* PyInit__sha3(void); +extern PyObject* PyInit__blake2(void); +extern PyObject* PyInit_syslog(void); +extern PyObject* PyInit_binascii(void); +extern PyObject* PyInit_zlib(void); +extern PyObject* PyInit_posix(void); +extern PyObject* PyInit_errno(void); +extern PyObject* PyInit_pwd(void); +extern PyObject* PyInit__sre(void); +extern PyObject* PyInit__codecs(void); +extern PyObject* PyInit__weakref(void); +extern PyObject* PyInit__functools(void); +extern PyObject* PyInit__operator(void); +extern PyObject* PyInit__collections(void); +extern PyObject* PyInit__abc(void); +extern PyObject* PyInit_itertools(void); +extern PyObject* PyInit_atexit(void); +extern PyObject* PyInit__signal(void); +extern PyObject* PyInit__stat(void); +extern PyObject* PyInit_time(void); +extern PyObject* PyInit__thread(void); +extern PyObject* PyInit__locale(void); +extern PyObject* PyInit__io(void); +extern PyObject* PyInit_faulthandler(void); +extern PyObject* PyInit__tracemalloc(void); +extern PyObject* PyInit__symtable(void); +extern PyObject* PyInit_pyexpat(void); +extern PyObject* PyInit_xxsubtype(void); + +/* -- ADDMODULE MARKER 1 -- */ + +extern PyObject* PyMarshal_Init(void); +extern PyObject* PyInit__imp(void); +extern PyObject* PyInit_gc(void); +extern PyObject* PyInit__ast(void); +extern PyObject* _PyWarnings_Init(void); +extern PyObject* PyInit__string(void); + +struct _inittab _PyImport_Inittab[] = { + + {"array", PyInit_array}, + {"cmath", PyInit_cmath}, + {"math", PyInit_math}, + {"_struct", PyInit__struct}, + {"_random", PyInit__random}, + {"_elementtree", PyInit__elementtree}, + {"_pickle", PyInit__pickle}, + {"_datetime", PyInit__datetime}, + {"_bisect", PyInit__bisect}, + {"_heapq", PyInit__heapq}, + {"_statistics", PyInit__statistics}, + {"unicodedata", PyInit_unicodedata}, + {"fcntl", PyInit_fcntl}, + {"spwd", PyInit_spwd}, + {"grp", PyInit_grp}, + {"select", PyInit_select}, + {"_csv", PyInit__csv}, + {"_socket", PyInit__socket}, + {"_posixsubprocess", PyInit__posixsubprocess}, + {"_md5", PyInit__md5}, + {"_sha1", PyInit__sha1}, + {"_sha256", PyInit__sha256}, + {"_sha512", PyInit__sha512}, + {"_sha3", PyInit__sha3}, + {"_blake2", PyInit__blake2}, + {"syslog", PyInit_syslog}, + {"binascii", PyInit_binascii}, + {"zlib", PyInit_zlib}, + {"posix", PyInit_posix}, + {"errno", PyInit_errno}, + {"pwd", PyInit_pwd}, + {"_sre", PyInit__sre}, + {"_codecs", 
PyInit__codecs}, + {"_weakref", PyInit__weakref}, + {"_functools", PyInit__functools}, + {"_operator", PyInit__operator}, + {"_collections", PyInit__collections}, + {"_abc", PyInit__abc}, + {"itertools", PyInit_itertools}, + {"atexit", PyInit_atexit}, + {"_signal", PyInit__signal}, + {"_stat", PyInit__stat}, + {"time", PyInit_time}, + {"_thread", PyInit__thread}, + {"_locale", PyInit__locale}, + {"_io", PyInit__io}, + {"faulthandler", PyInit_faulthandler}, + {"_tracemalloc", PyInit__tracemalloc}, + {"_symtable", PyInit__symtable}, + {"pyexpat", PyInit_pyexpat}, + {"xxsubtype", PyInit_xxsubtype}, + +/* -- ADDMODULE MARKER 2 -- */ + + /* This module lives in marshal.c */ + {"marshal", PyMarshal_Init}, + + /* This lives in import.c */ + {"_imp", PyInit__imp}, + + /* This lives in Python/Python-ast.c */ + {"_ast", PyInit__ast}, + + /* These entries are here for sys.builtin_module_names */ + {"builtins", NULL}, + {"sys", NULL}, + + /* This lives in gcmodule.c */ + {"gc", PyInit_gc}, + + /* This lives in _warnings.c */ + {"_warnings", _PyWarnings_Init}, + + /* This lives in Objects/unicodeobject.c */ + {"_string", PyInit__string}, + + /* Sentinel */ + {0, 0} +}; + + +#ifdef __cplusplus +} +#endif diff --git a/python310/config-3.10-x86_64-linux-gnu/config.c.in b/python310/config-3.10-x86_64-linux-gnu/config.c.in new file mode 100644 index 0000000000000000000000000000000000000000..d69e8e88b0ca458b31c247d98f3747f2f11ebe67 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/config.c.in @@ -0,0 +1,67 @@ +/* -*- C -*- *********************************************** +Copyright (c) 2000, BeOpen.com. +Copyright (c) 1995-2000, Corporation for National Research Initiatives. +Copyright (c) 1990-1995, Stichting Mathematisch Centrum. +All rights reserved. + +See the file "Misc/COPYRIGHT" for information on usage and +redistribution of this file, and for a DISCLAIMER OF ALL WARRANTIES. +******************************************************************/ + +/* Module configuration */ + +/* !!! !!! !!! This file is edited by the makesetup script !!! !!! !!! */ + +/* This file contains the table of built-in modules. + See create_builtin() in import.c. 
*/ + +#include "Python.h" + +#ifdef __cplusplus +extern "C" { +#endif + + +/* -- ADDMODULE MARKER 1 -- */ + +extern PyObject* PyMarshal_Init(void); +extern PyObject* PyInit__imp(void); +extern PyObject* PyInit_gc(void); +extern PyObject* PyInit__ast(void); +extern PyObject* _PyWarnings_Init(void); +extern PyObject* PyInit__string(void); + +struct _inittab _PyImport_Inittab[] = { + +/* -- ADDMODULE MARKER 2 -- */ + + /* This module lives in marshal.c */ + {"marshal", PyMarshal_Init}, + + /* This lives in import.c */ + {"_imp", PyInit__imp}, + + /* This lives in Python/Python-ast.c */ + {"_ast", PyInit__ast}, + + /* These entries are here for sys.builtin_module_names */ + {"builtins", NULL}, + {"sys", NULL}, + + /* This lives in gcmodule.c */ + {"gc", PyInit_gc}, + + /* This lives in _warnings.c */ + {"_warnings", _PyWarnings_Init}, + + /* This lives in Objects/unicodeobject.c */ + {"_string", PyInit__string}, + + /* Sentinel */ + {0, 0} +}; + + +#ifdef __cplusplus +} +#endif diff --git a/python310/config-3.10-x86_64-linux-gnu/install-sh b/python310/config-3.10-x86_64-linux-gnu/install-sh new file mode 100644 index 0000000000000000000000000000000000000000..8175c640fe6288a75cc846567ea5506086f326f4 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/install-sh @@ -0,0 +1,518 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2018-03-11.20; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# 'make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +tab=' ' +nl=' +' +IFS=" $tab$nl" + +# Set DOITPROG to "echo" to test this script. + +doit=${DOITPROG-} +doit_exec=${doit:-exec} + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. 
+ +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +is_target_a_directory=possibly + +usage="\ +Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *"$tab"* | *"$nl"* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) + is_target_a_directory=always + dst_arg=$2 + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) is_target_a_directory=never;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +# We allow the use of options -d and -T together, by making -d +# take the precedence; this is for compatibility with GNU install. + +if test -n "$dir_arg"; then + if test -n "$dst_arg"; then + echo "$0: target directory not allowed when installing a directory." >&2 + exit 1 + fi +fi + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for 'test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call 'install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + if test $# -gt 1 || test "$is_target_a_directory" = always; then + if test ! -d "$dst_arg"; then + echo "$0: $dst_arg: Is not a directory." 
>&2 + exit 1 + fi + fi +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for 'test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? + else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename. + if test -d "$dst"; then + if test "$is_target_a_directory" = never; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dstbase=`basename "$src"` + case $dst in + */) dst=$dst$dstbase;; + *) dst=$dst/$dstbase;; + esac + dstdir_status=0 + else + dstdir=`dirname "$dst"` + test -d "$dstdir" + dstdir_status=$? + fi + fi + + case $dstdir in + */) dstdirslash=$dstdir;; + *) dstdirslash=$dstdir/;; + esac + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + # Note that $RANDOM variable is not portable (e.g. dash); Use it + # here however when possible just to lower collision chance. + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + + trap 'ret=$?; rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" 2>/dev/null; exit $ret' 0 + + # Because "mkdir -p" follows existing symlinks and we likely work + # directly in world-writeable /tmp, make sure that the '$tmpdir' + # directory is successfully created first before we actually test + # 'mkdir -p' feature. + if (umask $mkdir_umask && + $mkdirprog $mkdir_mode "$tmpdir" && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/a/b") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. 
+ # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + test_tmpdir="$tmpdir/a" + ls_ld_tmpdir=`ls -ld "$test_tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$test_tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$test_tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/a/b" "$tmpdir/a" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- "$tmpdir" 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. + + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + oIFS=$IFS + IFS=/ + set -f + set fnord $dstdir + shift + set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=${dstdirslash}_inst.$$_ + rmtmp=${dstdirslash}_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + set +f && + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. 
+ $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. + $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'before-save-hook 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC0" +# time-stamp-end: "; # UTC" +# End: diff --git a/python310/config-3.10-x86_64-linux-gnu/libpython3.10-pic.a b/python310/config-3.10-x86_64-linux-gnu/libpython3.10-pic.a new file mode 100644 index 0000000000000000000000000000000000000000..6a8de333840fbd3f9f5ac9d897888d71c2d6ad4d --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/libpython3.10-pic.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39f52ccfd334a1baaa9ae502d8e09cdbff62be558ba88765a8f21cc1da4fc719 +size 9336780 diff --git a/python310/config-3.10-x86_64-linux-gnu/libpython3.10.a b/python310/config-3.10-x86_64-linux-gnu/libpython3.10.a new file mode 100644 index 0000000000000000000000000000000000000000..afa22f17e3603072f10330f00230966771605c97 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/libpython3.10.a @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d74d035d6a97f89cbda5417f948e740fe47274c71f6500b561cb0a103def47b +size 10484388 diff --git a/python310/config-3.10-x86_64-linux-gnu/libpython3.10.so b/python310/config-3.10-x86_64-linux-gnu/libpython3.10.so new file mode 100644 index 0000000000000000000000000000000000000000..0e400b6ec6553917aa52cbbadb611c1d3a5b7aa3 --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/libpython3.10.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12d22c136f466d0b2d1db9d3327ed02ffa2306ce7ff5c1f6603aacac764de980 +size 5834688 diff --git a/python310/config-3.10-x86_64-linux-gnu/makesetup b/python310/config-3.10-x86_64-linux-gnu/makesetup new file mode 100644 index 0000000000000000000000000000000000000000..1a767838c92be9b87b20a0deaa7f3f3810ab04fa --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/makesetup @@ -0,0 +1,309 @@ +#! /bin/sh + +# Convert templates into Makefile and config.c, based on the module +# definitions found in the file Setup. +# +# Usage: makesetup [-s dir] [-c file] [-m file] [Setup] ... [-n [Setup] ...] +# +# Options: +# -s directory: alternative source directory (default .) +# -l directory: library source directory (default derived from $0) +# -c file: alternative config.c template (default $libdir/config.c.in) +# -c -: don't write config.c +# -m file: alternative Makefile template (default ./Makefile.pre) +# -m -: don't write Makefile +# +# Remaining arguments are one or more Setup files (default ./Setup). +# Setup files after a -n option are used for their variables, modules +# and libraries but not for their .o files. 
+# +# See Setup for a description of the format of the Setup file. +# +# The following edits are made: +# +# Copying config.c.in to config.c: +# - insert an identifying comment at the start +# - for each mentioned in Setup before *noconfig*: +# + insert 'extern PyObject* PyInit_(void);' before MARKER 1 +# + insert '{"", PyInit_},' before MARKER 2 +# +# Copying Makefile.pre to Makefile: +# - insert an identifying comment at the start +# - replace _MODBUILT_NAMES_ by the list of *static* and *shared* modules +# from Setup +# - replace _MODDISABLED_NAMES_ by the list of *disabled* modules from Setup +# - replace _MODOBJS_ by the list of objects from Setup (except for +# Setup files after a -n option) +# - replace _MODLIBS_ by the list of libraries from Setup +# - for each object file mentioned in Setup, append a rule +# '.o: .c; ' to the end of the Makefile +# - for each module mentioned in Setup, append a rule +# which creates a shared library version to the end of the Makefile +# - for each variable definition found in Setup, insert the definition +# before the comment 'Definitions added by makesetup' + +# Loop over command line options +usage=' +usage: makesetup [-s srcdir] [-l libdir] [-c config.c.in] [-m Makefile.pre] + [Setup] ... [-n [Setup] ...]' +srcdir='.' +libdir='' +config='' +makepre='' +noobjects='' +doconfig=yes +while : +do + case $1 in + -s) shift; srcdir=$1; shift;; + -l) shift; libdir=$1; shift;; + -c) shift; config=$1; shift;; + -m) shift; makepre=$1; shift;; + --) shift; break;; + -n) noobjects=yes;; + -*) echo "$usage" 1>&2; exit 2;; + *) break;; + esac +done + +# Set default libdir and config if not set by command line +# (Not all systems have dirname) +case $libdir in +'') case $0 in + */*) libdir=`echo $0 | sed 's,/[^/]*$,,'`;; + *) libdir=.;; + esac;; +esac +case $config in +'') config=$libdir/config.c.in;; +esac +case $makepre in +'') makepre=Makefile.pre;; +esac + +# Newline for sed i and a commands +NL='\ +' + +# Setup to link with extra libraries when making shared extensions. +# Currently, only Cygwin needs this baggage. +case `uname -s` in +CYGWIN*) if test $libdir = . + then + ExtraLibDir=. 
+ else + ExtraLibDir='$(LIBPL)' + fi + ExtraLibs="-L$ExtraLibDir -lpython\$(LDVERSION)";; +esac + +# Main loop +for i in ${*-Setup} +do + case $i in + -n) echo '*noobjects*';; + *) echo '*doconfig*'; cat "$i";; + esac +done | +sed -e 's/[ ]*#.*//' -e '/^[ ]*$/d' | +( + rulesf="@rules.$$" + trap 'rm -f $rulesf' 0 1 2 3 + echo " +# Rules appended by makesetup +" >$rulesf + DEFS= + BUILT= + DISABLED= + MODS= + SHAREDMODS= + OBJS= + LIBS= + LOCALLIBS= + BASELIBS= + while read line + do + # to handle backslashes for sh's that don't automatically + # continue a read when the last char is a backslash + while echo $line | grep '\\$' > /dev/null + do + read extraline + line=`echo $line| sed s/.$//`$extraline + done + + # Output DEFS in reverse order so first definition overrides + case $line in + *=*) DEFS="$line$NL$DEFS"; continue;; + 'include '*) DEFS="$line$NL$DEFS"; continue;; + '*noobjects*') + case $noobjects in + yes) ;; + *) LOCALLIBS=$LIBS; LIBS=;; + esac + noobjects=yes; + continue;; + '*doconfig*') doconfig=yes; continue;; + '*static*') doconfig=yes; continue;; + '*noconfig*') doconfig=no; continue;; + '*shared*') doconfig=no; continue;; + '*disabled*') doconfig=disabled; continue;; + esac + srcs= + cpps= + libs= + mods= + skip= + for arg in $line + do + case $skip in + libs) libs="$libs $arg"; skip=; continue;; + cpps) cpps="$cpps $arg"; skip=; continue;; + srcs) srcs="$srcs $arg"; skip=; continue;; + esac + case $arg in + -framework) libs="$libs $arg"; skip=libs; + # OSX/OSXS/Darwin framework link cmd + ;; + -[IDUCfF]*) cpps="$cpps $arg";; + -Xcompiler) skip=cpps;; + -Xlinker) libs="$libs $arg"; skip=libs;; + -rpath) libs="$libs $arg"; skip=libs;; + --rpath) libs="$libs $arg"; skip=libs;; + -[A-Zl]*) libs="$libs $arg";; + *.a) libs="$libs $arg";; + *.so) libs="$libs $arg";; + *.sl) libs="$libs $arg";; + /*.o) libs="$libs $arg";; + *.def) libs="$libs $arg";; + *.o) srcs="$srcs `basename $arg .o`.c";; + *.[cC]) srcs="$srcs $arg";; + *.m) srcs="$srcs $arg";; # Objective-C src + *.cc) srcs="$srcs $arg";; + *.c++) srcs="$srcs $arg";; + *.cxx) srcs="$srcs $arg";; + *.cpp) srcs="$srcs $arg";; + \$*) libs="$libs $arg" + cpps="$cpps $arg";; + *.*) echo 1>&2 "bad word $arg in $line" + exit 1;; + -u) skip=libs; libs="$libs -u";; + [a-zA-Z_]*) mods="$mods $arg";; + *) echo 1>&2 "bad word $arg in $line" + exit 1;; + esac + done + case $doconfig in + yes) + LIBS="$LIBS $libs" + MODS="$MODS $mods" + BUILT="$BUILT $mods" + ;; + no) + BUILT="$BUILT $mods" + ;; + disabled) + DISABLED="$DISABLED $mods" + continue + ;; + esac + case $noobjects in + yes) continue;; + esac + objs='' + for src in $srcs + do + case $src in + *.c) obj=`basename $src .c`.o; cc='$(CC)';; + *.cc) obj=`basename $src .cc`.o; cc='$(CXX)';; + *.c++) obj=`basename $src .c++`.o; cc='$(CXX)';; + *.C) obj=`basename $src .C`.o; cc='$(CXX)';; + *.cxx) obj=`basename $src .cxx`.o; cc='$(CXX)';; + *.cpp) obj=`basename $src .cpp`.o; cc='$(CXX)';; + *.m) obj=`basename $src .m`.o; cc='$(CC)';; # Obj-C + *) continue;; + esac + obj="$srcdir/$obj" + objs="$objs $obj" + case $src in + glmodule.c) ;; + /*) ;; + \$*) ;; + *) src='$(srcdir)/'"$srcdir/$src";; + esac + case $doconfig in + no) cc="$cc \$(CCSHARED) \$(PY_CFLAGS_NODIST) \$(PY_CPPFLAGS)";; + *) + cc="$cc \$(PY_BUILTIN_MODULE_CFLAGS)";; + esac + rule="$obj: $src; $cc $cpps -c $src -o $obj" + echo "$rule" >>$rulesf + done + case $doconfig in + yes) OBJS="$OBJS $objs";; + esac + for mod in $mods + do + file="$srcdir/$mod\$(EXT_SUFFIX)" + case $doconfig in + no) SHAREDMODS="$SHAREDMODS $file";; 
+ esac + rule="$file: $objs" + rule="$rule; \$(BLDSHARED) $objs $libs $ExtraLibs -o $file" + echo "$rule" >>$rulesf + done + done + + case $SHAREDMODS in + '') ;; + *) DEFS="SHAREDMODS=$SHAREDMODS$NL$DEFS";; + esac + + case $noobjects in + yes) BASELIBS=$LIBS;; + *) LOCALLIBS=$LIBS;; + esac + LIBS='$(LOCALMODLIBS) $(BASEMODLIBS)' + DEFS="BASEMODLIBS=$BASELIBS$NL$DEFS" + DEFS="LOCALMODLIBS=$LOCALLIBS$NL$DEFS" + + EXTDECLS= + INITBITS= + for mod in $MODS + do + EXTDECLS="${EXTDECLS}extern PyObject* PyInit_$mod(void);$NL" + INITBITS="${INITBITS} {\"$mod\", PyInit_$mod},$NL" + done + + + case $config in + -) ;; + *) sed -e " + 1i$NL/* Generated automatically from $config by makesetup. */ + /MARKER 1/i$NL$EXTDECLS + + /MARKER 2/i$NL$INITBITS + + " $config >config.c + ;; + esac + + case $makepre in + -) ;; + *) sedf="@sed.in.$$" + trap 'rm -f $sedf' 0 1 2 3 + echo "1i\\" >$sedf + str="# Generated automatically from $makepre by makesetup." + echo "$str" >>$sedf + echo "s%_MODBUILT_NAMES_%$BUILT%" >>$sedf + echo "s%_MODDISABLED_NAMES_%$DISABLED%" >>$sedf + echo "s%_MODOBJS_%$OBJS%" >>$sedf + echo "s%_MODLIBS_%$LIBS%" >>$sedf + echo "/Definitions added by makesetup/a$NL$NL$DEFS" >>$sedf + sed -f $sedf $makepre >Makefile + cat $rulesf >>Makefile + rm -f $sedf + ;; + esac + + rm -f $rulesf +) diff --git a/python310/config-3.10-x86_64-linux-gnu/python-config.py b/python310/config-3.10-x86_64-linux-gnu/python-config.py new file mode 100644 index 0000000000000000000000000000000000000000..778bbb92cb3cc72fa453e08dcf6e018504c7a7be --- /dev/null +++ b/python310/config-3.10-x86_64-linux-gnu/python-config.py @@ -0,0 +1,74 @@ +#! /usr/bin/python3.10 +# -*- python -*- + +# Keep this script in sync with python-config.sh.in + +import getopt +import os +import sys +import sysconfig + +valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags', + 'ldflags', 'extension-suffix', 'help', 'abiflags', 'configdir', + 'embed'] + +def exit_with_usage(code=1): + print("Usage: {0} [{1}]".format( + sys.argv[0], '|'.join('--'+opt for opt in valid_opts)), file=sys.stderr) + sys.exit(code) + +try: + opts, args = getopt.getopt(sys.argv[1:], '', valid_opts) +except getopt.error: + exit_with_usage() + +if not opts: + exit_with_usage() + +getvar = sysconfig.get_config_var +pyver = getvar('VERSION') + +opt_flags = [flag for (flag, val) in opts] + +if '--help' in opt_flags: + exit_with_usage(code=0) + +for opt in opt_flags: + if opt == '--prefix': + print(getvar('prefix')) + + elif opt == '--exec-prefix': + print(getvar('exec_prefix')) + + elif opt in ('--includes', '--cflags'): + flags = ['-I' + sysconfig.get_path('include'), + '-I' + sysconfig.get_path('platinclude')] + if opt == '--cflags': + flags.extend(getvar('CFLAGS').split()) + print(' '.join(flags)) + + elif opt in ('--libs', '--ldflags'): + libs = [] + if '--embed' in opt_flags: + libs.append('-lpython' + pyver + sys.abiflags) + else: + libpython = getvar('LIBPYTHON') + if libpython: + libs.append(libpython) + libs.extend(getvar('LIBS').split() + getvar('SYSLIBS').split()) + + # add the prefix/lib/pythonX.Y/config dir, but only if there is no + # shared library in prefix/lib/. 
+ if opt == '--ldflags': + if not getvar('Py_ENABLE_SHARED'): + libs.insert(0, '-L' + getvar('LIBPL')) + print(' '.join(libs)) + + elif opt == '--extension-suffix': + print(getvar('EXT_SUFFIX')) + + elif opt == '--abiflags': + print(sys.abiflags) + + elif opt == '--configdir': + print(getvar('LIBPL')) diff --git a/python310/config-3.10-x86_64-linux-gnu/python.o b/python310/config-3.10-x86_64-linux-gnu/python.o new file mode 100644 index 0000000000000000000000000000000000000000..7578dd6fa9effc23a3ec77726d82bc81066ed66e Binary files /dev/null and b/python310/config-3.10-x86_64-linux-gnu/python.o differ diff --git a/python310/configparser.py b/python310/configparser.py new file mode 100644 index 0000000000000000000000000000000000000000..785527082d07a0616503f9809bb747b19bd29aaa --- /dev/null +++ b/python310/configparser.py @@ -0,0 +1,1368 @@ +"""Configuration file parser. + +A configuration file consists of sections, lead by a "[section]" header, +and followed by "name: value" entries, with continuations and such in +the style of RFC 822. + +Intrinsic defaults can be specified by passing them into the +ConfigParser constructor as a dictionary. + +class: + +ConfigParser -- responsible for parsing a list of + configuration files, and managing the parsed database. + + methods: + + __init__(defaults=None, dict_type=_default_dict, allow_no_value=False, + delimiters=('=', ':'), comment_prefixes=('#', ';'), + inline_comment_prefixes=None, strict=True, + empty_lines_in_values=True, default_section='DEFAULT', + interpolation=, converters=): + + Create the parser. When `defaults` is given, it is initialized into the + dictionary or intrinsic defaults. The keys must be strings, the values + must be appropriate for %()s string interpolation. + + When `dict_type` is given, it will be used to create the dictionary + objects for the list of sections, for the options within a section, and + for the default values. + + When `delimiters` is given, it will be used as the set of substrings + that divide keys from values. + + When `comment_prefixes` is given, it will be used as the set of + substrings that prefix comments in empty lines. Comments can be + indented. + + When `inline_comment_prefixes` is given, it will be used as the set of + substrings that prefix comments in non-empty lines. + + When `strict` is True, the parser won't allow for any section or option + duplicates while reading from a single source (file, string or + dictionary). Default is True. + + When `empty_lines_in_values` is False (default: True), each empty line + marks the end of an option. Otherwise, internal empty lines of + a multiline option are kept as part of the value. + + When `allow_no_value` is True (default: False), options without + values are accepted; the value presented for these is None. + + When `default_section` is given, the name of the special section is + named accordingly. By default it is called ``"DEFAULT"`` but this can + be customized to point to any other valid section name. Its current + value can be retrieved using the ``parser_instance.default_section`` + attribute and may be modified at runtime. + + When `interpolation` is given, it should be an Interpolation subclass + instance. It will be used as the handler for option value + pre-processing when using getters. RawConfigParser objects don't do + any sort of interpolation, whereas ConfigParser uses an instance of + BasicInterpolation. The library also provides a ``zc.buildbot`` + inspired ExtendedInterpolation implementation. 
+ + When `converters` is given, it should be a dictionary where each key + represents the name of a type converter and each value is a callable + implementing the conversion from string to the desired datatype. Every + converter gets its corresponding get*() method on the parser object and + section proxies. + + sections() + Return all the configuration section names, sans DEFAULT. + + has_section(section) + Return whether the given section exists. + + has_option(section, option) + Return whether the given option exists in the given section. + + options(section) + Return list of configuration options for the named section. + + read(filenames, encoding=None) + Read and parse the iterable of named configuration files, given by + name. A single filename is also allowed. Non-existing files + are ignored. Return list of successfully read files. + + read_file(f, filename=None) + Read and parse one configuration file, given as a file object. + The filename defaults to f.name; it is only used in error + messages (if f has no `name` attribute, the string `` is used). + + read_string(string) + Read configuration from a given string. + + read_dict(dictionary) + Read configuration from a dictionary. Keys are section names, + values are dictionaries with keys and values that should be present + in the section. If the used dictionary type preserves order, sections + and their keys will be added in order. Values are automatically + converted to strings. + + get(section, option, raw=False, vars=None, fallback=_UNSET) + Return a string value for the named option. All % interpolations are + expanded in the return values, based on the defaults passed into the + constructor and the DEFAULT section. Additional substitutions may be + provided using the `vars` argument, which must be a dictionary whose + contents override any pre-existing defaults. If `option` is a key in + `vars`, the value from `vars` is used. + + getint(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to an integer. + + getfloat(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a float. + + getboolean(section, options, raw=False, vars=None, fallback=_UNSET) + Like get(), but convert value to a boolean (currently case + insensitively defined as 0, false, no, off for False, and 1, true, + yes, on for True). Returns False or True. + + items(section=_UNSET, raw=False, vars=None) + If section is given, return a list of tuples with (name, value) for + each option in the section. Otherwise, return a list of tuples with + (section_name, section_proxy) for each section, including DEFAULTSECT. + + remove_section(section) + Remove the given file section and all its options. + + remove_option(section, option) + Remove the given option from the given section. + + set(section, option, value) + Set the given option. + + write(fp, space_around_delimiters=True) + Write the configuration state in .ini format. If + `space_around_delimiters` is True (the default), delimiters + between keys and values are surrounded by spaces. 
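+
+Editorial usage sketch (not part of the upstream docstring; the section and
+option names below are hypothetical):
+
+    parser = ConfigParser()
+    parser.read_dict({"paths": {"dir": "/tmp", "log": "%(dir)s/app.log"}})
+    parser.get("paths", "log")   # -> '/tmp/app.log' via BasicInterpolation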
+""" + +from collections.abc import MutableMapping +from collections import ChainMap as _ChainMap +import functools +import io +import itertools +import os +import re +import sys +import warnings + +__all__ = ["NoSectionError", "DuplicateOptionError", "DuplicateSectionError", + "NoOptionError", "InterpolationError", "InterpolationDepthError", + "InterpolationMissingOptionError", "InterpolationSyntaxError", + "ParsingError", "MissingSectionHeaderError", + "ConfigParser", "SafeConfigParser", "RawConfigParser", + "Interpolation", "BasicInterpolation", "ExtendedInterpolation", + "LegacyInterpolation", "SectionProxy", "ConverterMapping", + "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"] + +_default_dict = dict +DEFAULTSECT = "DEFAULT" + +MAX_INTERPOLATION_DEPTH = 10 + + + +# exception classes +class Error(Exception): + """Base class for ConfigParser exceptions.""" + + def __init__(self, msg=''): + self.message = msg + Exception.__init__(self, msg) + + def __repr__(self): + return self.message + + __str__ = __repr__ + + +class NoSectionError(Error): + """Raised when no section matches a requested option.""" + + def __init__(self, section): + Error.__init__(self, 'No section: %r' % (section,)) + self.section = section + self.args = (section, ) + + +class DuplicateSectionError(Error): + """Raised when a section is repeated in an input source. + + Possible repetitions that raise this exception are: multiple creation + using the API or in strict parsers when a section is found more than once + in a single input file, string or dictionary. + """ + + def __init__(self, section, source=None, lineno=None): + msg = [repr(section), " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": section ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Section ") + Error.__init__(self, "".join(msg)) + self.section = section + self.source = source + self.lineno = lineno + self.args = (section, source, lineno) + + +class DuplicateOptionError(Error): + """Raised by strict parsers when an option is repeated in an input source. + + Current implementation raises this exception only when an option is found + more than once in a single file, string or dictionary. 
+ """ + + def __init__(self, section, option, source=None, lineno=None): + msg = [repr(option), " in section ", repr(section), + " already exists"] + if source is not None: + message = ["While reading from ", repr(source)] + if lineno is not None: + message.append(" [line {0:2d}]".format(lineno)) + message.append(": option ") + message.extend(msg) + msg = message + else: + msg.insert(0, "Option ") + Error.__init__(self, "".join(msg)) + self.section = section + self.option = option + self.source = source + self.lineno = lineno + self.args = (section, option, source, lineno) + + +class NoOptionError(Error): + """A requested option was not found.""" + + def __init__(self, option, section): + Error.__init__(self, "No option %r in section: %r" % + (option, section)) + self.option = option + self.section = section + self.args = (option, section) + + +class InterpolationError(Error): + """Base class for interpolation-related exceptions.""" + + def __init__(self, option, section, msg): + Error.__init__(self, msg) + self.option = option + self.section = section + self.args = (option, section, msg) + + +class InterpolationMissingOptionError(InterpolationError): + """A string substitution required a setting which was not available.""" + + def __init__(self, option, section, rawval, reference): + msg = ("Bad value substitution: option {!r} in section {!r} contains " + "an interpolation key {!r} which is not a valid option name. " + "Raw value: {!r}".format(option, section, reference, rawval)) + InterpolationError.__init__(self, option, section, msg) + self.reference = reference + self.args = (option, section, rawval, reference) + + +class InterpolationSyntaxError(InterpolationError): + """Raised when the source text contains invalid syntax. + + Current implementation raises this exception when the source text into + which substitutions are made does not conform to the required syntax. + """ + + +class InterpolationDepthError(InterpolationError): + """Raised when substitutions are nested too deeply.""" + + def __init__(self, option, section, rawval): + msg = ("Recursion limit exceeded in value substitution: option {!r} " + "in section {!r} contains an interpolation key which " + "cannot be substituted in {} steps. Raw value: {!r}" + "".format(option, section, MAX_INTERPOLATION_DEPTH, + rawval)) + InterpolationError.__init__(self, option, section, msg) + self.args = (option, section, rawval) + + +class ParsingError(Error): + """Raised when a configuration file does not follow legal syntax.""" + + def __init__(self, source=None, filename=None): + # Exactly one of `source'/`filename' arguments has to be given. + # `filename' kept for compatibility. + if filename and source: + raise ValueError("Cannot specify both `filename' and `source'. " + "Use `source'.") + elif not filename and not source: + raise ValueError("Required argument `source' not given.") + elif filename: + source = filename + Error.__init__(self, 'Source contains parsing errors: %r' % source) + self.source = source + self.errors = [] + self.args = (source, ) + + @property + def filename(self): + """Deprecated, use `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in Python 3.12. " + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + return self.source + + @filename.setter + def filename(self, value): + """Deprecated, user `source'.""" + warnings.warn( + "The 'filename' attribute will be removed in Python 3.12. 
" + "Use 'source' instead.", + DeprecationWarning, stacklevel=2 + ) + self.source = value + + def append(self, lineno, line): + self.errors.append((lineno, line)) + self.message += '\n\t[line %2d]: %s' % (lineno, line) + + +class MissingSectionHeaderError(ParsingError): + """Raised when a key-value pair is found before any section header.""" + + def __init__(self, filename, lineno, line): + Error.__init__( + self, + 'File contains no section headers.\nfile: %r, line: %d\n%r' % + (filename, lineno, line)) + self.source = filename + self.lineno = lineno + self.line = line + self.args = (filename, lineno, line) + + +# Used in parser getters to indicate the default behaviour when a specific +# option is not found it to raise an exception. Created to enable `None` as +# a valid fallback value. +_UNSET = object() + + +class Interpolation: + """Dummy interpolation that passes the value through with no changes.""" + + def before_get(self, parser, section, option, value, defaults): + return value + + def before_set(self, parser, section, option, value): + return value + + def before_read(self, parser, section, option, value): + return value + + def before_write(self, parser, section, option, value): + return value + + +class BasicInterpolation(Interpolation): + """Interpolation as implemented in the classic ConfigParser. + + The option values can contain format strings which refer to other values in + the same section, or values in the special default section. + + For example: + + something: %(dir)s/whatever + + would resolve the "%(dir)s" to the value of dir. All reference + expansions are done late, on demand. If a user needs to use a bare % in + a configuration file, she can escape it by writing %%. Other % usage + is considered a user error and raises `InterpolationSyntaxError`.""" + + _KEYCRE = re.compile(r"%\(([^)]+)\)s") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('%%', '') # escaped percent signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '%' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('%'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("%") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "%": + accum.append("%") + rest = rest[2:] + elif c == "(": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + var = parser.optionxform(m.group(1)) + rest = rest[m.end():] + try: + v = map[var] + except KeyError: + raise InterpolationMissingOptionError( + option, section, rawval, var) from None + if "%" in v: + self._interpolate_some(parser, option, accum, v, + section, map, depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'%%' must be followed by '%%' or '(', " + "found: %r" % (rest,)) + + +class ExtendedInterpolation(Interpolation): + """Advanced variant of interpolation, supports the syntax used by + `zc.buildout`. 
Enables interpolation between sections.""" + + _KEYCRE = re.compile(r"\$\{([^}]+)\}") + + def before_get(self, parser, section, option, value, defaults): + L = [] + self._interpolate_some(parser, option, L, value, section, defaults, 1) + return ''.join(L) + + def before_set(self, parser, section, option, value): + tmp_value = value.replace('$$', '') # escaped dollar signs + tmp_value = self._KEYCRE.sub('', tmp_value) # valid syntax + if '$' in tmp_value: + raise ValueError("invalid interpolation syntax in %r at " + "position %d" % (value, tmp_value.find('$'))) + return value + + def _interpolate_some(self, parser, option, accum, rest, section, map, + depth): + rawval = parser.get(section, option, raw=True, fallback=rest) + if depth > MAX_INTERPOLATION_DEPTH: + raise InterpolationDepthError(option, section, rawval) + while rest: + p = rest.find("$") + if p < 0: + accum.append(rest) + return + if p > 0: + accum.append(rest[:p]) + rest = rest[p:] + # p is no longer used + c = rest[1:2] + if c == "$": + accum.append("$") + rest = rest[2:] + elif c == "{": + m = self._KEYCRE.match(rest) + if m is None: + raise InterpolationSyntaxError(option, section, + "bad interpolation variable reference %r" % rest) + path = m.group(1).split(':') + rest = rest[m.end():] + sect = section + opt = option + try: + if len(path) == 1: + opt = parser.optionxform(path[0]) + v = map[opt] + elif len(path) == 2: + sect = path[0] + opt = parser.optionxform(path[1]) + v = parser.get(sect, opt, raw=True) + else: + raise InterpolationSyntaxError( + option, section, + "More than one ':' found: %r" % (rest,)) + except (KeyError, NoSectionError, NoOptionError): + raise InterpolationMissingOptionError( + option, section, rawval, ":".join(path)) from None + if "$" in v: + self._interpolate_some(parser, opt, accum, v, sect, + dict(parser.items(sect, raw=True)), + depth + 1) + else: + accum.append(v) + else: + raise InterpolationSyntaxError( + option, section, + "'$' must be followed by '$' or '{', " + "found: %r" % (rest,)) + + +class LegacyInterpolation(Interpolation): + """Deprecated interpolation used in old versions of ConfigParser. + Use BasicInterpolation or ExtendedInterpolation instead.""" + + _KEYCRE = re.compile(r"%\(([^)]*)\)s|.") + + def before_get(self, parser, section, option, value, vars): + rawval = value + depth = MAX_INTERPOLATION_DEPTH + while depth: # Loop through this until it's done + depth -= 1 + if value and "%(" in value: + replace = functools.partial(self._interpolation_replace, + parser=parser) + value = self._KEYCRE.sub(replace, value) + try: + value = value % vars + except KeyError as e: + raise InterpolationMissingOptionError( + option, section, rawval, e.args[0]) from None + else: + break + if value and "%(" in value: + raise InterpolationDepthError(option, section, rawval) + return value + + def before_set(self, parser, section, option, value): + return value + + @staticmethod + def _interpolation_replace(match, parser): + s = match.group(1) + if s is None: + return match.group() + else: + return "%%(%s)s" % parser.optionxform(s) + + +class RawConfigParser(MutableMapping): + """ConfigParser that does not do interpolation.""" + + # Regular expressions for parsing section headers and options + _SECT_TMPL = r""" + \[ # [ + (?P
<header>.+)                     # very permissive!
+        \]                                 # ]
+        """
+    _OPT_TMPL = r"""
+        (?P